diff --git a/client/kafka/async_producer.go b/client/kafka/async_producer.go index 64093447f..694400ab4 100644 --- a/client/kafka/async_producer.go +++ b/client/kafka/async_producer.go @@ -5,7 +5,7 @@ import ( "context" "fmt" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" patronerrors "github.com/beatlabs/patron/errors" "github.com/beatlabs/patron/trace" "github.com/opentracing/opentracing-go" diff --git a/client/kafka/integration_test.go b/client/kafka/integration_test.go index 39f37d409..8e8e33b3c 100644 --- a/client/kafka/integration_test.go +++ b/client/kafka/integration_test.go @@ -6,7 +6,7 @@ import ( "context" "testing" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/opentracing/opentracing-go" "github.com/opentracing/opentracing-go/ext" "github.com/opentracing/opentracing-go/mocktracer" diff --git a/client/kafka/kafka.go b/client/kafka/kafka.go index 15a7a011f..d319b2b49 100644 --- a/client/kafka/kafka.go +++ b/client/kafka/kafka.go @@ -6,7 +6,7 @@ import ( "fmt" "os" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/correlation" patronerrors "github.com/beatlabs/patron/errors" "github.com/beatlabs/patron/internal/validation" diff --git a/client/kafka/kafka_test.go b/client/kafka/kafka_test.go index dad83e5c2..a2cc74508 100644 --- a/client/kafka/kafka_test.go +++ b/client/kafka/kafka_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/correlation" "github.com/beatlabs/patron/trace" "github.com/opentracing/opentracing-go" diff --git a/client/kafka/sync_producer.go b/client/kafka/sync_producer.go index 612653ed0..39aa81e16 100644 --- a/client/kafka/sync_producer.go +++ b/client/kafka/sync_producer.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" patronerrors "github.com/beatlabs/patron/errors" "github.com/beatlabs/patron/trace" 
"github.com/opentracing/opentracing-go" diff --git a/component/async/kafka/group/group.go b/component/async/kafka/group/group.go index bd8ff2fc8..ffe1f2087 100644 --- a/component/async/kafka/group/group.go +++ b/component/async/kafka/group/group.go @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/component/async" "github.com/beatlabs/patron/component/async/kafka" "github.com/beatlabs/patron/internal/validation" diff --git a/component/async/kafka/group/group_test.go b/component/async/kafka/group/group_test.go index 671d98515..226a5af38 100644 --- a/component/async/kafka/group/group_test.go +++ b/component/async/kafka/group/group_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/component/async" "github.com/beatlabs/patron/component/async/kafka" kafkacmp "github.com/beatlabs/patron/component/kafka" diff --git a/component/async/kafka/group/integration_test.go b/component/async/kafka/group/integration_test.go index 19841219e..c12bc0787 100644 --- a/component/async/kafka/group/integration_test.go +++ b/component/async/kafka/group/integration_test.go @@ -13,7 +13,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron" "github.com/beatlabs/patron/component/async" "github.com/beatlabs/patron/component/async/kafka" diff --git a/component/async/kafka/kafka.go b/component/async/kafka/kafka.go index 19498c22f..78fe5ca54 100644 --- a/component/async/kafka/kafka.go +++ b/component/async/kafka/kafka.go @@ -8,7 +8,7 @@ import ( "strconv" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/component/async" "github.com/beatlabs/patron/correlation" "github.com/beatlabs/patron/encoding" diff --git a/component/async/kafka/kafka_test.go b/component/async/kafka/kafka_test.go index 010afbd24..16864d602 100644 --- 
a/component/async/kafka/kafka_test.go +++ b/component/async/kafka/kafka_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/component/async" "github.com/beatlabs/patron/correlation" "github.com/beatlabs/patron/encoding" diff --git a/component/async/kafka/option.go b/component/async/kafka/option.go index cd6af5a07..ea8ddaf43 100644 --- a/component/async/kafka/option.go +++ b/component/async/kafka/option.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/encoding" "github.com/beatlabs/patron/encoding/json" ) diff --git a/component/async/kafka/option_test.go b/component/async/kafka/option_test.go index 81481e86c..dd4a21c83 100644 --- a/component/async/kafka/option_test.go +++ b/component/async/kafka/option_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/encoding" "github.com/beatlabs/patron/encoding/json" "github.com/stretchr/testify/assert" diff --git a/component/async/kafka/simple/duration_client_test.go b/component/async/kafka/simple/duration_client_test.go index e761358f6..6155adbde 100644 --- a/component/async/kafka/simple/duration_client_test.go +++ b/component/async/kafka/simple/duration_client_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/component/async/kafka/simple/duration_kafka.go b/component/async/kafka/simple/duration_kafka.go index ff8e0489e..bcfa115a6 100644 --- a/component/async/kafka/simple/duration_kafka.go +++ b/component/async/kafka/simple/duration_kafka.go @@ -6,7 +6,7 @@ import ( "fmt" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" ) type outOfRangeOffsetError struct { diff --git a/component/async/kafka/simple/integration_test.go 
b/component/async/kafka/simple/integration_test.go index 28380c036..3fb5507e6 100644 --- a/component/async/kafka/simple/integration_test.go +++ b/component/async/kafka/simple/integration_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/component/async/kafka" kafkacmp "github.com/beatlabs/patron/component/kafka" testkafka "github.com/beatlabs/patron/test/kafka" diff --git a/component/async/kafka/simple/simple.go b/component/async/kafka/simple/simple.go index 07b18e97d..8728d55fd 100644 --- a/component/async/kafka/simple/simple.go +++ b/component/async/kafka/simple/simple.go @@ -8,7 +8,7 @@ import ( "sync" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/component/async" "github.com/beatlabs/patron/component/async/kafka" "github.com/beatlabs/patron/internal/validation" diff --git a/component/async/kafka/simple/simple_test.go b/component/async/kafka/simple/simple_test.go index d2f4d8819..d4ba592bc 100644 --- a/component/async/kafka/simple/simple_test.go +++ b/component/async/kafka/simple/simple_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/component/async/kafka" kafkacmp "github.com/beatlabs/patron/component/kafka" "github.com/stretchr/testify/assert" diff --git a/component/kafka/component.go b/component/kafka/component.go index 9d20c8924..1e8d64448 100644 --- a/component/kafka/component.go +++ b/component/kafka/component.go @@ -8,7 +8,7 @@ import ( "sync" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/correlation" patronErrors "github.com/beatlabs/patron/errors" "github.com/beatlabs/patron/internal/validation" diff --git a/component/kafka/component_test.go b/component/kafka/component_test.go index 35d7e6ac8..4089b3d62 100644 --- a/component/kafka/component_test.go +++ b/component/kafka/component_test.go @@ -7,7 
+7,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/correlation" "github.com/beatlabs/patron/encoding" "github.com/beatlabs/patron/encoding/json" diff --git a/component/kafka/integration_test.go b/component/kafka/integration_test.go index 84a6b8bd6..3b2a49f02 100644 --- a/component/kafka/integration_test.go +++ b/component/kafka/integration_test.go @@ -12,7 +12,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" kafkaclient "github.com/beatlabs/patron/client/kafka" "github.com/beatlabs/patron/correlation" testkafka "github.com/beatlabs/patron/test/kafka" diff --git a/component/kafka/kafka.go b/component/kafka/kafka.go index 14e774737..ce18001d3 100644 --- a/component/kafka/kafka.go +++ b/component/kafka/kafka.go @@ -7,7 +7,7 @@ import ( "fmt" "os" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/opentracing/opentracing-go" ) diff --git a/component/kafka/kafka_test.go b/component/kafka/kafka_test.go index d534b5171..14d6e0279 100644 --- a/component/kafka/kafka_test.go +++ b/component/kafka/kafka_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/correlation" "github.com/opentracing/opentracing-go/mocktracer" "github.com/stretchr/testify/assert" diff --git a/component/kafka/option.go b/component/kafka/option.go index f845d2975..4857de33c 100644 --- a/component/kafka/option.go +++ b/component/kafka/option.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "golang.org/x/exp/slog" ) diff --git a/component/kafka/option_test.go b/component/kafka/option_test.go index 3cbeeac80..6989e308e 100644 --- a/component/kafka/option_test.go +++ b/component/kafka/option_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/stretchr/testify/assert" ) diff --git 
a/docker-compose.yml b/docker-compose.yml index d03470d34..1577be8c9 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -21,7 +21,7 @@ services: depends_on: - zookeeper rabbitmq: - image: docker.io/bitnami/rabbitmq:3.9 + image: docker.io/bitnami/rabbitmq:3.12 ports: - '4369:4369' - '5551:5551' diff --git a/examples/client/main.go b/examples/client/main.go index 658e57fb0..1420d3776 100644 --- a/examples/client/main.go +++ b/examples/client/main.go @@ -10,7 +10,7 @@ import ( "strings" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/sqs" patronamqp "github.com/beatlabs/patron/client/amqp" diff --git a/examples/service/kafka.go b/examples/service/kafka.go index e9c5bdd83..371cf90e8 100644 --- a/examples/service/kafka.go +++ b/examples/service/kafka.go @@ -3,7 +3,7 @@ package main import ( "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron" "github.com/beatlabs/patron/component/kafka" "github.com/beatlabs/patron/examples" diff --git a/go.mod b/go.mod index 9cb0feb57..81c897fb8 100644 --- a/go.mod +++ b/go.mod @@ -3,15 +3,15 @@ module github.com/beatlabs/patron go 1.20 require ( - github.com/Shopify/sarama v1.38.1 - github.com/aws/aws-sdk-go-v2 v1.22.1 - github.com/aws/aws-sdk-go-v2/config v1.18.37 - github.com/aws/aws-sdk-go-v2/credentials v1.15.1 - github.com/aws/aws-sdk-go-v2/service/sns v1.23.0 - github.com/aws/aws-sdk-go-v2/service/sqs v1.23.2 + github.com/IBM/sarama v1.40.1 + github.com/aws/aws-sdk-go-v2 v1.23.0 + github.com/aws/aws-sdk-go-v2/config v1.25.3 + github.com/aws/aws-sdk-go-v2/credentials v1.16.2 + github.com/aws/aws-sdk-go-v2/service/sns v1.25.2 + github.com/aws/aws-sdk-go-v2/service/sqs v1.23.4 github.com/eclipse/paho.golang v0.11.0 github.com/elastic/elastic-transport-go/v8 v8.3.0 - github.com/elastic/go-elasticsearch/v8 v8.7.0 + github.com/elastic/go-elasticsearch/v8 v8.11.0 
github.com/go-redis/redis/extra/rediscmd v0.2.0 github.com/go-redis/redis/v8 v8.11.5 github.com/go-sql-driver/mysql v1.7.1 @@ -20,30 +20,31 @@ require ( github.com/julienschmidt/httprouter v1.3.0 github.com/opentracing-contrib/go-stdlib v1.0.0 github.com/opentracing/opentracing-go v1.2.0 - github.com/prometheus/client_golang v1.16.0 - github.com/prometheus/client_model v0.4.0 + github.com/prometheus/client_golang v1.17.0 + github.com/prometheus/client_model v0.5.0 github.com/streadway/amqp v1.1.0 github.com/stretchr/testify v1.8.4 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/uber/jaeger-lib v2.4.2-0.20210604143007-135cf5605a6d+incompatible - go.mongodb.org/mongo-driver v1.11.7 - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 - golang.org/x/time v0.3.0 - google.golang.org/grpc v1.56.3 - google.golang.org/protobuf v1.30.0 + go.mongodb.org/mongo-driver v1.13.0 + golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa + golang.org/x/time v0.4.0 + google.golang.org/grpc v1.59.0 + google.golang.org/protobuf v1.31.0 ) require ( github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.17.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.25.0 // indirect - github.com/aws/smithy-go v1.16.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect + 
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.17.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.25.3 // indirect + github.com/aws/smithy-go v1.17.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -62,17 +63,15 @@ require ( github.com/jcmturner/gofork v1.7.6 // indirect github.com/jcmturner/gokrb5/v8 v8.4.3 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect - github.com/klauspost/compress v1.15.14 // indirect - github.com/kr/text v0.2.0 // indirect + github.com/klauspost/compress v1.16.6 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/montanaflynn/stats v0.6.6 // indirect github.com/pierrec/lz4/v4 v4.1.17 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.42.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.11.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect - github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect @@ -80,9 +79,9 @@ require ( go.uber.org/atomic v1.10.0 // indirect golang.org/x/crypto v0.14.0 // indirect golang.org/x/net v0.17.0 // indirect - golang.org/x/sync v0.2.0 // indirect - golang.org/x/sys v0.13.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.14.0 // indirect golang.org/x/text v0.13.0 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + 
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index f7a673185..e0bcbccae 100644 --- a/go.sum +++ b/go.sum @@ -2,56 +2,44 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= -github.com/Shopify/sarama v1.38.1 h1:lqqPUPQZ7zPqYlWpTh+LQ9bhYNu2xJL6k1SJN4WVe2A= -github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSdZ4X2o5g= +github.com/IBM/sarama v1.40.1 h1:lL01NNg/iBeigUbT+wpPysuTYW6roHo6kc1QrffRf0k= +github.com/IBM/sarama v1.40.1/go.mod h1:+5OFwA5Du9I6QrznhaMHsuwWdWZNMjaBSIxEWEgKOYE= github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/aws/aws-sdk-go-v2 v1.18.1/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M= -github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM= -github.com/aws/aws-sdk-go-v2 v1.22.1 h1:sjnni/AuoTXxHitsIdT0FwmqUuNUuHtufcVDErVFT9U= -github.com/aws/aws-sdk-go-v2 v1.22.1/go.mod h1:Kd0OJtkW3Q0M0lUWGszapWjEvrXDzRW+D21JNsroB+c= -github.com/aws/aws-sdk-go-v2/config v1.18.37 h1:RNAfbPqw1CstCooHaTPhScz7z1PyocQj0UL+l95CgzI= -github.com/aws/aws-sdk-go-v2/config v1.18.37/go.mod h1:8AnEFxW9/XGKCbjYDCJy7iltVNyEI9Iu9qC21UzhhgQ= -github.com/aws/aws-sdk-go-v2/credentials v1.13.35/go.mod h1:o7rCaLtvK0hUggAGclf76mNGGkaG5a9KWlp+d9IpcV8= -github.com/aws/aws-sdk-go-v2/credentials v1.15.1 
h1:hmf6lAm9hk7uLCfapZn/jL05lm6Uwdbn1B0fgjyuf4M= -github.com/aws/aws-sdk-go-v2/credentials v1.15.1/go.mod h1:QTcHga3ZbQOneJuxmGBOCxiClxmp+TlvmjFexAnJ790= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11/go.mod h1:TEPP4tENqBGO99KwVpV9MlOX4NSrSLP8u3KRy2CDwA8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.2 h1:gIeH4+o1MN/caGBWjoGQTUTIu94xD6fI5B2+TcwBf70= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.2/go.mod h1:wLyMIo/zPOhQhPXTddpfdkSleyigtFi8iMnC+2m/SK4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34/go.mod h1:wZpTEecJe0Btj3IYnDx/VlUzor9wm3fJHyvLpQF0VwY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.1 h1:fi1ga6WysOyYb5PAf3Exd6B5GiSNpnZim4h1rhlBqx0= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.1/go.mod h1:V5CY8wNurvPUibTi9mwqUqpiFZ5LnioKWIFUDtIzdI8= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28/go.mod h1:7VRpKQQedkfIEXb4k52I7swUnZP0wohVajJMRn3vsUw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.1 h1:ZpaV/j48RlPc4AmOZuPv22pJliXjXq8/reL63YzyFnw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.1/go.mod h1:R8aXraabD2e3qv1csxM14/X9WF4wFMIY0kH4YEtYD5M= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42 h1:GPUcE/Yq7Ur8YSUk6lVkoIMWnJNO0HT18GUzCWCgCI0= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42/go.mod h1:rzfdUlfA+jdgLDmPKjd3Chq9V7LVLYo1Nz++Wb91aRo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35/go.mod h1:QGF2Rs33W5MaN9gYdEQOBBFPLwTZkEhRwI33f7KIG0o= 
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.1 h1:2OXw3ppu1XsB6rqKEMV4tnecTjIY3PRV2U6IP6KPJQo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.1/go.mod h1:FZB4AdakIqW/yERVdGJA6Z9jraax1beXfhBBnK2wwR8= -github.com/aws/aws-sdk-go-v2/service/sns v1.23.0 h1:iMmh/hzUQ7uJzKBI/QyMpJXUKcaNsVE4/Z+s6E3hMCI= -github.com/aws/aws-sdk-go-v2/service/sns v1.23.0/go.mod h1:gLVePJ104BrkWKr4aU3CURZYZnZN7BQGDsB668Uh3ZY= -github.com/aws/aws-sdk-go-v2/service/sqs v1.23.2 h1:Y2vfLiY3HmaMisuwx6fS2kMRYbajRXXB+9vesGVPseY= -github.com/aws/aws-sdk-go-v2/service/sqs v1.23.2/go.mod h1:TaV67b6JMD1988x/uMDop/JnMFK6v5d4Ru+sDmFg+ww= -github.com/aws/aws-sdk-go-v2/service/sso v1.13.5/go.mod h1:fIAwKQKBFu90pBxx07BFOMJLpRUGu8VOzLJakeY+0K4= -github.com/aws/aws-sdk-go-v2/service/sso v1.17.0 h1:I/Oh3IxGPfHXiGnwM54TD6hNr/8TlUrBXAtTyGhR+zw= -github.com/aws/aws-sdk-go-v2/service/sso v1.17.0/go.mod h1:H6NCMvDBqA+CvIaXzaSqM6LWtzv9BzZrqBOqz+PzRF8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.5/go.mod h1:yygr8ACQRY2PrEcy3xsUI357stq2AxnFM6DIsR9lij4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.0 h1:irbXQkfVYIRaewYSXcu4yVk0m2T+JzZd0dkop7FjmO0= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.0/go.mod h1:4wPNCkM22+oRe71oydP66K50ojDUC33XutSMi2pEF/M= -github.com/aws/aws-sdk-go-v2/service/sts v1.21.5/go.mod h1:VC7JDqsqiwXukYEDjoHh9U0fOJtNWh04FPQz4ct4GGU= -github.com/aws/aws-sdk-go-v2/service/sts v1.25.0 h1:sYIFy8tm1xQwRvVQ4CRuBGXKIg9sHNuG6+3UAQuoujk= -github.com/aws/aws-sdk-go-v2/service/sts v1.25.0/go.mod h1:S/LOQUeYDfJeJpFCIJDMjy7dwL4aA33HUdVi+i7uH8k= +github.com/aws/aws-sdk-go-v2 v1.19.1/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2 v1.23.0 h1:PiHAzmiQQr6JULBUdvR8fKlA+UPKLT/8KbiqpFBWiAo= +github.com/aws/aws-sdk-go-v2 v1.23.0/go.mod h1:i1XDttT4rnf6vxc9AuskLc6s7XBee8rlLilKlc03uAA= +github.com/aws/aws-sdk-go-v2/config v1.25.3 h1:E4m9LbwJOoncDNt3e9MPLbz/saxWcGUlZVBydydD6+8= +github.com/aws/aws-sdk-go-v2/config v1.25.3/go.mod 
h1:tAByZy03nH5jcq0vZmkcVoo6tRzRHEwSFx3QW4NmDw8= +github.com/aws/aws-sdk-go-v2/credentials v1.16.2 h1:0sdZ5cwfOAipTzZ7eOL0gw4LAhk/RZnTa16cDqIt8tg= +github.com/aws/aws-sdk-go-v2/credentials v1.16.2/go.mod h1:sDdvGhXrSVT5yzBDR7qXz+rhbpiMpUYfF3vJ01QSdrc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.4 h1:9wKDWEjwSnXZre0/O3+ZwbBl1SmlgWYBbrTV10X/H1s= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.4/go.mod h1:t4i+yGHMCcUNIX1x7YVYa6bH/Do7civ5I6cG/6PMfyA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.36/go.mod h1:T8Jsn/uNL/AFOXrVYQ1YQaN1r9gN34JU1855/Lyjv+o= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.3 h1:DUwbD79T8gyQ23qVXFUthjzVMTviSHi3y4z58KvghhM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.3/go.mod h1:7sGSz1JCKHWWBHq98m6sMtWQikmYPpxjqOydDemiVoM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.30/go.mod h1:v3GSCnFxbHzt9dlWBqvA1K1f9lmWuf4ztupZBCAIVs4= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.3 h1:AplLJCtIaUZDCbr6+gLYdsYNxne4iuaboJhVt9d+WXI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.3/go.mod h1:ify42Rb7nKeDDPkFjKn7q1bPscVPu/+gmHH8d2c+anU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 h1:uR9lXYjdPX0xY+NhvaJ4dD8rpSRz5VY81ccIIoNG+lw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1 h1:rpkF4n0CyFcrJUG/rNNohoTmhtWlFTRI4BsZOh9PvLs= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1/go.mod h1:l9ymW25HOqymeU2m1gbUQ3rUIsTwKs8gYHXkqDQUhiI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.3 h1:kJOolE8xBAD13xTCgOakByZkyP4D/owNmvEiioeUNAg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.3/go.mod h1:Owv1I59vaghv1Ax8zz8ELY8DN7/Y0rGS+WWAmjgi950= +github.com/aws/aws-sdk-go-v2/service/sns v1.25.2 h1:KVWf3qQZxqX0ogLvRfq+uEXfbRexe7Y2JBRQ0TQaxwQ= +github.com/aws/aws-sdk-go-v2/service/sns v1.25.2/go.mod 
h1:gOyDaoXeBT5gwG0DL+5RFQ7cddwLOablLJdXmWSWdyU= +github.com/aws/aws-sdk-go-v2/service/sqs v1.23.4 h1:TPMp4uoVml+k0rNwo8SoZdGT7+F6x0AfIKvz7OVK9kA= +github.com/aws/aws-sdk-go-v2/service/sqs v1.23.4/go.mod h1:ADgofuTwePPCcluD9j2PTs4DPseqBTILSG8//8Fttno= +github.com/aws/aws-sdk-go-v2/service/sso v1.17.2 h1:V47N5eKgVZoRSvx2+RQ0EpAEit/pqOhqeSQFiS4OFEQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.17.2/go.mod h1:/pE21vno3q1h4bbhUOEi+6Zu/aT26UK2WKkDXd+TssQ= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.0 h1:/XiEU7VIFcVWRDQLabyrSjBoKIm8UkYgsvWDuFW8Img= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.0/go.mod h1:dWqm5G767qwKPuayKfzm4rjzFmVjiBFbOJrpSPnAMDs= +github.com/aws/aws-sdk-go-v2/service/sts v1.25.3 h1:M2w4kiMGJCCM6Ljmmx/l6mmpfa3gPJVpBencfnsgvqs= +github.com/aws/aws-sdk-go-v2/service/sts v1.25.3/go.mod h1:4EqRHDCKP78hq3zOnmFXu5k0j4bXbRFfCh/zQ6KnEfQ= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.16.0 h1:gJZEH/Fqh+RsvlJ1Zt4tVAtV6bKkp3cC+R6FCZMNzik= -github.com/aws/smithy-go v1.16.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/smithy-go v1.17.0 h1:wWJD7LX6PBV6etBUwO0zElG0nWN9rUhp0WdYeHSHAaI= +github.com/aws/smithy-go v1.17.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -71,11 +59,10 @@ github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.golang v0.11.0 
h1:6Avu5dkkCfcB61/y1vx+XrPQ0oAl4TPYtY0uw3HbQdM= github.com/eclipse/paho.golang v0.11.0/go.mod h1:rhrV37IEwauUyx8FHrvmXOKo+QRKng5ncoN1vJiJMcs= -github.com/elastic/elastic-transport-go/v8 v8.2.0/go.mod h1:87Tcz8IVNe6rVSLdBux1o/PEItLtyabHU3naC7IoqKI= github.com/elastic/elastic-transport-go/v8 v8.3.0 h1:DJGxovyQLXGr62e9nDMPSxRyWION0Bh6d9eCFBriiHo= github.com/elastic/elastic-transport-go/v8 v8.3.0/go.mod h1:87Tcz8IVNe6rVSLdBux1o/PEItLtyabHU3naC7IoqKI= -github.com/elastic/go-elasticsearch/v8 v8.7.0 h1:ZvbT1YHppBC0QxGnMmaDUxoDa26clwhRaB3Gp5E3UcY= -github.com/elastic/go-elasticsearch/v8 v8.7.0/go.mod h1:lVb8SvJV8McVkdswpL8YR5QKIkhlWaoSq60YpHilOLI= +github.com/elastic/go-elasticsearch/v8 v8.11.0 h1:gUazf443rdYAEAD7JHX5lSXRgTkG4N4IcsV8dcWQPxM= +github.com/elastic/go-elasticsearch/v8 v8.11.0/go.mod h1:GU1BJHO7WeamP7UhuElYwzzHtvf9SDmeVpSSy9+o6Qg= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -147,9 +134,8 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.14 h1:i7WCKDToww0wA+9qrUZ1xOjp218vfFo3nTU6UHp+gOc= -github.com/klauspost/compress v1.15.14/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk= +github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= 
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -182,18 +168,17 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= 
+github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/streadway/amqp v1.1.0 h1:py12iX8XSyI7aN/3dUT8DFIDJazNJsVJdxNVEpnQTZM= github.com/streadway/amqp v1.1.0/go.mod h1:WYSrTEYHOXHd0nwFeUXAe2G2hRnQT+deZJJf88uS9Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -207,26 +192,22 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.2-0.20210604143007-135cf5605a6d+incompatible h1:73eb49SfAfRZEhxIKR0tz5MUMu2zjJxJUZlFCHInV34= github.com/uber/jaeger-lib v2.4.2-0.20210604143007-135cf5605a6d+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod 
h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.mongodb.org/mongo-driver v1.11.7 h1:LIwYxASDLGUg/8wOhgOOZhX8tQa/9tgZPgzZoVqJvcs= -go.mongodb.org/mongo-driver v1.11.7/go.mod h1:G9TgswdsWjX4tmDA5zfs2+6AEPpYJwqblyjsfuh8oXY= +go.mongodb.org/mongo-driver v1.13.0 h1:67DgFFjYOCMWdtTEmKFpV3ffWlFnh+CYZ8ZS/tXWUfY= +go.mongodb.org/mongo-driver v1.13.0/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -244,8 +225,8 @@ golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod 
h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -268,10 +249,9 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -287,8 +267,8 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -297,10 +277,11 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.4.0 h1:Z81tqI5ddIoXDPvVQ7/7CC9TnLM7ubaFG2qXYd5BbYY= +golang.org/x/time v0.4.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -314,10 +295,10 @@ gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJ gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= -google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -326,10 +307,9 @@ google.golang.org/protobuf 
v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= diff --git a/service.go b/service.go index 4d8864891..1b949e1a6 100644 --- a/service.go +++ b/service.go @@ -179,7 +179,7 @@ func defaultLogAttrs(name, version string) []slog.Attr { } func setupLogging(lc logConfig) { - ho := slog.HandlerOptions{ + opts := &slog.HandlerOptions{ AddSource: true, Level: getLogLevel(), } @@ -187,9 +187,9 @@ func setupLogging(lc logConfig) { var hnd slog.Handler if lc.json { - hnd = ho.NewJSONHandler(os.Stderr) + hnd = slog.NewJSONHandler(os.Stderr, opts) } else { - hnd = ho.NewTextHandler(os.Stderr) + hnd = slog.NewTextHandler(os.Stderr, opts) } slog.New(hnd.WithAttrs(lc.attrs)) diff --git a/test/kafka/kafka.go b/test/kafka/kafka.go index 8fa02470c..2169c836a 100644 --- a/test/kafka/kafka.go +++ b/test/kafka/kafka.go @@ -6,7 +6,7 @@ import ( "fmt" "time" - 
"github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/beatlabs/patron/component/async" ) diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/IBM/sarama/.gitignore similarity index 100% rename from vendor/github.com/Shopify/sarama/.gitignore rename to vendor/github.com/IBM/sarama/.gitignore diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/IBM/sarama/.golangci.yml similarity index 86% rename from vendor/github.com/Shopify/sarama/.golangci.yml rename to vendor/github.com/IBM/sarama/.golangci.yml index 0b419abbf..3d87645c1 100644 --- a/vendor/github.com/Shopify/sarama/.golangci.yml +++ b/vendor/github.com/IBM/sarama/.golangci.yml @@ -19,7 +19,7 @@ linters-settings: misspell: locale: US goimports: - local-prefixes: github.com/Shopify/sarama + local-prefixes: github.com/IBM/sarama gocritic: enabled-tags: - diagnostic @@ -39,11 +39,18 @@ linters-settings: lines: 300 statements: 300 + depguard: + rules: + main: + deny: + - pkg: "io/ioutil" + desc: Use the "io" and "os" packages instead. 
+ linters: disable-all: true enable: - bodyclose - - deadcode + # - deadcode - depguard - exportloopref - dogsled @@ -68,12 +75,12 @@ linters: # - paralleltest # - scopelint - staticcheck - - structcheck + # - structcheck # - stylecheck - typecheck - unconvert - unused - - varcheck + # - varcheck - whitespace issues: diff --git a/vendor/github.com/IBM/sarama/CHANGELOG.md b/vendor/github.com/IBM/sarama/CHANGELOG.md new file mode 100644 index 000000000..3737fc327 --- /dev/null +++ b/vendor/github.com/IBM/sarama/CHANGELOG.md @@ -0,0 +1,1511 @@ +# Changelog + +## Version 1.40.0 (2023-07-17) + +## What's Changed + +Note: this is the first release after the transition of Sarama ownership from Shopify to IBM in https://github.com/IBM/sarama/issues/2461 + +### :rotating_light: Breaking Changes + +- chore: migrate module to github.com/IBM/sarama by @dnwe in https://github.com/IBM/sarama/pull/2492 +- fix: restore (\*OffsetCommitRequest) AddBlock func by @dnwe in https://github.com/IBM/sarama/pull/2494 + +### :bug: Fixes + +- fix(consumer): don't retry FindCoordinator forever by @dnwe in https://github.com/IBM/sarama/pull/2427 +- fix(metrics): fix race condition when calling Broker.Open() twice by @vincentbernat in https://github.com/IBM/sarama/pull/2428 +- fix: use version 4 of DescribeGroupsRequest only if kafka broker vers… …ion is >= 2.4 by @faillefer in https://github.com/IBM/sarama/pull/2451 +- Fix HighWaterMarkOffset of mocks partition consumer by @gr8web in https://github.com/IBM/sarama/pull/2447 +- fix: prevent data race in balance strategy by @napallday in https://github.com/IBM/sarama/pull/2453 + +### :package: Dependency updates + +- chore(deps): bump golang.org/x/net from 0.5.0 to 0.7.0 by @dependabot in https://github.com/IBM/sarama/pull/2452 + +### :wrench: Maintenance + +- chore: add kafka 3.3.2 by @dnwe in https://github.com/IBM/sarama/pull/2434 +- chore(ci): remove Shopify/shopify-cla-action by @dnwe in https://github.com/IBM/sarama/pull/2489 +- chore: 
bytes.Equal instead bytes.Compare by @testwill in https://github.com/IBM/sarama/pull/2485 + +## New Contributors + +- @dependabot made their first contribution in https://github.com/IBM/sarama/pull/2452 +- @gr8web made their first contribution in https://github.com/IBM/sarama/pull/2447 +- @testwill made their first contribution in https://github.com/IBM/sarama/pull/2485 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.1...v1.40.0 + +## Version 1.38.1 (2023-01-22) + +## What's Changed +### :bug: Fixes +* fix(example): correct `records-number` param in txn producer readme by @diallo-han in https://github.com/IBM/sarama/pull/2420 +* fix: use newConsumer method in newConsumerGroup method by @Lumotheninja in https://github.com/IBM/sarama/pull/2424 +### :package: Dependency updates +* chore(deps): bump module github.com/klauspost/compress to v1.15.14 by @dnwe in https://github.com/IBM/sarama/pull/2410 +* chore(deps): bump module golang.org/x/net to v0.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2413 +* chore(deps): bump module github.com/stretchr/testify to v1.8.1 by @dnwe in https://github.com/IBM/sarama/pull/2411 +* chore(deps): bump module github.com/xdg-go/scram to v1.1.2 by @dnwe in https://github.com/IBM/sarama/pull/2412 +* chore(deps): bump module golang.org/x/sync to v0.1.0 by @dnwe in https://github.com/IBM/sarama/pull/2414 +* chore(deps): bump github.com/eapache/go-xerial-snappy digest to bf00bc1 by @dnwe in https://github.com/IBM/sarama/pull/2418 + +## New Contributors +* @diallo-han made their first contribution in https://github.com/IBM/sarama/pull/2420 +* @Lumotheninja made their first contribution in https://github.com/IBM/sarama/pull/2424 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.0...v1.38.1 + +## Version 1.38.0 (2023-01-08) + +## What's Changed +### :tada: New Features / Improvements +* feat(producer): improve memory usage of zstd encoder by using our own pool management by @rtreffer in 
https://github.com/IBM/sarama/pull/2375 +* feat(proto): implement and use MetadataRequest v7 by @dnwe in https://github.com/IBM/sarama/pull/2388 +* feat(metrics): add protocol-requests-rate metric by @auntan in https://github.com/IBM/sarama/pull/2373 +### :bug: Fixes +* fix(proto): track and supply leader epoch to FetchRequest by @dnwe in https://github.com/IBM/sarama/pull/2389 +* fix(example): improve arg name used for tls skip verify by @michaeljmarshall in https://github.com/IBM/sarama/pull/2385 +* fix(zstd): default back to GOMAXPROCS concurrency by @bgreenlee in https://github.com/IBM/sarama/pull/2404 +* fix(producer): add nil check while producer is retrying by @hsweif in https://github.com/IBM/sarama/pull/2387 +* fix(producer): return errors for every message in retryBatch to avoid producer hang forever by @cch123 in https://github.com/IBM/sarama/pull/2378 +* fix(metrics): fix race when accessing metric registry by @vincentbernat in https://github.com/IBM/sarama/pull/2409 +### :package: Dependency updates +* chore(deps): bump golang.org/x/net to v0.4.0 by @dnwe in https://github.com/IBM/sarama/pull/2403 +### :wrench: Maintenance +* chore(ci): replace set-output command in GH Action by @dnwe in https://github.com/IBM/sarama/pull/2390 +* chore(ci): include kafka 3.3.1 in testing matrix by @dnwe in https://github.com/IBM/sarama/pull/2406 + +## New Contributors +* @michaeljmarshall made their first contribution in https://github.com/IBM/sarama/pull/2385 +* @bgreenlee made their first contribution in https://github.com/IBM/sarama/pull/2404 +* @hsweif made their first contribution in https://github.com/IBM/sarama/pull/2387 +* @cch123 made their first contribution in https://github.com/IBM/sarama/pull/2378 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.2...v1.38.0 + +## Version 1.37.2 (2022-10-04) + +## What's Changed +### :bug: Fixes +* fix: ensure updateMetaDataMs is 64-bit aligned by @dnwe in https://github.com/IBM/sarama/pull/2356 +### 
:heavy_plus_sign: Other Changes +* fix: bump go.mod specification to go 1.17 by @dnwe in https://github.com/IBM/sarama/pull/2357 + + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.1...v1.37.2 + +## Version 1.37.1 (2022-10-04) + +## What's Changed +### :bug: Fixes +* fix: support existing deprecated Rebalance.Strategy field usage by @spongecaptain in https://github.com/IBM/sarama/pull/2352 +* fix(test): consumer group rebalance strategy compatibility by @Jacob-bzx in https://github.com/IBM/sarama/pull/2353 +* fix(producer): replace time.After with time.Timer to avoid high memory usage by @Jacob-bzx in https://github.com/IBM/sarama/pull/2355 + +## New Contributors +* @spongecaptain made their first contribution in https://github.com/IBM/sarama/pull/2352 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.0...v1.37.1 + +## Version 1.37.0 (2022-09-28) + +## What's Changed + +### :rotating_light: Breaking Changes +* Due to a change in [github.com/klauspost/compress v1.15.10](https://github.com/klauspost/compress/releases/tag/v1.15.10), Sarama v1.37.0 requires Go 1.17 going forward, unfortunately due to an oversight this wasn't reflected in the go.mod declaration at time of release. + +### :tada: New Features / Improvements +* feat(consumer): support multiple balance strategies by @Jacob-bzx in https://github.com/IBM/sarama/pull/2339 +* feat(producer): transactional API by @ryarnyah in https://github.com/IBM/sarama/pull/2295 +* feat(mocks): support key in MockFetchResponse. 
by @Skandalik in https://github.com/IBM/sarama/pull/2328 +### :bug: Fixes +* fix: avoid panic when Metadata.RefreshFrequency is 0 by @Jacob-bzx in https://github.com/IBM/sarama/pull/2329 +* fix(consumer): avoid pushing unrelated responses to paused children by @pkoutsovasilis in https://github.com/IBM/sarama/pull/2317 +* fix: prevent metrics leak with cleanup by @auntan in https://github.com/IBM/sarama/pull/2340 +* fix: race condition(may panic) when closing consumer group by @Jacob-bzx in https://github.com/IBM/sarama/pull/2331 +* fix(consumer): default ResetInvalidOffsets to true by @dnwe in https://github.com/IBM/sarama/pull/2345 +* Validate the `Config` when creating a mock producer/consumer by @joewreschnig in https://github.com/IBM/sarama/pull/2327 +### :package: Dependency updates +* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.16 by @dnwe in https://github.com/IBM/sarama/pull/2335 +* chore(deps): bump golang.org/x/net digest to bea034e by @dnwe in https://github.com/IBM/sarama/pull/2333 +* chore(deps): bump golang.org/x/sync digest to 7f9b162 by @dnwe in https://github.com/IBM/sarama/pull/2334 +* chore(deps): bump golang.org/x/net digest to f486391 by @dnwe in https://github.com/IBM/sarama/pull/2348 +* chore(deps): bump module github.com/shopify/toxiproxy/v2 to v2.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2336 +* chore(deps): bump module github.com/klauspost/compress to v1.15.11 by @dnwe in https://github.com/IBM/sarama/pull/2349 +* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.17 by @dnwe in https://github.com/IBM/sarama/pull/2350 +### :wrench: Maintenance +* chore(ci): bump kafka-versions to latest by @dnwe in https://github.com/IBM/sarama/pull/2346 +* chore(ci): bump go-versions to N and N-1 by @dnwe in https://github.com/IBM/sarama/pull/2347 + +## New Contributors +* @Jacob-bzx made their first contribution in https://github.com/IBM/sarama/pull/2329 +* @pkoutsovasilis made their first contribution in 
https://github.com/IBM/sarama/pull/2317 +* @Skandalik made their first contribution in https://github.com/IBM/sarama/pull/2328 +* @auntan made their first contribution in https://github.com/IBM/sarama/pull/2340 +* @ryarnyah made their first contribution in https://github.com/IBM/sarama/pull/2295 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.36.0...v1.37.0 + +## Version 1.36.0 (2022-08-11) + +## What's Changed +### :tada: New Features / Improvements +* feat: add option to propagate OffsetOutOfRange error by @dkolistratova in https://github.com/IBM/sarama/pull/2252 +* feat(producer): expose ProducerMessage.byteSize() function by @k8scat in https://github.com/IBM/sarama/pull/2315 +* feat(metrics): track consumer fetch request rates by @dnwe in https://github.com/IBM/sarama/pull/2299 +### :bug: Fixes +* fix(consumer): avoid submitting empty fetch requests when paused by @raulnegreiros in https://github.com/IBM/sarama/pull/2143 +### :package: Dependency updates +* chore(deps): bump module github.com/klauspost/compress to v1.15.9 by @dnwe in https://github.com/IBM/sarama/pull/2304 +* chore(deps): bump golang.org/x/net digest to c7608f3 by @dnwe in https://github.com/IBM/sarama/pull/2301 +* chore(deps): bump golangci/golangci-lint-action action to v3 by @dnwe in https://github.com/IBM/sarama/pull/2311 +* chore(deps): bump golang.org/x/net digest to 07c6da5 by @dnwe in https://github.com/IBM/sarama/pull/2307 +* chore(deps): bump github actions versions (major) by @dnwe in https://github.com/IBM/sarama/pull/2313 +* chore(deps): bump module github.com/jcmturner/gofork to v1.7.6 by @dnwe in https://github.com/IBM/sarama/pull/2305 +* chore(deps): bump golang.org/x/sync digest to 886fb93 by @dnwe in https://github.com/IBM/sarama/pull/2302 +* chore(deps): bump module github.com/jcmturner/gokrb5/v8 to v8.4.3 by @dnwe in https://github.com/IBM/sarama/pull/2303 +### :wrench: Maintenance +* chore: add kafka 3.1.1 to the version matrix by @dnwe in 
https://github.com/IBM/sarama/pull/2300 +### :heavy_plus_sign: Other Changes +* Migrate off probot-CLA to new GitHub Action by @cursedcoder in https://github.com/IBM/sarama/pull/2294 +* Forgot to remove cla probot by @cursedcoder in https://github.com/IBM/sarama/pull/2297 +* chore(lint): re-enable a small amount of go-critic by @dnwe in https://github.com/IBM/sarama/pull/2312 + +## New Contributors +* @cursedcoder made their first contribution in https://github.com/IBM/sarama/pull/2294 +* @dkolistratova made their first contribution in https://github.com/IBM/sarama/pull/2252 +* @k8scat made their first contribution in https://github.com/IBM/sarama/pull/2315 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.35.0...v1.36.0 + +## Version 1.35.0 (2022-07-22) + +## What's Changed +### :bug: Fixes +* fix: fix metadata retry backoff invalid when get metadata failed by @Stephan14 in https://github.com/IBM/sarama/pull/2256 +* fix(balance): sort and de-deplicate memberIDs by @dnwe in https://github.com/IBM/sarama/pull/2285 +* fix: prevent DescribeLogDirs hang in admin client by @zerowidth in https://github.com/IBM/sarama/pull/2269 +* fix: include assignment-less members in SyncGroup by @dnwe in https://github.com/IBM/sarama/pull/2292 +### :package: Dependency updates +* chore(deps): bump module github.com/stretchr/testify to v1.8.0 by @dnwe in https://github.com/IBM/sarama/pull/2284 +* chore(deps): bump module github.com/eapache/go-resiliency to v1.3.0 by @dnwe in https://github.com/IBM/sarama/pull/2283 +* chore(deps): bump golang.org/x/net digest to 1185a90 by @dnwe in https://github.com/IBM/sarama/pull/2279 +* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.15 by @dnwe in https://github.com/IBM/sarama/pull/2281 +* chore(deps): bump module github.com/klauspost/compress to v1.15.8 by @dnwe in https://github.com/IBM/sarama/pull/2280 +### :wrench: Maintenance +* chore: rename `any` func to avoid identifier by @dnwe in 
https://github.com/IBM/sarama/pull/2272 +* chore: add and test against kafka 3.2.0 by @dnwe in https://github.com/IBM/sarama/pull/2288 +* chore: document Fetch protocol fields by @dnwe in https://github.com/IBM/sarama/pull/2289 +### :heavy_plus_sign: Other Changes +* chore(ci): fix redirect with GITHUB_STEP_SUMMARY by @dnwe in https://github.com/IBM/sarama/pull/2286 +* fix(test): permit ECONNRESET in TestInitProducerID by @dnwe in https://github.com/IBM/sarama/pull/2287 +* fix: ensure empty or devel version valid by @dnwe in https://github.com/IBM/sarama/pull/2291 + +## New Contributors +* @zerowidth made their first contribution in https://github.com/IBM/sarama/pull/2269 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.1...v1.35.0 + +## Version 1.34.1 (2022-06-07) + +## What's Changed +### :bug: Fixes +* fix(examples): check session.Context().Done() in examples/consumergroup by @zxc111 in https://github.com/IBM/sarama/pull/2240 +* fix(protocol): move AuthorizedOperations into GroupDescription of DescribeGroupsResponse by @aiquestion in https://github.com/IBM/sarama/pull/2247 +* fix(protocol): tidyup DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2248 +* fix(consumer): range balance strategy not like reference by @njhartwell in https://github.com/IBM/sarama/pull/2245 +### :wrench: Maintenance +* chore(ci): experiment with using tparse by @dnwe in https://github.com/IBM/sarama/pull/2236 +* chore(deps): bump thirdparty dependencies to latest releases by @dnwe in https://github.com/IBM/sarama/pull/2242 + +## New Contributors +* @zxc111 made their first contribution in https://github.com/IBM/sarama/pull/2240 +* @njhartwell made their first contribution in https://github.com/IBM/sarama/pull/2245 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.0...v1.34.1 + +## Version 1.34.0 (2022-05-30) + +## What's Changed +### :tada: New Features / Improvements +* KIP-345: support static membership by @aiquestion in 
https://github.com/IBM/sarama/pull/2230 +### :bug: Fixes +* fix: KIP-368 use receiver goroutine to process all sasl v1 responses by @k-wall in https://github.com/IBM/sarama/pull/2234 +### :wrench: Maintenance +* chore(deps): bump module github.com/pierrec/lz4 to v4 by @dnwe in https://github.com/IBM/sarama/pull/2231 +* chore(deps): bump golang.org/x/net digest to 2e3eb7b by @dnwe in https://github.com/IBM/sarama/pull/2232 + +## New Contributors +* @aiquestion made their first contribution in https://github.com/IBM/sarama/pull/2230 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.33.0...v1.34.0 + +## Version 1.33.0 (2022-05-11) + +## What's Changed +### :rotating_light: Breaking Changes + +**Note: with this change, the user of Sarama is required to use Go 1.13's errors.Is etc (rather then ==) when forming conditionals returned by this library.** +* feat: make `ErrOutOfBrokers` wrap the underlying error that prevented connections to the brokers by @k-wall in https://github.com/IBM/sarama/pull/2131 + + +### :tada: New Features / Improvements +* feat(message): add UnmarshalText method to CompressionCodec by @vincentbernat in https://github.com/IBM/sarama/pull/2172 +* KIP-368 : Allow SASL Connections to Periodically Re-Authenticate by @k-wall in https://github.com/IBM/sarama/pull/2197 +* feat: add batched CreateACLs func to ClusterAdmin by @nkostoulas in https://github.com/IBM/sarama/pull/2191 +### :bug: Fixes +* fix: TestRecordBatchDecoding failing sporadically by @k-wall in https://github.com/IBM/sarama/pull/2154 +* feat(test): add an fvt for broker deadlock by @dnwe in https://github.com/IBM/sarama/pull/2144 +* fix: avoid starvation in subscriptionManager by @dnwe in https://github.com/IBM/sarama/pull/2109 +* fix: remove "Is your cluster reachable?" 
from msg by @dnwe in https://github.com/IBM/sarama/pull/2165 +* fix: remove trailing fullstop from error strings by @dnwe in https://github.com/IBM/sarama/pull/2166 +* fix: return underlying sasl error message by @dnwe in https://github.com/IBM/sarama/pull/2164 +* fix: potential data race on a global variable by @pior in https://github.com/IBM/sarama/pull/2171 +* fix: AdminClient | CreateACLs | check for error in response, return error if needed by @omris94 in https://github.com/IBM/sarama/pull/2185 +* producer: ensure that the management message (fin) is never "leaked" by @niamster in https://github.com/IBM/sarama/pull/2182 +* fix: prevent RefreshBrokers leaking old brokers by @k-wall in https://github.com/IBM/sarama/pull/2203 +* fix: prevent RefreshController leaking controller by @k-wall in https://github.com/IBM/sarama/pull/2204 +* fix: prevent AsyncProducer retryBatch from leaking by @k-wall in https://github.com/IBM/sarama/pull/2208 +* fix: prevent metrics leak when authenticate fails by @Stephan14 in https://github.com/IBM/sarama/pull/2205 +* fix: prevent deadlock between subscription manager and consumer goroutines by @niamster in https://github.com/IBM/sarama/pull/2194 +* fix: prevent idempotent producer epoch exhaustion by @ladislavmacoun in https://github.com/IBM/sarama/pull/2178 +* fix(test): mockbroker offsetResponse vers behavior by @dnwe in https://github.com/IBM/sarama/pull/2213 +* fix: cope with OffsetsLoadInProgress on Join+Sync by @dnwe in https://github.com/IBM/sarama/pull/2214 +* fix: make default MaxWaitTime 500ms by @dnwe in https://github.com/IBM/sarama/pull/2227 +### :package: Dependency updates +* chore(deps): bump xdg-go/scram and klauspost/compress by @dnwe in https://github.com/IBM/sarama/pull/2170 +### :wrench: Maintenance +* fix(test): skip TestReadOnlyAndAllCommittedMessages by @dnwe in https://github.com/IBM/sarama/pull/2161 +* fix(test): remove t.Parallel() by @dnwe in https://github.com/IBM/sarama/pull/2162 +* chore(ci): bump 
along to Go 1.17+1.18 and bump golangci-lint by @dnwe in https://github.com/IBM/sarama/pull/2183 +* chore: switch to multi-arch compatible docker images by @dnwe in https://github.com/IBM/sarama/pull/2210 +### :heavy_plus_sign: Other Changes +* Remediate a number go-routine leaks (mainly test issues) by @k-wall in https://github.com/IBM/sarama/pull/2198 +* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199 +* chore: bump functional test timeout to 12m by @dnwe in https://github.com/IBM/sarama/pull/2200 +* fix(admin): make DeleteRecords err consistent by @dnwe in https://github.com/IBM/sarama/pull/2226 + +## New Contributors +* @k-wall made their first contribution in https://github.com/IBM/sarama/pull/2154 +* @pior made their first contribution in https://github.com/IBM/sarama/pull/2171 +* @omris94 made their first contribution in https://github.com/IBM/sarama/pull/2185 +* @vincentbernat made their first contribution in https://github.com/IBM/sarama/pull/2172 +* @niamster made their first contribution in https://github.com/IBM/sarama/pull/2182 +* @ladislavmacoun made their first contribution in https://github.com/IBM/sarama/pull/2178 +* @nkostoulas made their first contribution in https://github.com/IBM/sarama/pull/2191 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.32.0...v1.33.0 + +## Version 1.32.0 (2022-02-24) + +### ⚠️ This release has been superseded by v1.33.0 and should _not_ be used. 
+ +* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199 + +--- + +## What's Changed +### :bug: Fixes +* Fix deadlock when closing Broker in brokerProducer by @slaunay in https://github.com/IBM/sarama/pull/2133 +### :package: Dependency updates +* chore: refresh dependencies to latest by @dnwe in https://github.com/IBM/sarama/pull/2159 +### :wrench: Maintenance +* fix: rework RebalancingMultiplePartitions test by @dnwe in https://github.com/IBM/sarama/pull/2130 +* fix(test): use Sarama transactional producer by @dnwe in https://github.com/IBM/sarama/pull/1939 +* chore: enable t.Parallel() wherever possible by @dnwe in https://github.com/IBM/sarama/pull/2138 +### :heavy_plus_sign: Other Changes +* chore: restrict to 1 testbinary at once by @dnwe in https://github.com/IBM/sarama/pull/2145 +* chore: restrict to 1 parallel test at once by @dnwe in https://github.com/IBM/sarama/pull/2146 +* Remove myself from codeowners by @bai in https://github.com/IBM/sarama/pull/2147 +* chore: add retractions for known bad versions by @dnwe in https://github.com/IBM/sarama/pull/2160 + + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.31.1...v1.32.0 + +## Version 1.31.1 (2022-02-01) + +- #2126 - @bai - Populate missing kafka versions +- #2124 - @bai - Add Kafka 3.1.0 to CI matrix, migrate to bitnami kafka image +- #2123 - @bai - Update klauspost/compress to 0.14 +- #2122 - @dnwe - fix(test): make it simpler to re-use toxiproxy +- #2119 - @bai - Add Kafka 3.1.0 version number +- #2005 - @raulnegreiros - feat: add methods to pause/resume consumer's consumption +- #2051 - @seveas - Expose the TLS connection state of a broker connection +- #2117 - @wuhuizuo - feat: add method MockApiVersionsResponse.SetApiKeys +- #2110 - @dnwe - fix: ensure heartbeats only stop after cleanup +- #2113 - @mosceo - Fix typo + +## Version 1.31.0 (2022-01-18) + +## What's Changed +### :tada: New Features / Improvements +* feat: expose IncrementalAlterConfigs 
API in admin.go by @fengyinqiao in https://github.com/IBM/sarama/pull/2088 +* feat: allow AsyncProducer to have MaxOpenRequests inflight produce requests per broker by @xujianhai666 in https://github.com/IBM/sarama/pull/1686 +* Support request pipelining in AsyncProducer by @slaunay in https://github.com/IBM/sarama/pull/2094 +### :bug: Fixes +* fix(test): add fluent interface for mocks where missing by @grongor in https://github.com/IBM/sarama/pull/2080 +* fix(test): test for ConsumePartition with OffsetOldest by @grongor in https://github.com/IBM/sarama/pull/2081 +* fix: set HWMO during creation of partitionConsumer (fix incorrect HWMO before first fetch) by @grongor in https://github.com/IBM/sarama/pull/2082 +* fix: ignore non-nil but empty error strings in Describe/Alter client quotas responses by @agriffaut in https://github.com/IBM/sarama/pull/2096 +* fix: skip over KIP-482 tagged fields by @dnwe in https://github.com/IBM/sarama/pull/2107 +* fix: clear preferredReadReplica if broker shutdown by @dnwe in https://github.com/IBM/sarama/pull/2108 +* fix(test): correct wrong offsets in mock Consumer by @grongor in https://github.com/IBM/sarama/pull/2078 +* fix: correct bugs in DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2111 +### :wrench: Maintenance +* chore: bump runtime and test dependencies by @dnwe in https://github.com/IBM/sarama/pull/2100 +### :memo: Documentation +* docs: refresh README.md for Kafka 3.0.0 by @dnwe in https://github.com/IBM/sarama/pull/2099 +### :heavy_plus_sign: Other Changes +* Fix typo by @mosceo in https://github.com/IBM/sarama/pull/2084 + +## New Contributors +* @grongor made their first contribution in https://github.com/IBM/sarama/pull/2080 +* @fengyinqiao made their first contribution in https://github.com/IBM/sarama/pull/2088 +* @xujianhai666 made their first contribution in https://github.com/IBM/sarama/pull/1686 +* @mosceo made their first contribution in https://github.com/IBM/sarama/pull/2084 + +**Full 
Changelog**: https://github.com/IBM/sarama/compare/v1.30.1...v1.31.0 + +## Version 1.30.1 (2021-12-04) + +## What's Changed +### :tada: New Features / Improvements +* feat(zstd): pass level param through to compress/zstd encoder by @lizthegrey in https://github.com/IBM/sarama/pull/2045 +### :bug: Fixes +* fix: set min-go-version to 1.16 by @troyanov in https://github.com/IBM/sarama/pull/2048 +* logger: fix debug logs' formatting directives by @utrack in https://github.com/IBM/sarama/pull/2054 +* fix: stuck on the batch with zero records length by @pachmu in https://github.com/IBM/sarama/pull/2057 +* fix: only update preferredReadReplica if valid by @dnwe in https://github.com/IBM/sarama/pull/2076 +### :wrench: Maintenance +* chore: add release notes configuration by @dnwe in https://github.com/IBM/sarama/pull/2046 +* chore: confluent platform version bump by @lizthegrey in https://github.com/IBM/sarama/pull/2070 + +## Notes +* ℹ️ from Sarama 1.30.x onward the minimum version of Go toolchain required is 1.16.x + +## New Contributors +* @troyanov made their first contribution in https://github.com/IBM/sarama/pull/2048 +* @lizthegrey made their first contribution in https://github.com/IBM/sarama/pull/2045 +* @utrack made their first contribution in https://github.com/IBM/sarama/pull/2054 +* @pachmu made their first contribution in https://github.com/IBM/sarama/pull/2057 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.0...v1.30.1 + +## Version 1.30.0 (2021-09-29) + +⚠️ This release has been superseded by v1.30.1 and should _not_ be used. 
+ +**regression**: enabling rackawareness causes severe throughput drops (#2071) — fixed in v1.30.1 via #2076 + +--- + +ℹ️ **Note: from Sarama 1.30.0 the minimum version of Go toolchain required is 1.16.x** + +--- + +# New Features / Improvements + +- #1983 - @zifengyu - allow configure AllowAutoTopicCreation argument in metadata refresh +- #2000 - @matzew - Using xdg-go module for SCRAM +- #2003 - @gdm85 - feat: add counter metrics for consumer group join/sync and their failures +- #1992 - @zhaomoran - feat: support SaslHandshakeRequest v0 for SCRAM +- #2006 - @faillefer - Add support for DeleteOffsets operation +- #1909 - @agriffaut - KIP-546 Client quota APIs +- #1633 - @aldelucca1 - feat: allow balance strategies to provide initial state +- #1275 - @dnwe - log: add a DebugLogger that proxies to Logger +- #2018 - @dnwe - feat: use DebugLogger reference for goldenpath log +- #2019 - @dnwe - feat: add logging & a metric for producer throttle +- #2023 - @dnwe - feat: add Controller() to ClusterAdmin interface +- #2025 - @dnwe - feat: support ApiVersionsRequest V3 protocol +- #2028 - @dnwe - feat: send ApiVersionsRequest on broker open +- #2034 - @bai - Add support for kafka 3.0.0 + +# Fixes + +- #1990 - @doxsch - fix: correctly pass ValidateOnly through to CreatePartitionsRequest +- #1988 - @LubergAlexander - fix: correct WithCustomFallbackPartitioner implementation +- #2001 - @HurSungYun - docs: inform AsyncProducer Close pitfalls +- #1973 - @qiangmzsx - fix: metrics still taking up too much memory when metrics.UseNilMetrics=true +- #2007 - @bai - Add support for Go 1.17 +- #2009 - @dnwe - fix: enable nilerr linter and fix iferr checks +- #2010 - @dnwe - chore: enable exportloopref and misspell linters +- #2013 - @faillefer - fix(test): disable encoded response/request check when map contains multiple elements +- #2015 - @bai - Change default branch to main +- #1718 - @crivera-fastly - fix: correct the error handling in client.InitProducerID() +- #1984 - 
@null-sleep - fix(test): bump confluentPlatformVersion from 6.1.1 to 6.2.0 +- #2016 - @dnwe - chore: replace deprecated Go calls +- #2017 - @dnwe - chore: delete legacy vagrant script +- #2020 - @dnwe - fix(test): remove testLogger from TrackLeader test +- #2024 - @dnwe - chore: bump toxiproxy container to v2.1.5 +- #2033 - @bai - Update dependencies +- #2031 - @gdm85 - docs: do not mention buffered messages in sync producer Close method +- #2035 - @dnwe - chore: populate the missing kafka versions +- #2038 - @dnwe - feat: add a fuzzing workflow to github actions + +## New Contributors +* @zifengyu made their first contribution in https://github.com/IBM/sarama/pull/1983 +* @doxsch made their first contribution in https://github.com/IBM/sarama/pull/1990 +* @LubergAlexander made their first contribution in https://github.com/IBM/sarama/pull/1988 +* @HurSungYun made their first contribution in https://github.com/IBM/sarama/pull/2001 +* @gdm85 made their first contribution in https://github.com/IBM/sarama/pull/2003 +* @qiangmzsx made their first contribution in https://github.com/IBM/sarama/pull/1973 +* @zhaomoran made their first contribution in https://github.com/IBM/sarama/pull/1992 +* @faillefer made their first contribution in https://github.com/IBM/sarama/pull/2006 +* @crivera-fastly made their first contribution in https://github.com/IBM/sarama/pull/1718 +* @null-sleep made their first contribution in https://github.com/IBM/sarama/pull/1984 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.29.1...v1.30.0 + +## Version 1.29.1 (2021-06-24) + +# New Features / Improvements + +- #1966 - @ajanikow - KIP-339: Add Incremental Config updates API +- #1964 - @ajanikow - Add DelegationToken ResourceType + +# Fixes + +- #1962 - @hanxiaolin - fix(consumer): call interceptors when MaxProcessingTime expire +- #1971 - @KerryJava - fix kafka-producer-performance throughput panic +- #1968 - @dnwe - chore: bump golang.org/x versions +- #1956 - @joewreschnig - Allow 
checking the entire `ProducerMessage` in the mock producers +- #1963 - @dnwe - fix: ensure backoff timer is re-used +- #1949 - @dnwe - fix: explicitly use uint64 for payload length + +## Version 1.29.0 (2021-05-07) + +### New Features / Improvements + +- #1917 - @arkady-emelyanov - KIP-554: Add Broker-side SCRAM Config API +- #1869 - @wyndhblb - zstd: encode+decode performance improvements +- #1541 - @izolight - add String, (Un)MarshalText for acl types. +- #1921 - @bai - Add support for Kafka 2.8.0 + +### Fixes +- #1936 - @dnwe - fix(consumer): follow preferred broker +- #1933 - @ozzieba - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication +- #1929 - @celrenheit - Handle isolation level in Offset(Request|Response) and require stable offset in FetchOffset(Request|Response) +- #1926 - @dnwe - fix: correct initial CodeQL findings +- #1925 - @bai - Test out CodeQL +- #1923 - @bestgopher - Remove redundant switch-case, fix doc typos +- #1922 - @bai - Update go dependencies +- #1898 - @mmaslankaprv - Parsing only known control batches value +- #1887 - @withshubh - Fix: issues affecting code quality + +## Version 1.28.0 (2021-02-15) + +**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. 
#1699 - @wclaeys - Consumer group support for manually committing offsets
package names +#1741 - @varun06 - updated zstd dependency to latest v1.10.10 +#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base +#1763 - @alrs - remove deprecated tls options from test +#1769 - @bai - Add support for Kafka 2.6.0 + +## Fixes + +#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication +#1744 - @alrs - Fix isBalanced Function Signature + +## Version 1.26.4 (2020-05-19) + +## Fixes + +- #1701 - @d1egoaz - Set server name only for the current broker +- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka + +## Version 1.26.3 (2020-05-07) + +## Fixes + +- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config + +## Version 1.26.2 (2020-05-06) + +## ⚠️ Known Issues + +This release has been marked as not ready for production and may be unstable, please use v1.26.4. + +### Improvements + +- #1560 - @iyacontrol - add sync pool for gzip 1-9 +- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID +- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignements APIs +- #1632 - @bai - Add support for Go 1.14 +- #1640 - @random-dwi - Feature/fix list partition reassignments +- #1646 - @mimaison - Add DescribeLogDirs to admin client +- #1667 - @bai - Add support for kafka 2.5.0 + +## Fixes + +- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0 +- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine +- #1602 - @d1egoaz - adds a note about consumer groups Consume method +- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly +- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented +- #1614 - @alrs - produce_response.go: Remove Unused Functions +- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag 
variables +- #1639 - @agriffaut - Handle errors with no message but error code +- #1643 - @kzinglzy - fix `config.net.keepalive` +- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs +- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata +- #1650 - @lavoiesl - Return the response error in heartbeatLoop +- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die +- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy. + +## Version 1.26.1 (2020-02-04) + +Improvements: +- Add requests-in-flight metric ([1539](https://github.com/IBM/sarama/pull/1539)) +- Fix misleading example for cluster admin ([1595](https://github.com/IBM/sarama/pull/1595)) +- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/IBM/sarama/pull/1573)) +- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/IBM/sarama/pull/1592)) + +Bug Fixes: +- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/IBM/sarama/pull/1590)) +- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/IBM/sarama/pull/1589)) + +## Version 1.26.0 (2020-01-24) + +New Features: +- Enable zstd compression + ([1574](https://github.com/IBM/sarama/pull/1574), + [1582](https://github.com/IBM/sarama/pull/1582)) +- Support headers in tools kafka-console-producer + ([1549](https://github.com/IBM/sarama/pull/1549)) + +Improvements: +- Add SASL AuthIdentity to SASL frames (authzid) + ([1585](https://github.com/IBM/sarama/pull/1585)). + +Bug Fixes: +- Sending messages with ZStd compression enabled fails in multiple ways + ([1252](https://github.com/IBM/sarama/issues/1252)). +- Use the broker for any admin on BrokerConfig + ([1571](https://github.com/IBM/sarama/pull/1571)). +- Set DescribeConfigRequest Version field + ([1576](https://github.com/IBM/sarama/pull/1576)). 
+- ConsumerGroup flooding logs with client/metadata update req + ([1578](https://github.com/IBM/sarama/pull/1578)). +- MetadataRequest version in DescribeCluster + ([1580](https://github.com/IBM/sarama/pull/1580)). +- Fix deadlock in consumer group handleError + ([1581](https://github.com/IBM/sarama/pull/1581)) +- Fill in the Fetch{Request,Response} protocol + ([1582](https://github.com/IBM/sarama/pull/1582)). +- Retry topic request on ControllerNotAvailable + ([1586](https://github.com/IBM/sarama/pull/1586)). + +## Version 1.25.0 (2020-01-13) + +New Features: +- Support TLS protocol in kafka-producer-performance + ([1538](https://github.com/IBM/sarama/pull/1538)). +- Add support for kafka 2.4.0 + ([1552](https://github.com/IBM/sarama/pull/1552)). + +Improvements: +- Allow the Consumer to disable auto-commit offsets + ([1164](https://github.com/IBM/sarama/pull/1164)). +- Produce records with consistent timestamps + ([1455](https://github.com/IBM/sarama/pull/1455)). + +Bug Fixes: +- Fix incorrect SetTopicMetadata name mentions + ([1534](https://github.com/IBM/sarama/pull/1534)). +- Fix client.tryRefreshMetadata Println + ([1535](https://github.com/IBM/sarama/pull/1535)). +- Fix panic on calling updateMetadata on closed client + ([1531](https://github.com/IBM/sarama/pull/1531)). +- Fix possible faulty metrics in TestFuncProducing + ([1545](https://github.com/IBM/sarama/pull/1545)). + +## Version 1.24.1 (2019-10-31) + +New Features: +- Add DescribeLogDirs Request/Response pair + ([1520](https://github.com/IBM/sarama/pull/1520)). + +Bug Fixes: +- Fix ClusterAdmin returning invalid controller ID on DescribeCluster + ([1518](https://github.com/IBM/sarama/pull/1518)). +- Fix issue with consumergroup not rebalancing when new partition is added + ([1525](https://github.com/IBM/sarama/pull/1525)). +- Ensure consistent use of read/write deadlines + ([1529](https://github.com/IBM/sarama/pull/1529)). 
- Fix fetch delete record bug
+ +## Version 1.23.0 (2019-07-02) + +New Features: +- Add support for Kafka 2.3.0 + ([1418](https://github.com/IBM/sarama/pull/1418)). +- Add support for ListConsumerGroupOffsets v2 + ([1374](https://github.com/IBM/sarama/pull/1374)). +- Add support for DeleteConsumerGroup + ([1417](https://github.com/IBM/sarama/pull/1417)). +- Add support for SASLVersion configuration + ([1410](https://github.com/IBM/sarama/pull/1410)). +- Add kerberos support + ([1366](https://github.com/IBM/sarama/pull/1366)). + +Improvements: +- Improve sasl_scram_client example + ([1406](https://github.com/IBM/sarama/pull/1406)). +- Fix shutdown and race-condition in consumer-group example + ([1404](https://github.com/IBM/sarama/pull/1404)). +- Add support for error codes 77—81 + ([1397](https://github.com/IBM/sarama/pull/1397)). +- Pool internal objects allocated per message + ([1385](https://github.com/IBM/sarama/pull/1385)). +- Reduce packet decoder allocations + ([1373](https://github.com/IBM/sarama/pull/1373)). +- Support timeout when fetching metadata + ([1359](https://github.com/IBM/sarama/pull/1359)). + +Bug Fixes: +- Fix fetch size integer overflow + ([1376](https://github.com/IBM/sarama/pull/1376)). +- Handle and log throttled FetchResponses + ([1383](https://github.com/IBM/sarama/pull/1383)). +- Refactor misspelled word Resouce to Resource + ([1368](https://github.com/IBM/sarama/pull/1368)). + +## Version 1.22.1 (2019-04-29) + +Improvements: +- Use zstd 1.3.8 + ([1350](https://github.com/IBM/sarama/pull/1350)). +- Add support for SaslHandshakeRequest v1 + ([1354](https://github.com/IBM/sarama/pull/1354)). + +Bug Fixes: +- Fix V5 MetadataRequest nullable topics array + ([1353](https://github.com/IBM/sarama/pull/1353)). +- Use a different SCRAM client for each broker connection + ([1349](https://github.com/IBM/sarama/pull/1349)). +- Fix AllowAutoTopicCreation for MetadataRequest greater than v3 + ([1344](https://github.com/IBM/sarama/pull/1344)). 
- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms
+ ([1295](https://github.com/IBM/sarama/pull/1295)).
+- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests + ([1178](https://github.com/IBM/sarama/pull/1178)). +- Implement SASL/OAUTHBEARER + ([1240](https://github.com/IBM/sarama/pull/1240)). + +Improvements: +- Add Go mod support + ([1282](https://github.com/IBM/sarama/pull/1282)). +- Add error codes 73—76 + ([1239](https://github.com/IBM/sarama/pull/1239)). +- Add retry backoff function + ([1160](https://github.com/IBM/sarama/pull/1160)). +- Maintain metadata in the producer even when retries are disabled + ([1189](https://github.com/IBM/sarama/pull/1189)). +- Include ReplicaAssignment in ListTopics + ([1274](https://github.com/IBM/sarama/pull/1274)). +- Add producer performance tool + ([1222](https://github.com/IBM/sarama/pull/1222)). +- Add support LogAppend timestamps + ([1258](https://github.com/IBM/sarama/pull/1258)). + +Bug Fixes: +- Fix potential deadlock when a heartbeat request fails + ([1286](https://github.com/IBM/sarama/pull/1286)). +- Fix consuming compacted topic + ([1227](https://github.com/IBM/sarama/pull/1227)). +- Set correct Kafka version for DescribeConfigsRequest v1 + ([1277](https://github.com/IBM/sarama/pull/1277)). +- Update kafka test version + ([1273](https://github.com/IBM/sarama/pull/1273)). + +## Version 1.20.1 (2019-01-10) + +New Features: +- Add optional replica id in offset request + ([1100](https://github.com/IBM/sarama/pull/1100)). + +Improvements: +- Implement DescribeConfigs Request + Response v1 & v2 + ([1230](https://github.com/IBM/sarama/pull/1230)). +- Reuse compression objects + ([1185](https://github.com/IBM/sarama/pull/1185)). +- Switch from png to svg for GoDoc link in README + ([1243](https://github.com/IBM/sarama/pull/1243)). +- Fix typo in deprecation notice for FetchResponseBlock.Records + ([1242](https://github.com/IBM/sarama/pull/1242)). +- Fix typos in consumer metadata response file + ([1244](https://github.com/IBM/sarama/pull/1244)). 
- Add support for Kafka 2.1.0
+ ([#1229](https://github.com/IBM/sarama/pull/1229)).
+ - Add support for OffsetCommit request/response pairs versions v1 to v5
+ ([#1201](https://github.com/IBM/sarama/pull/1201)).
+ - Add support for OffsetFetch request/response pair up to version v5
+ ([#1198](https://github.com/IBM/sarama/pull/1198)).
- Fix consumer block when response contains only control messages
+ - Add support for Metadata request/response pairs versions v1 to v5 + ([#1047](https://github.com/IBM/sarama/pull/1047), + [#1069](https://github.com/IBM/sarama/pull/1069)). + - Add versioning to JoinGroup request/response pairs + ([#1098](https://github.com/IBM/sarama/pull/1098)) + - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs + ([#1065](https://github.com/IBM/sarama/pull/1065), + [#1096](https://github.com/IBM/sarama/pull/1096), + [#1027](https://github.com/IBM/sarama/pull/1027)). + - Add `Controller()` method to Client interface + ([#1063](https://github.com/IBM/sarama/pull/1063)). + +Improvements: + - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp + ([#1010](https://github.com/IBM/sarama/pull/1010)). + - Expose missing protocol parts: `msgSet` and `recordBatch` + ([#1049](https://github.com/IBM/sarama/pull/1049)). + - Add support for v1 DeleteTopics Request + ([#1052](https://github.com/IBM/sarama/pull/1052)). + - Add support for Go 1.10 + ([#1064](https://github.com/IBM/sarama/pull/1064)). + - Claim support for Kafka 1.1.0 + ([#1073](https://github.com/IBM/sarama/pull/1073)). + +Bug Fixes: + - Fix FindCoordinatorResponse.encode to allow nil Coordinator + ([#1050](https://github.com/IBM/sarama/pull/1050), + [#1051](https://github.com/IBM/sarama/pull/1051)). + - Clear all metadata when we have the latest topic info + ([#1033](https://github.com/IBM/sarama/pull/1033)). + - Make `PartitionConsumer.Close` idempotent + ([#1092](https://github.com/IBM/sarama/pull/1092)). + +## Version 1.16.0 (2018-02-12) + +New Features: + - Add support for the Create/Delete Topics request/response pairs + ([#1007](https://github.com/IBM/sarama/pull/1007), + [#1008](https://github.com/IBM/sarama/pull/1008)). + - Add support for the Describe/Create/Delete ACL request/response pairs + ([#1009](https://github.com/IBM/sarama/pull/1009)). 
+ - Add support for the five transaction-related request/response pairs + ([#1016](https://github.com/IBM/sarama/pull/1016)). + +Improvements: + - Permit setting version on mock producer responses + ([#999](https://github.com/IBM/sarama/pull/999)). + - Add `NewMockBrokerListener` helper for testing TLS connections + ([#1019](https://github.com/IBM/sarama/pull/1019)). + - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB + which results in much higher throughput in most cases + ([#1024](https://github.com/IBM/sarama/pull/1024)). + - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to + reduce CPU and memory usage when processing many partitions + ([#1028](https://github.com/IBM/sarama/pull/1028)). + - Assign relative offsets to messages in the producer to save the brokers a + recompression pass + ([#1002](https://github.com/IBM/sarama/pull/1002), + [#1015](https://github.com/IBM/sarama/pull/1015)). + +Bug Fixes: + - Fix producing uncompressed batches with the new protocol format + ([#1032](https://github.com/IBM/sarama/issues/1032)). + - Fix consuming compacted topics with the new protocol format + ([#1005](https://github.com/IBM/sarama/issues/1005)). + - Fix consuming topics with a mix of protocol formats + ([#1021](https://github.com/IBM/sarama/issues/1021)). + - Fix consuming when the broker includes multiple batches in a single response + ([#1022](https://github.com/IBM/sarama/issues/1022)). + - Fix detection of `PartialTrailingMessage` when the partial message was + truncated before the magic value indicating its version + ([#1030](https://github.com/IBM/sarama/pull/1030)). + - Fix expectation-checking in the mock of `SyncProducer.SendMessages` + ([#1035](https://github.com/IBM/sarama/pull/1035)). + +## Version 1.15.0 (2017-12-08) + +New Features: + - Claim official support for Kafka 1.0, though it did already work + ([#984](https://github.com/IBM/sarama/pull/984)). 
+ - Helper methods for Kafka version numbers to/from strings + ([#989](https://github.com/IBM/sarama/pull/989)). + - Implement CreatePartitions request/response + ([#985](https://github.com/IBM/sarama/pull/985)). + +Improvements: + - Add error codes 45-60 + ([#986](https://github.com/IBM/sarama/issues/986)). + +Bug Fixes: + - Fix slow consuming for certain Kafka 0.11/1.0 configurations + ([#982](https://github.com/IBM/sarama/pull/982)). + - Correctly determine when a FetchResponse contains the new message format + ([#990](https://github.com/IBM/sarama/pull/990)). + - Fix producing with multiple headers + ([#996](https://github.com/IBM/sarama/pull/996)). + - Fix handling of truncated record batches + ([#998](https://github.com/IBM/sarama/pull/998)). + - Fix leaking metrics when closing brokers + ([#991](https://github.com/IBM/sarama/pull/991)). + +## Version 1.14.0 (2017-11-13) + +New Features: + - Add support for the new Kafka 0.11 record-batch format, including the wire + protocol and the necessary behavioural changes in the producer and consumer. + Transactions and idempotency are not yet supported, but producing and + consuming should work with all the existing bells and whistles (batching, + compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta + of Arista Networks for this work. Part of + ([#901](https://github.com/IBM/sarama/issues/901)). + +Bug Fixes: + - Fix encoding of ProduceResponse versions in test + ([#970](https://github.com/IBM/sarama/pull/970)). + - Return partial replicas list when we have it + ([#975](https://github.com/IBM/sarama/pull/975)). + +## Version 1.13.0 (2017-10-04) + +New Features: + - Support for FetchRequest version 3 + ([#905](https://github.com/IBM/sarama/pull/905)). + - Permit setting version on mock FetchResponses + ([#939](https://github.com/IBM/sarama/pull/939)). 
+ - Add a configuration option to support storing only minimal metadata for + extremely large clusters + ([#937](https://github.com/IBM/sarama/pull/937)). + - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets + ([#932](https://github.com/IBM/sarama/pull/932)). + +Improvements: + - Provide the block-level timestamp when consuming compressed messages + ([#885](https://github.com/IBM/sarama/issues/885)). + - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned + by the broker, which can be meaningful + ([#930](https://github.com/IBM/sarama/pull/930)). + - Use a `Ticker` to reduce consumer timer overhead at the cost of higher + variance in the actual timeout + ([#933](https://github.com/IBM/sarama/pull/933)). + +Bug Fixes: + - Gracefully handle messages with negative timestamps + ([#907](https://github.com/IBM/sarama/pull/907)). + - Raise a proper error when encountering an unknown message version + ([#940](https://github.com/IBM/sarama/pull/940)). + +## Version 1.12.0 (2017-05-08) + +New Features: + - Added support for the `ApiVersions` request and response pair, and Kafka + version 0.10.2 ([#867](https://github.com/IBM/sarama/pull/867)). Note + that you still need to specify the Kafka version in the Sarama configuration + for the time being. + - Added a `Brokers` method to the Client which returns the complete set of + active brokers ([#813](https://github.com/IBM/sarama/pull/813)). + - Added an `InSyncReplicas` method to the Client which returns the set of all + in-sync broker IDs for the given partition, now that the Kafka versions for + which this was misleading are no longer in our supported set + ([#872](https://github.com/IBM/sarama/pull/872)). + - Added a `NewCustomHashPartitioner` method which allows constructing a hash + partitioner with a custom hash method in case the default (FNV-1a) is not + suitable + ([#837](https://github.com/IBM/sarama/pull/837), + [#841](https://github.com/IBM/sarama/pull/841)). 
+ +Improvements: + - Recognize more Kafka error codes + ([#859](https://github.com/IBM/sarama/pull/859)). + +Bug Fixes: + - Fix an issue where decoding a malformed FetchRequest would not return the + correct error ([#818](https://github.com/IBM/sarama/pull/818)). + - Respect ordering of group protocols in JoinGroupRequests. This fix is + transparent if you're using the `AddGroupProtocol` or + `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from + the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` + ([#812](https://github.com/IBM/sarama/issues/812)). + - Fix an alignment-related issue with atomics on 32-bit architectures + ([#859](https://github.com/IBM/sarama/pull/859)). + +## Version 1.11.0 (2016-12-20) + +_Important:_ As of Sarama 1.11 it is necessary to set the config value of +`Producer.Return.Successes` to true in order to use the SyncProducer. Previous +versions would silently override this value when instantiating a SyncProducer +which led to unexpected values and data races. + +New Features: + - Metrics! Thanks to Sébastien Launay for all his work on this feature + ([#701](https://github.com/IBM/sarama/pull/701), + [#746](https://github.com/IBM/sarama/pull/746), + [#766](https://github.com/IBM/sarama/pull/766)). + - Add support for LZ4 compression + ([#786](https://github.com/IBM/sarama/pull/786)). + - Add support for ListOffsetRequest v1 and Kafka 0.10.1 + ([#775](https://github.com/IBM/sarama/pull/775)). + - Added a `HighWaterMarks` method to the Consumer which aggregates the + `HighWaterMarkOffset` values of its child topic/partitions + ([#769](https://github.com/IBM/sarama/pull/769)). + +Bug Fixes: + - Fixed producing when using timestamps, compression and Kafka 0.10 + ([#759](https://github.com/IBM/sarama/pull/759)). + - Added missing decoder methods to DescribeGroups response + ([#756](https://github.com/IBM/sarama/pull/756)). 
+ - Fix producer shutdown when `Return.Errors` is disabled + ([#787](https://github.com/IBM/sarama/pull/787)). + - Don't mutate configuration in SyncProducer + ([#790](https://github.com/IBM/sarama/pull/790)). + - Fix crash on SASL initialization failure + ([#795](https://github.com/IBM/sarama/pull/795)). + +## Version 1.10.1 (2016-08-30) + +Bug Fixes: + - Fix the documentation for `HashPartitioner` which was incorrect + ([#717](https://github.com/IBM/sarama/pull/717)). + - Permit client creation even when it is limited by ACLs + ([#722](https://github.com/IBM/sarama/pull/722)). + - Several fixes to the consumer timer optimization code, regressions introduced + in v1.10.0. Go's timers are finicky + ([#730](https://github.com/IBM/sarama/pull/730), + [#733](https://github.com/IBM/sarama/pull/733), + [#734](https://github.com/IBM/sarama/pull/734)). + - Handle consuming compressed relative offsets with Kafka 0.10 + ([#735](https://github.com/IBM/sarama/pull/735)). + +## Version 1.10.0 (2016-08-02) + +_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of +Kafka you are running against (via the `config.Version` value) in order to use +features that may not be compatible with old Kafka versions. If you don't +specify this value it will default to 0.8.2 (the minimum supported), and trying +to use more recent features (like the offset manager) will fail with an error. + +_Also:_ The offset-manager's behaviour has been changed to match the upstream +java consumer (see [#705](https://github.com/IBM/sarama/pull/705) and +[#713](https://github.com/IBM/sarama/pull/713)). If you use the +offset-manager, please ensure that you are committing one *greater* than the +last consumed message offset or else you may end up consuming duplicate +messages. + +New Features: + - Support for Kafka 0.10 + ([#672](https://github.com/IBM/sarama/pull/672), + [#678](https://github.com/IBM/sarama/pull/678), + [#681](https://github.com/IBM/sarama/pull/681), and others). 
+ - Support for configuring the target Kafka version + ([#676](https://github.com/IBM/sarama/pull/676)). + - Batch producing support in the SyncProducer + ([#677](https://github.com/IBM/sarama/pull/677)). + - Extend producer mock to allow setting expectations on message contents + ([#667](https://github.com/IBM/sarama/pull/667)). + +Improvements: + - Support `nil` compressed messages for deleting in compacted topics + ([#634](https://github.com/IBM/sarama/pull/634)). + - Pre-allocate decoding errors, greatly reducing heap usage and GC time against + misbehaving brokers ([#690](https://github.com/IBM/sarama/pull/690)). + - Re-use consumer expiry timers, removing one allocation per consumed message + ([#707](https://github.com/IBM/sarama/pull/707)). + +Bug Fixes: + - Actually default the client ID to "sarama" like we say we do + ([#664](https://github.com/IBM/sarama/pull/664)). + - Fix a rare issue where `Client.Leader` could return the wrong error + ([#685](https://github.com/IBM/sarama/pull/685)). + - Fix a possible tight loop in the consumer + ([#693](https://github.com/IBM/sarama/pull/693)). + - Match upstream's offset-tracking behaviour + ([#705](https://github.com/IBM/sarama/pull/705)). + - Report UnknownTopicOrPartition errors from the offset manager + ([#706](https://github.com/IBM/sarama/pull/706)). + - Fix possible negative partition value from the HashPartitioner + ([#709](https://github.com/IBM/sarama/pull/709)). + +## Version 1.9.0 (2016-05-16) + +New Features: + - Add support for custom offset manager retention durations + ([#602](https://github.com/IBM/sarama/pull/602)). + - Publish low-level mocks to enable testing of third-party producer/consumer + implementations ([#570](https://github.com/IBM/sarama/pull/570)). + - Declare support for Golang 1.6 + ([#611](https://github.com/IBM/sarama/pull/611)). + - Support for SASL plain-text auth + ([#648](https://github.com/IBM/sarama/pull/648)). 
+ +Improvements: + - Simplified broker locking scheme slightly + ([#604](https://github.com/IBM/sarama/pull/604)). + - Documentation cleanup + ([#605](https://github.com/IBM/sarama/pull/605), + [#621](https://github.com/IBM/sarama/pull/621), + [#654](https://github.com/IBM/sarama/pull/654)). + +Bug Fixes: + - Fix race condition shutting down the OffsetManager + ([#658](https://github.com/IBM/sarama/pull/658)). + +## Version 1.8.0 (2016-02-01) + +New Features: + - Full support for Kafka 0.9: + - All protocol messages and fields + ([#586](https://github.com/IBM/sarama/pull/586), + [#588](https://github.com/IBM/sarama/pull/588), + [#590](https://github.com/IBM/sarama/pull/590)). + - Verified that TLS support works + ([#581](https://github.com/IBM/sarama/pull/581)). + - Fixed the OffsetManager compatibility + ([#585](https://github.com/IBM/sarama/pull/585)). + +Improvements: + - Optimize for fewer system calls when reading from the network + ([#584](https://github.com/IBM/sarama/pull/584)). + - Automatically retry `InvalidMessage` errors to match upstream behaviour + ([#589](https://github.com/IBM/sarama/pull/589)). + +## Version 1.7.0 (2015-12-11) + +New Features: + - Preliminary support for Kafka 0.9 + ([#572](https://github.com/IBM/sarama/pull/572)). This comes with several + caveats: + - Protocol-layer support is mostly in place + ([#577](https://github.com/IBM/sarama/pull/577)), however Kafka 0.9 + renamed some messages and fields, which we did not in order to preserve API + compatibility. + - The producer and consumer work against 0.9, but the offset manager does + not ([#573](https://github.com/IBM/sarama/pull/573)). + - TLS support may or may not work + ([#581](https://github.com/IBM/sarama/pull/581)). + +Improvements: + - Don't wait for request timeouts on dead brokers, greatly speeding recovery + when the TCP connection is left hanging + ([#548](https://github.com/IBM/sarama/pull/548)). + - Refactored part of the producer. 
The new version provides a much more elegant + solution to [#449](https://github.com/IBM/sarama/pull/449). It is also + slightly more efficient, and much more precise in calculating batch sizes + when compression is used + ([#549](https://github.com/IBM/sarama/pull/549), + [#550](https://github.com/IBM/sarama/pull/550), + [#551](https://github.com/IBM/sarama/pull/551)). + +Bug Fixes: + - Fix race condition in consumer test mock + ([#553](https://github.com/IBM/sarama/pull/553)). + +## Version 1.6.1 (2015-09-25) + +Bug Fixes: + - Fix panic that could occur if a user-supplied message value failed to encode + ([#449](https://github.com/IBM/sarama/pull/449)). + +## Version 1.6.0 (2015-09-04) + +New Features: + - Implementation of a consumer offset manager using the APIs introduced in + Kafka 0.8.2. The API is designed mainly for integration into a future + high-level consumer, not for direct use, although it is *possible* to use it + directly. + ([#461](https://github.com/IBM/sarama/pull/461)). + +Improvements: + - CRC32 calculation is much faster on machines with SSE4.2 instructions, + removing a major hotspot from most profiles + ([#255](https://github.com/IBM/sarama/pull/255)). + +Bug Fixes: + - Make protocol decoding more robust against some malformed packets generated + by go-fuzz ([#523](https://github.com/IBM/sarama/pull/523), + [#525](https://github.com/IBM/sarama/pull/525)) or found in other ways + ([#528](https://github.com/IBM/sarama/pull/528)). + - Fix a potential race condition panic in the consumer on shutdown + ([#529](https://github.com/IBM/sarama/pull/529)). + +## Version 1.5.0 (2015-08-17) + +New Features: + - TLS-encrypted network connections are now supported. This feature is subject + to change when Kafka releases built-in TLS support, but for now this is + enough to work with TLS-terminating proxies + ([#154](https://github.com/IBM/sarama/pull/154)). 
+ +Improvements: + - The consumer will not block if a single partition is not drained by the user; + all other partitions will continue to consume normally + ([#485](https://github.com/IBM/sarama/pull/485)). + - Formatting of error strings has been much improved + ([#495](https://github.com/IBM/sarama/pull/495)). + - Internal refactoring of the producer for code cleanliness and to enable + future work ([#300](https://github.com/IBM/sarama/pull/300)). + +Bug Fixes: + - Fix a potential deadlock in the consumer on shutdown + ([#475](https://github.com/IBM/sarama/pull/475)). + +## Version 1.4.3 (2015-07-21) + +Bug Fixes: + - Don't include the partitioner in the producer's "fetch partitions" + circuit-breaker ([#466](https://github.com/IBM/sarama/pull/466)). + - Don't retry messages until the broker is closed when abandoning a broker in + the producer ([#468](https://github.com/IBM/sarama/pull/468)). + - Update the import path for snappy-go, it has moved again and the API has + changed slightly ([#486](https://github.com/IBM/sarama/pull/486)). + +## Version 1.4.2 (2015-05-27) + +Bug Fixes: + - Update the import path for snappy-go, it has moved from google code to github + ([#456](https://github.com/IBM/sarama/pull/456)). + +## Version 1.4.1 (2015-05-25) + +Improvements: + - Optimizations when decoding snappy messages, thanks to John Potocny + ([#446](https://github.com/IBM/sarama/pull/446)). + +Bug Fixes: + - Fix hypothetical race conditions on producer shutdown + ([#450](https://github.com/IBM/sarama/pull/450), + [#451](https://github.com/IBM/sarama/pull/451)). + +## Version 1.4.0 (2015-05-01) + +New Features: + - The consumer now implements `Topics()` and `Partitions()` methods to enable + users to dynamically choose what topics/partitions to consume without + instantiating a full client + ([#431](https://github.com/IBM/sarama/pull/431)). 
+ - The partition-consumer now exposes the high water mark offset value returned + by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/IBM/sarama/pull/339)). + - Added a `kafka-console-consumer` tool capable of handling multiple + partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` + ([#439](https://github.com/IBM/sarama/pull/439), + [#442](https://github.com/IBM/sarama/pull/442)). + +Improvements: + - The producer's logging during retry scenarios is more consistent, more + useful, and slightly less verbose + ([#429](https://github.com/IBM/sarama/pull/429)). + - The client now shuffles its initial list of seed brokers in order to prevent + thundering herd on the first broker in the list + ([#441](https://github.com/IBM/sarama/pull/441)). + +Bug Fixes: + - The producer now correctly manages its state if retries occur when it is + shutting down, fixing several instances of confusing behaviour and at least + one potential deadlock ([#419](https://github.com/IBM/sarama/pull/419)). + - The consumer now handles messages for different partitions asynchronously, + making it much more resilient to specific user code ordering + ([#325](https://github.com/IBM/sarama/pull/325)). + +## Version 1.3.0 (2015-04-16) + +New Features: + - The client now tracks consumer group coordinators using + ConsumerMetadataRequests similar to how it tracks partition leadership using + regular MetadataRequests ([#411](https://github.com/IBM/sarama/pull/411)). + This adds two methods to the client API: + - `Coordinator(consumerGroup string) (*Broker, error)` + - `RefreshCoordinator(consumerGroup string) error` + +Improvements: + - ConsumerMetadataResponses now automatically create a Broker object out of the + ID/address/port combination for the Coordinator; accessing the fields + individually has been deprecated + ([#413](https://github.com/IBM/sarama/pull/413)). + - Much improved handling of `OffsetOutOfRange` errors in the consumer. 
+ Consumers will fail to start if the provided offset is out of range + ([#418](https://github.com/IBM/sarama/pull/418)) + and they will automatically shut down if the offset falls out of range + ([#424](https://github.com/IBM/sarama/pull/424)). + - Small performance improvement in encoding and decoding protocol messages + ([#427](https://github.com/IBM/sarama/pull/427)). + +Bug Fixes: + - Fix a rare race condition in the client's background metadata refresher if + it happens to be activated while the client is being closed + ([#422](https://github.com/IBM/sarama/pull/422)). + +## Version 1.2.0 (2015-04-07) + +Improvements: + - The producer's behaviour when `Flush.Frequency` is set is now more intuitive + ([#389](https://github.com/IBM/sarama/pull/389)). + - The producer is now somewhat more memory-efficient during and after retrying + messages due to an improved queue implementation + ([#396](https://github.com/IBM/sarama/pull/396)). + - The consumer produces much more useful logging output when leadership + changes ([#385](https://github.com/IBM/sarama/pull/385)). + - The client's `GetOffset` method will now automatically refresh metadata and + retry once in the event of stale information or similar + ([#394](https://github.com/IBM/sarama/pull/394)). + - Broker connections now have support for using TCP keepalives + ([#407](https://github.com/IBM/sarama/issues/407)). + +Bug Fixes: + - The OffsetCommitRequest message now correctly implements all three possible + API versions ([#390](https://github.com/IBM/sarama/pull/390), + [#400](https://github.com/IBM/sarama/pull/400)). + +## Version 1.1.0 (2015-03-20) + +Improvements: + - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly + broken topics don't choke throughput + ([#373](https://github.com/IBM/sarama/pull/373)). + +Bug Fixes: + - Fix the producer's internal reference counting in certain unusual scenarios + ([#367](https://github.com/IBM/sarama/pull/367)). 
+ - Fix the consumer's internal reference counting in certain unusual scenarios + ([#369](https://github.com/IBM/sarama/pull/369)). + - Fix a condition where the producer's internal control messages could have + gotten stuck ([#368](https://github.com/IBM/sarama/pull/368)). + - Fix an issue where invalid partition lists would be cached when asking for + metadata for a non-existant topic ([#372](https://github.com/IBM/sarama/pull/372)). + + +## Version 1.0.0 (2015-03-17) + +Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are: + +- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking. +- The consumer has been rewritten to only open one connection per broker instead of one connection per partition. +- The main types of Sarama are now interfaces to make depedency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/IBM/sarama/mocks` package. +- For most uses cases, it is no longer necessary to open a `Client`; this will be done for you. +- All the configuration values have been unified in the `Config` struct. +- Much improved test suite. diff --git a/vendor/github.com/IBM/sarama/CONTRIBUTING.md b/vendor/github.com/IBM/sarama/CONTRIBUTING.md new file mode 100644 index 000000000..173b2a384 --- /dev/null +++ b/vendor/github.com/IBM/sarama/CONTRIBUTING.md @@ -0,0 +1,46 @@ +## Contributing + +[fork]: https://github.com/IBM/sarama/fork +[pr]: https://github.com/IBM/sarama/compare +[released]: https://help.github.com/articles/github-terms-of-service/#6-contributions-under-repository-license + +Hi there! We are thrilled that you would like to contribute to Sarama. +Your help is essential for keeping it great. + +Contributions to this project are [released][released] to the public under the project's [opensource license](LICENSE.md). 
+By contributing to this project you agree to the [Developer Certificate of Origin](https://developercertificate.org/) (DCO). +The DCO was created by the Linux Kernel community and is a simple statement that you, as a contributor, wrote or otherwise have the legal right to contribute those changes. + +Contributors must _sign-off_ that they adhere to these requirements by adding a `Signed-off-by` line to all commit messages with an email address that matches the commit author: + +``` +feat: this is my commit message + +Signed-off-by: Random J Developer +``` + +Git even has a `-s` command line option to append this automatically to your commit message: + +``` +$ git commit -s -m 'This is my commit message' +``` + +## Submitting a pull request + +0. [Fork][fork] and clone the repository +1. Create a new branch: `git checkout -b my-branch-name` +2. Make your change, push to your fork and [submit a pull request][pr] +3. Wait for your pull request to be reviewed and merged. + +Here are a few things you can do that will increase the likelihood of your pull request being accepted: + +- Keep your change as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests. +- Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html). 
+ +## Further Reading + +- [Developer Certificate of Origin versus Contributor License Agreements](https://julien.ponge.org/blog/developer-certificate-of-origin-versus-contributor-license-agreements/) +- [The most powerful contributor agreement](https://lwn.net/Articles/592503/) +- [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/) +- [Using Pull Requests](https://help.github.com/articles/about-pull-requests/) +- [GitHub Help](https://help.github.com) diff --git a/vendor/github.com/Shopify/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka similarity index 82% rename from vendor/github.com/Shopify/sarama/Dockerfile.kafka rename to vendor/github.com/IBM/sarama/Dockerfile.kafka index 48a9c178a..90fdb1669 100644 --- a/vendor/github.com/Shopify/sarama/Dockerfile.kafka +++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka @@ -3,7 +3,8 @@ FROM registry.access.redhat.com/ubi8/ubi-minimal:latest USER root RUN microdnf update \ - && microdnf install curl gzip java-11-openjdk-headless tar \ + && microdnf install curl gzip java-11-openjdk-headless tar tzdata-java \ + && microdnf reinstall tzdata \ && microdnf clean all ENV JAVA_HOME=/usr/lib/jvm/jre-11 @@ -20,7 +21,7 @@ ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages" RUN mkdir -p "/opt/kafka-2.8.2" && chmod a+rw /opt/kafka-2.8.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-2.8.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-2.8.2" RUN mkdir -p "/opt/kafka-3.1.2" && chmod a+rw /opt/kafka-3.1.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.1.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.1.2" RUN mkdir -p "/opt/kafka-3.2.3" && chmod a+rw /opt/kafka-3.2.3 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.2.3.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.2.3" -RUN mkdir -p "/opt/kafka-3.3.1" && chmod a+rw /opt/kafka-3.3.1 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.3.1.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.3.1" +RUN mkdir -p "/opt/kafka-3.3.2" && chmod a+rw 
/opt/kafka-3.3.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.3.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.3.2" COPY entrypoint.sh / diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/IBM/sarama/LICENSE.md similarity index 95% rename from vendor/github.com/Shopify/sarama/LICENSE rename to vendor/github.com/IBM/sarama/LICENSE.md index d2bf4352f..f2c7f0c5b 100644 --- a/vendor/github.com/Shopify/sarama/LICENSE +++ b/vendor/github.com/IBM/sarama/LICENSE.md @@ -1,4 +1,7 @@ +# MIT License + Copyright (c) 2013 Shopify +Copyright (c) 2023 IBM Corporation Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/IBM/sarama/Makefile similarity index 100% rename from vendor/github.com/Shopify/sarama/Makefile rename to vendor/github.com/IBM/sarama/Makefile diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/IBM/sarama/README.md similarity index 72% rename from vendor/github.com/Shopify/sarama/README.md rename to vendor/github.com/IBM/sarama/README.md index 0ee6e6a7f..a1f6137e5 100644 --- a/vendor/github.com/Shopify/sarama/README.md +++ b/vendor/github.com/IBM/sarama/README.md @@ -1,18 +1,17 @@ # sarama -[![Go Reference](https://pkg.go.dev/badge/github.com/Shopify/sarama.svg)](https://pkg.go.dev/github.com/Shopify/sarama) -[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/main/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) +[![Go Reference](https://pkg.go.dev/badge/github.com/IBM/sarama.svg)](https://pkg.go.dev/github.com/IBM/sarama) Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/). ## Getting started -- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/Shopify/sarama). +- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/IBM/sarama). 
- Mocks for testing are available in the [mocks](./mocks) subpackage. - The [examples](./examples) directory contains more elaborate example applications. - The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. -You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions). +You might also want to look at the [Frequently Asked Questions](https://github.com/IBM/sarama/wiki/Frequently-Asked-Questions). ## Compatibility and API stability @@ -21,13 +20,13 @@ the two latest stable releases of Kafka and Go, and we provide a two month grace period for older releases. However, older releases of Kafka are still likely to work. Sarama follows semantic versioning and provides API stability via the gopkg.in service. -You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. +You can import a version with a guaranteed stable API via http://gopkg.in/IBM/sarama.v1. A changelog is available [here](CHANGELOG.md). ## Contributing -- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/main/.github/CONTRIBUTING.md). -- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details. +- Get started by checking our [contribution guidelines](https://github.com/IBM/sarama/blob/main/.github/CONTRIBUTING.md). +- Read the [Sarama wiki](https://github.com/IBM/sarama/wiki) for more technical and design details. - The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information. - For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. - If you have any questions, just ask! 
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/IBM/sarama/Vagrantfile similarity index 100% rename from vendor/github.com/Shopify/sarama/Vagrantfile rename to vendor/github.com/IBM/sarama/Vagrantfile diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/IBM/sarama/acl_bindings.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_bindings.go rename to vendor/github.com/IBM/sarama/acl_bindings.go diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/IBM/sarama/acl_create_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_create_request.go rename to vendor/github.com/IBM/sarama/acl_create_request.go diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/IBM/sarama/acl_create_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_create_response.go rename to vendor/github.com/IBM/sarama/acl_create_response.go diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/IBM/sarama/acl_delete_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_delete_request.go rename to vendor/github.com/IBM/sarama/acl_delete_request.go diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/IBM/sarama/acl_delete_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_delete_response.go rename to vendor/github.com/IBM/sarama/acl_delete_response.go diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/IBM/sarama/acl_describe_request.go similarity index 93% rename from vendor/github.com/Shopify/sarama/acl_describe_request.go rename to vendor/github.com/IBM/sarama/acl_describe_request.go index e0fe9023a..98edb6740 100644 --- a/vendor/github.com/Shopify/sarama/acl_describe_request.go +++ 
b/vendor/github.com/IBM/sarama/acl_describe_request.go @@ -1,6 +1,6 @@ package sarama -// DescribeAclsRequest is a secribe acl request type +// DescribeAclsRequest is a describe acl request type type DescribeAclsRequest struct { Version int AclFilter diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/IBM/sarama/acl_describe_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_describe_response.go rename to vendor/github.com/IBM/sarama/acl_describe_response.go diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/IBM/sarama/acl_filter.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_filter.go rename to vendor/github.com/IBM/sarama/acl_filter.go diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/IBM/sarama/acl_types.go similarity index 93% rename from vendor/github.com/Shopify/sarama/acl_types.go rename to vendor/github.com/IBM/sarama/acl_types.go index c3ba8ddcf..62bb5342a 100644 --- a/vendor/github.com/Shopify/sarama/acl_types.go +++ b/vendor/github.com/IBM/sarama/acl_types.go @@ -60,7 +60,7 @@ func (a *AclOperation) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the operation and converts it to an AclOperation +// UnmarshalText takes a text representation of the operation and converts it to an AclOperation func (a *AclOperation) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclOperation{ @@ -114,7 +114,7 @@ func (a *AclPermissionType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the permission type and converts it to an AclPermissionType +// UnmarshalText takes a text representation of the permission type and converts it to an AclPermissionType func (a *AclPermissionType) UnmarshalText(text []byte) error { normalized := 
strings.ToLower(string(text)) mapping := map[string]AclPermissionType{ @@ -166,7 +166,7 @@ func (a *AclResourceType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the resource type and converts it to an AclResourceType +// UnmarshalText takes a text representation of the resource type and converts it to an AclResourceType func (a *AclResourceType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclResourceType{ @@ -217,7 +217,7 @@ func (a *AclResourcePatternType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the resource pattern type and converts it to an AclResourcePatternType +// UnmarshalText takes a text representation of the resource pattern type and converts it to an AclResourcePatternType func (a *AclResourcePatternType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclResourcePatternType{ diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go rename to vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go rename to vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go similarity index 96% rename from vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go rename to vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go index 57ecf6488..1d6da75f5 
100644 --- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go +++ b/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go @@ -1,6 +1,6 @@ package sarama -// AddPartitionsToTxnRequest is a add paartition request +// AddPartitionsToTxnRequest is a add partition request type AddPartitionsToTxnRequest struct { TransactionalID string ProducerID int64 diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go rename to vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/IBM/sarama/admin.go similarity index 96% rename from vendor/github.com/Shopify/sarama/admin.go rename to vendor/github.com/IBM/sarama/admin.go index a334daff5..29eeca1c6 100644 --- a/vendor/github.com/Shopify/sarama/admin.go +++ b/vendor/github.com/IBM/sarama/admin.go @@ -207,19 +207,17 @@ func isErrNoController(err error) bool { // provided retryable func) up to the maximum number of tries permitted by // the admin client configuration func (ca *clusterAdmin) retryOnError(retryable func(error) bool, fn func() error) error { - var err error - for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ { - err = fn() - if err == nil || !retryable(err) { + for attemptsRemaining := ca.conf.Admin.Retry.Max + 1; ; { + err := fn() + attemptsRemaining-- + if err == nil || attemptsRemaining <= 0 || !retryable(err) { return err } Logger.Printf( "admin/request retrying after %dms... 
(%d attempts remaining)\n", - ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt) + ca.conf.Admin.Retry.Backoff/time.Millisecond, attemptsRemaining) time.Sleep(ca.conf.Admin.Retry.Backoff) - continue } - return err } func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error { @@ -275,13 +273,19 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO } func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) { - controller, err := ca.Controller() - if err != nil { - return nil, err - } - - request := NewMetadataRequest(ca.conf.Version, topics) - response, err := controller.GetMetadata(request) + var response *MetadataResponse + err = ca.retryOnError(isErrNoController, func() error { + controller, err := ca.Controller() + if err != nil { + return err + } + request := NewMetadataRequest(ca.conf.Version, topics) + response, err = controller.GetMetadata(request) + if isErrNoController(err) { + _, _ = ca.refreshController() + } + return err + }) if err != nil { return nil, err } @@ -289,13 +293,20 @@ func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetada } func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) { - controller, err := ca.Controller() - if err != nil { - return nil, int32(0), err - } + var response *MetadataResponse + err = ca.retryOnError(isErrNoController, func() error { + controller, err := ca.Controller() + if err != nil { + return err + } - request := NewMetadataRequest(ca.conf.Version, nil) - response, err := controller.GetMetadata(request) + request := NewMetadataRequest(ca.conf.Version, nil) + response, err = controller.GetMetadata(request) + if isErrNoController(err) { + _, _ = ca.refreshController() + } + return err + }) if err != nil { return nil, int32(0), err } @@ -545,13 +556,20 @@ func (ca *clusterAdmin) ListPartitionReassignments(topic string, 
partitions []in request.AddBlock(topic, partitions) - b, err := ca.Controller() - if err != nil { - return nil, err - } - _ = b.Open(ca.client.Config()) + var rsp *ListPartitionReassignmentsResponse + err = ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + _ = b.Open(ca.client.Config()) - rsp, err := b.ListPartitionReassignments(request) + rsp, err = b.ListPartitionReassignments(request) + if isErrNoController(err) { + _, _ = ca.refreshController() + } + return err + }) if err == nil && rsp != nil { return rsp.TopicStatus, nil @@ -891,7 +909,7 @@ func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*Group describeReq := &DescribeGroupsRequest{ Groups: brokerGroups, } - if ca.conf.Version.IsAtLeast(V2_3_0_0) { + if ca.conf.Version.IsAtLeast(V2_4_0_0) { describeReq.Version = 4 } response, err := broker.DescribeGroups(describeReq) diff --git a/vendor/github.com/Shopify/sarama/alter_client_quotas_request.go b/vendor/github.com/IBM/sarama/alter_client_quotas_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_client_quotas_request.go rename to vendor/github.com/IBM/sarama/alter_client_quotas_request.go diff --git a/vendor/github.com/Shopify/sarama/alter_client_quotas_response.go b/vendor/github.com/IBM/sarama/alter_client_quotas_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_client_quotas_response.go rename to vendor/github.com/IBM/sarama/alter_client_quotas_response.go diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/IBM/sarama/alter_configs_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_configs_request.go rename to vendor/github.com/IBM/sarama/alter_configs_request.go diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/IBM/sarama/alter_configs_response.go similarity index 100% rename from 
vendor/github.com/Shopify/sarama/alter_configs_response.go rename to vendor/github.com/IBM/sarama/alter_configs_response.go diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go b/vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go rename to vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go b/vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go rename to vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go rename to vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go rename to vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/IBM/sarama/api_versions_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/api_versions_request.go rename to vendor/github.com/IBM/sarama/api_versions_request.go diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/IBM/sarama/api_versions_response.go similarity index 100% rename from 
vendor/github.com/Shopify/sarama/api_versions_response.go rename to vendor/github.com/IBM/sarama/api_versions_response.go diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/IBM/sarama/async_producer.go similarity index 99% rename from vendor/github.com/Shopify/sarama/async_producer.go rename to vendor/github.com/IBM/sarama/async_producer.go index 50f226f8e..dfd891237 100644 --- a/vendor/github.com/Shopify/sarama/async_producer.go +++ b/vendor/github.com/IBM/sarama/async_producer.go @@ -50,7 +50,7 @@ type AsyncProducer interface { // errors to be returned. Errors() <-chan *ProducerError - // IsTransactional return true when current producer is is transactional. + // IsTransactional return true when current producer is transactional. IsTransactional() bool // TxnStatus return current producer transaction status. diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/IBM/sarama/balance_strategy.go similarity index 95% rename from vendor/github.com/Shopify/sarama/balance_strategy.go rename to vendor/github.com/IBM/sarama/balance_strategy.go index 4594df6f6..8635bdf7d 100644 --- a/vendor/github.com/Shopify/sarama/balance_strategy.go +++ b/vendor/github.com/IBM/sarama/balance_strategy.go @@ -57,7 +57,8 @@ type BalanceStrategy interface { // -------------------------------------------------------------------- -// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members. +// NewBalanceStrategyRange returns a range balance strategy, +// which is the default and assigns partitions as ranges to consumer group members. 
// This follows the same logic as // https://kafka.apache.org/31/javadoc/org/apache/kafka/clients/consumer/RangeAssignor.html // @@ -65,27 +66,33 @@ type BalanceStrategy interface { // // M1: {T1: [0, 1, 2], T2: [0, 1, 2]} // M2: {T2: [3, 4, 5], T2: [3, 4, 5]} -var BalanceStrategyRange = &balanceStrategy{ - name: RangeBalanceStrategyName, - coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { - partitionsPerConsumer := len(partitions) / len(memberIDs) - consumersWithExtraPartition := len(partitions) % len(memberIDs) - - sort.Strings(memberIDs) - - for i, memberID := range memberIDs { - min := i*partitionsPerConsumer + int(math.Min(float64(consumersWithExtraPartition), float64(i))) - extra := 0 - if i < consumersWithExtraPartition { - extra = 1 +func NewBalanceStrategyRange() BalanceStrategy { + return &balanceStrategy{ + name: RangeBalanceStrategyName, + coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { + partitionsPerConsumer := len(partitions) / len(memberIDs) + consumersWithExtraPartition := len(partitions) % len(memberIDs) + + sort.Strings(memberIDs) + + for i, memberID := range memberIDs { + min := i*partitionsPerConsumer + int(math.Min(float64(consumersWithExtraPartition), float64(i))) + extra := 0 + if i < consumersWithExtraPartition { + extra = 1 + } + max := min + partitionsPerConsumer + extra + plan.Add(memberID, topic, partitions[min:max]...) } - max := min + partitionsPerConsumer + extra - plan.Add(memberID, topic, partitions[min:max]...) 
- } - }, + }, + } } -// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments +// Deprecated: use NewBalanceStrategyRange to avoid data race issue +var BalanceStrategyRange = NewBalanceStrategyRange() + +// NewBalanceStrategySticky returns a sticky balance strategy, +// which assigns partitions to members with an attempt to preserve earlier assignments // while maintain a balanced partition distribution. // Example with topic T with six partitions (0..5) and two members (M1, M2): // @@ -97,13 +104,18 @@ var BalanceStrategyRange = &balanceStrategy{ // M1: {T: [0, 2]} // M2: {T: [1, 3]} // M3: {T: [4, 5]} -var BalanceStrategySticky = &stickyBalanceStrategy{} +func NewBalanceStrategySticky() BalanceStrategy { + return &stickyBalanceStrategy{} +} + +// Deprecated: use NewBalanceStrategySticky to avoid data race issue +var BalanceStrategySticky = NewBalanceStrategySticky() // -------------------------------------------------------------------- type balanceStrategy struct { - name string coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) + name string } // Name implements BalanceStrategy. @@ -171,10 +183,7 @@ func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetad } // determine if we're dealing with a completely fresh assignment, or if there's existing assignment state - isFreshAssignment := false - if len(currentAssignment) == 0 { - isFreshAssignment = true - } + isFreshAssignment := len(currentAssignment) == 0 // create a mapping of all current topic partitions and the consumers that can be assigned to them partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string) @@ -281,10 +290,7 @@ func strsContains(s []string, value string) bool { // Balance assignments across consumers for maximum fairness and stickiness. 
func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) { - initializing := false - if len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 { - initializing = true - } + initializing := len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 // assign all unassigned partitions for _, partition := range unassignedPartitions { @@ -337,11 +343,17 @@ func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPart } } -// BalanceStrategyRoundRobin assigns partitions to members in alternating order. +// NewBalanceStrategyRoundRobin returns a round-robin balance strategy, +// which assigns partitions to members in alternating order. 
// For example, there are two topics (t0, t1) and two consumer (m0, m1), and each topic has three partitions (p0, p1, p2): // M0: [t0p0, t0p2, t1p1] // M1: [t0p1, t1p0, t1p2] -var BalanceStrategyRoundRobin = new(roundRobinBalancer) +func NewBalanceStrategyRoundRobin() BalanceStrategy { + return new(roundRobinBalancer) +} + +// Deprecated: use NewBalanceStrategyRoundRobin to avoid data race issue +var BalanceStrategyRoundRobin = NewBalanceStrategyRoundRobin() type roundRobinBalancer struct{} @@ -414,8 +426,8 @@ func (tp *topicAndPartition) comparedValue() string { } type memberAndTopic struct { - memberID string topics map[string]struct{} + memberID string } func (m *memberAndTopic) hasTopic(topic string) bool { @@ -681,11 +693,8 @@ func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, par } heap.Init(&pq) - for { - // loop until no consumer-group members remain - if pq.Len() == 0 { - break - } + // loop until no consumer-group members remain + for pq.Len() != 0 { member := pq[0] // partitions that were assigned to a different consumer last time @@ -1106,7 +1115,7 @@ type assignmentPriorityQueue []*consumerGroupMember func (pq assignmentPriorityQueue) Len() int { return len(pq) } func (pq assignmentPriorityQueue) Less(i, j int) bool { - // order asssignment priority queue in descending order using assignment-count/member-id + // order assignment priority queue in descending order using assignment-count/member-id if len(pq[i].assignments) == len(pq[j].assignments) { return strings.Compare(pq[i].id, pq[j].id) > 0 } diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/IBM/sarama/broker.go similarity index 99% rename from vendor/github.com/Shopify/sarama/broker.go rename to vendor/github.com/IBM/sarama/broker.go index d049e9b47..7ed987fe3 100644 --- a/vendor/github.com/Shopify/sarama/broker.go +++ b/vendor/github.com/IBM/sarama/broker.go @@ -175,7 +175,9 @@ func (b *Broker) Open(conf *Config) error { b.lock.Lock() - 
b.metricRegistry = newCleanupRegistry(conf.MetricRegistry) + if b.metricRegistry == nil { + b.metricRegistry = newCleanupRegistry(conf.MetricRegistry) + } go withRecover(func() { defer func() { @@ -453,7 +455,7 @@ func (b *Broker) AsyncProduce(request *ProduceRequest, cb ProduceCallback) error return } - // Wellformed response + // Well-formed response b.updateThrottleMetric(res.ThrottleTime) cb(res, nil) }, diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/IBM/sarama/client.go similarity index 98% rename from vendor/github.com/Shopify/sarama/client.go rename to vendor/github.com/IBM/sarama/client.go index f7872a1b3..d9fb77d64 100644 --- a/vendor/github.com/Shopify/sarama/client.go +++ b/vendor/github.com/IBM/sarama/client.go @@ -50,7 +50,7 @@ type Client interface { // topic/partition, as determined by querying the cluster metadata. Leader(topic string, partitionID int32) (*Broker, error) - // LeaderAndEpoch returns the the leader and its epoch for the current + // LeaderAndEpoch returns the leader and its epoch for the current // topic/partition, as determined by querying the cluster metadata. LeaderAndEpoch(topic string, partitionID int32) (*Broker, int32, error) @@ -132,10 +132,10 @@ const ( ) type client struct { - // updateMetaDataMs stores the time at which metadata was lasted updated. + // updateMetadataMs stores the time at which metadata was lasted updated. // Note: this accessed atomically so must be the first word in the struct // as per golang/go#41970 - updateMetaDataMs int64 + updateMetadataMs int64 conf *Config closer, closed chan none // for shutting down background metadata updater @@ -513,7 +513,7 @@ func (client *client) RefreshMetadata(topics ...string) error { // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper // error. This handles the case by returning an error instead of sending it - // off to Kafka. 
See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310 + // off to Kafka. See: https://github.com/IBM/sarama/pull/38#issuecomment-26362310 for _, topic := range topics { if topic == "" { return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return @@ -975,13 +975,14 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, time.Sleep(backoff) } - t := atomic.LoadInt64(&client.updateMetaDataMs) - if time.Since(time.Unix(t/1e3, 0)) < backoff { + t := atomic.LoadInt64(&client.updateMetadataMs) + if time.Since(time.UnixMilli(t)) < backoff { return err } + attemptsRemaining-- Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) - return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline) + return client.tryRefreshMetadata(topics, attemptsRemaining, deadline) } return err } @@ -999,10 +1000,7 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, req := NewMetadataRequest(client.conf.Version, topics) req.AllowAutoTopicCreation = allowAutoTopicCreation - t := atomic.LoadInt64(&client.updateMetaDataMs) - if !atomic.CompareAndSwapInt64(&client.updateMetaDataMs, t, time.Now().UnixNano()/int64(time.Millisecond)) { - return nil - } + atomic.StoreInt64(&client.updateMetadataMs, time.Now().UnixMilli()) response, err := broker.GetMetadata(req) var kerror KError @@ -1160,9 +1158,10 @@ func (client *client) findCoordinator(coordinatorKey string, coordinatorType Coo retry := func(err error) (*FindCoordinatorResponse, error) { if attemptsRemaining > 0 { backoff := client.computeBackoff(attemptsRemaining) + attemptsRemaining-- Logger.Printf("client/coordinator retrying after %dms... 
(%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) time.Sleep(backoff) - return client.findCoordinator(coordinatorKey, coordinatorType, attemptsRemaining-1) + return client.findCoordinator(coordinatorKey, coordinatorType, attemptsRemaining) } return nil, err } diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/IBM/sarama/compress.go similarity index 100% rename from vendor/github.com/Shopify/sarama/compress.go rename to vendor/github.com/IBM/sarama/compress.go diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/IBM/sarama/config.go similarity index 98% rename from vendor/github.com/Shopify/sarama/config.go rename to vendor/github.com/IBM/sarama/config.go index b07034434..eb27d98ac 100644 --- a/vendor/github.com/Shopify/sarama/config.go +++ b/vendor/github.com/IBM/sarama/config.go @@ -294,7 +294,7 @@ type Config struct { Interval time.Duration } Rebalance struct { - // Strategy for allocating topic partitions to members (default BalanceStrategyRange) + // Strategy for allocating topic partitions to members. // Deprecated: Strategy exists for historical compatibility // and should not be used. Please use GroupStrategies. Strategy BalanceStrategy @@ -302,7 +302,7 @@ type Config struct { // GroupStrategies is the priority-ordered list of client-side consumer group // balancing strategies that will be offered to the coordinator. The first // strategy that all group members support will be chosen by the leader. - // default: [BalanceStrategyRange] + // default: [ NewBalanceStrategyRange() ] GroupStrategies []BalanceStrategy // The maximum allowed time for each worker to join the group once a rebalance has begun. 
@@ -539,7 +539,7 @@ func NewConfig() *Config { c.Consumer.Group.Session.Timeout = 10 * time.Second c.Consumer.Group.Heartbeat.Interval = 3 * time.Second - c.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{BalanceStrategyRange} + c.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{NewBalanceStrategyRange()} c.Consumer.Group.Rebalance.Timeout = 60 * time.Second c.Consumer.Group.Rebalance.Retry.Max = 4 c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second @@ -650,19 +650,26 @@ func (c *Config) Validate() error { return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used") } - if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH { + switch c.Net.SASL.GSSAPI.AuthType { + case KRB5_USER_AUTH: if c.Net.SASL.GSSAPI.Password == "" { return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " + "mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH") } - } else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH { + case KRB5_KEYTAB_AUTH: if c.Net.SASL.GSSAPI.KeyTabPath == "" { return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" + - " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH") + " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH") } - } else { - return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH") + case KRB5_CCACHE_AUTH: + if c.Net.SASL.GSSAPI.CCachePath == "" { + return ConfigurationError("Net.SASL.GSSAPI.CCachePath must not be empty when GSS-API mechanism is used" + + " and Net.SASL.GSSAPI.AuthType = KRB5_CCACHE_AUTH") + } + default: + return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. 
Possible values are KRB5_USER_AUTH, KRB5_KEYTAB_AUTH, and KRB5_CCACHE_AUTH") } + if c.Net.SASL.GSSAPI.KerberosConfigPath == "" { return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used") } diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/IBM/sarama/config_resource_type.go similarity index 100% rename from vendor/github.com/Shopify/sarama/config_resource_type.go rename to vendor/github.com/IBM/sarama/config_resource_type.go diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/IBM/sarama/consumer.go similarity index 99% rename from vendor/github.com/Shopify/sarama/consumer.go rename to vendor/github.com/IBM/sarama/consumer.go index eb27df8d7..4d08b3dda 100644 --- a/vendor/github.com/Shopify/sarama/consumer.go +++ b/vendor/github.com/IBM/sarama/consumer.go @@ -85,13 +85,13 @@ type Consumer interface { // New calls to the broker will return records from these partitions if there are any to be fetched. Resume(topicPartitions map[string][]int32) - // Pause suspends fetching from all partitions. Future calls to the broker will not return any + // PauseAll suspends fetching from all partitions. Future calls to the broker will not return any // records from these partitions until they have been resumed using Resume()/ResumeAll(). // Note that this method does not affect partition subscription. // In particular, it does not cause a group rebalance when automatic assignment is used. PauseAll() - // Resume resumes all partitions which have been paused with Pause()/PauseAll(). + // ResumeAll resumes all partitions which have been paused with Pause()/PauseAll(). // New calls to the broker will return records from these partitions if there are any to be fetched. 
ResumeAll() } diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/IBM/sarama/consumer_group.go similarity index 99% rename from vendor/github.com/Shopify/sarama/consumer_group.go rename to vendor/github.com/IBM/sarama/consumer_group.go index ecdbcfa68..68f463976 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group.go +++ b/vendor/github.com/IBM/sarama/consumer_group.go @@ -252,7 +252,10 @@ func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, ha if refreshCoordinator { err := c.client.RefreshCoordinator(c.groupID) if err != nil { - return c.retryNewSession(ctx, topics, handler, retries, true) + if retries <= 0 { + return nil, err + } + return c.retryNewSession(ctx, topics, handler, retries-1, true) } } @@ -403,7 +406,7 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler claims = members.Topics // in the case of stateful balance strategies, hold on to the returned - // assignment metadata, otherwise, reset the statically defined conusmer + // assignment metadata, otherwise, reset the statically defined consumer // group metadata if members.UserData != nil { c.userData = members.UserData diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/IBM/sarama/consumer_group_members.go similarity index 100% rename from vendor/github.com/Shopify/sarama/consumer_group_members.go rename to vendor/github.com/IBM/sarama/consumer_group_members.go diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/IBM/sarama/consumer_metadata_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/consumer_metadata_request.go rename to vendor/github.com/IBM/sarama/consumer_metadata_request.go diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/IBM/sarama/consumer_metadata_response.go similarity index 100% rename from 
vendor/github.com/Shopify/sarama/consumer_metadata_response.go rename to vendor/github.com/IBM/sarama/consumer_metadata_response.go diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/IBM/sarama/control_record.go similarity index 100% rename from vendor/github.com/Shopify/sarama/control_record.go rename to vendor/github.com/IBM/sarama/control_record.go diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/IBM/sarama/crc32_field.go similarity index 100% rename from vendor/github.com/Shopify/sarama/crc32_field.go rename to vendor/github.com/IBM/sarama/crc32_field.go diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/IBM/sarama/create_partitions_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/create_partitions_request.go rename to vendor/github.com/IBM/sarama/create_partitions_request.go diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/IBM/sarama/create_partitions_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/create_partitions_response.go rename to vendor/github.com/IBM/sarama/create_partitions_response.go diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/IBM/sarama/create_topics_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/create_topics_request.go rename to vendor/github.com/IBM/sarama/create_topics_request.go diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/IBM/sarama/create_topics_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/create_topics_response.go rename to vendor/github.com/IBM/sarama/create_topics_response.go diff --git a/vendor/github.com/IBM/sarama/decompress.go b/vendor/github.com/IBM/sarama/decompress.go new file mode 100644 index 000000000..a01cefaa5 --- /dev/null +++ 
b/vendor/github.com/IBM/sarama/decompress.go @@ -0,0 +1,98 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "sync" + + snappy "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4/v4" +) + +var ( + lz4ReaderPool = sync.Pool{ + New: func() interface{} { + return lz4.NewReader(nil) + }, + } + + gzipReaderPool sync.Pool + + bufferPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } + + bytesPool = sync.Pool{ + New: func() interface{} { + res := make([]byte, 0, 4096) + return &res + }, + } +) + +func decompress(cc CompressionCodec, data []byte) ([]byte, error) { + switch cc { + case CompressionNone: + return data, nil + case CompressionGZIP: + var err error + reader, ok := gzipReaderPool.Get().(*gzip.Reader) + if !ok { + reader, err = gzip.NewReader(bytes.NewReader(data)) + } else { + err = reader.Reset(bytes.NewReader(data)) + } + + if err != nil { + return nil, err + } + + buffer := bufferPool.Get().(*bytes.Buffer) + _, err = buffer.ReadFrom(reader) + // copy the buffer to a new slice with the correct length + // reuse gzipReader and buffer + gzipReaderPool.Put(reader) + res := make([]byte, buffer.Len()) + copy(res, buffer.Bytes()) + buffer.Reset() + bufferPool.Put(buffer) + + return res, err + case CompressionSnappy: + return snappy.Decode(data) + case CompressionLZ4: + reader, ok := lz4ReaderPool.Get().(*lz4.Reader) + if !ok { + reader = lz4.NewReader(bytes.NewReader(data)) + } else { + reader.Reset(bytes.NewReader(data)) + } + buffer := bufferPool.Get().(*bytes.Buffer) + _, err := buffer.ReadFrom(reader) + // copy the buffer to a new slice with the correct length + // reuse lz4Reader and buffer + lz4ReaderPool.Put(reader) + res := make([]byte, buffer.Len()) + copy(res, buffer.Bytes()) + buffer.Reset() + bufferPool.Put(buffer) + + return res, err + case CompressionZSTD: + buffer := *bytesPool.Get().(*[]byte) + var err error + buffer, err = zstdDecompress(ZstdDecoderParams{}, buffer, data) + // copy the 
buffer to a new slice with the correct length and reuse buffer + res := make([]byte, len(buffer)) + copy(res, buffer) + buffer = buffer[:0] + bytesPool.Put(&buffer) + + return res, err + default: + return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)} + } +} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/IBM/sarama/delete_groups_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_groups_request.go rename to vendor/github.com/IBM/sarama/delete_groups_request.go diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/IBM/sarama/delete_groups_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_groups_response.go rename to vendor/github.com/IBM/sarama/delete_groups_response.go diff --git a/vendor/github.com/Shopify/sarama/delete_offsets_request.go b/vendor/github.com/IBM/sarama/delete_offsets_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_offsets_request.go rename to vendor/github.com/IBM/sarama/delete_offsets_request.go diff --git a/vendor/github.com/Shopify/sarama/delete_offsets_response.go b/vendor/github.com/IBM/sarama/delete_offsets_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_offsets_response.go rename to vendor/github.com/IBM/sarama/delete_offsets_response.go diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/IBM/sarama/delete_records_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_records_request.go rename to vendor/github.com/IBM/sarama/delete_records_request.go diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/IBM/sarama/delete_records_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_records_response.go rename to 
vendor/github.com/IBM/sarama/delete_records_response.go diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/IBM/sarama/delete_topics_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_topics_request.go rename to vendor/github.com/IBM/sarama/delete_topics_request.go diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/IBM/sarama/delete_topics_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/delete_topics_response.go rename to vendor/github.com/IBM/sarama/delete_topics_response.go diff --git a/vendor/github.com/Shopify/sarama/describe_client_quotas_request.go b/vendor/github.com/IBM/sarama/describe_client_quotas_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_client_quotas_request.go rename to vendor/github.com/IBM/sarama/describe_client_quotas_request.go diff --git a/vendor/github.com/Shopify/sarama/describe_client_quotas_response.go b/vendor/github.com/IBM/sarama/describe_client_quotas_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_client_quotas_response.go rename to vendor/github.com/IBM/sarama/describe_client_quotas_response.go diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/IBM/sarama/describe_configs_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_configs_request.go rename to vendor/github.com/IBM/sarama/describe_configs_request.go diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/IBM/sarama/describe_configs_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_configs_response.go rename to vendor/github.com/IBM/sarama/describe_configs_response.go diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/IBM/sarama/describe_groups_request.go 
similarity index 92% rename from vendor/github.com/Shopify/sarama/describe_groups_request.go rename to vendor/github.com/IBM/sarama/describe_groups_request.go index f81f69ac4..fc8e6b588 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_request.go +++ b/vendor/github.com/IBM/sarama/describe_groups_request.go @@ -44,8 +44,14 @@ func (r *DescribeGroupsRequest) headerVersion() int16 { func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3, 4: + case 1: + return V1_1_0_0 + case 2: + return V2_0_0_0 + case 3: return V2_3_0_0 + case 4: + return V2_4_0_0 } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/IBM/sarama/describe_groups_response.go similarity index 98% rename from vendor/github.com/Shopify/sarama/describe_groups_response.go rename to vendor/github.com/IBM/sarama/describe_groups_response.go index 09052e431..12bf93e15 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_response.go +++ b/vendor/github.com/IBM/sarama/describe_groups_response.go @@ -65,8 +65,14 @@ func (r *DescribeGroupsResponse) headerVersion() int16 { func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3, 4: + case 1: + return V1_1_0_0 + case 2: + return V2_0_0_0 + case 3: return V2_3_0_0 + case 4: + return V2_4_0_0 } return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/IBM/sarama/describe_log_dirs_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_log_dirs_request.go rename to vendor/github.com/IBM/sarama/describe_log_dirs_request.go diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/IBM/sarama/describe_log_dirs_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_log_dirs_response.go rename to vendor/github.com/IBM/sarama/describe_log_dirs_response.go 
diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go rename to vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go rename to vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/IBM/sarama/dev.yml similarity index 100% rename from vendor/github.com/Shopify/sarama/dev.yml rename to vendor/github.com/IBM/sarama/dev.yml diff --git a/vendor/github.com/Shopify/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml similarity index 96% rename from vendor/github.com/Shopify/sarama/docker-compose.yml rename to vendor/github.com/IBM/sarama/docker-compose.yml index e1119c87f..22ee21bf9 100644 --- a/vendor/github.com/Shopify/sarama/docker-compose.yml +++ b/vendor/github.com/IBM/sarama/docker-compose.yml @@ -40,7 +40,7 @@ services: dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091' @@ -62,7 +62,7 @@ services: dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 
'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092' @@ -84,7 +84,7 @@ services: dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093' @@ -106,7 +106,7 @@ services: dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094' @@ -128,7 +128,7 @@ services: dockerfile: Dockerfile.kafka restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.3.1} + KAFKA_VERSION: ${KAFKA_VERSION:-3.3.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095' diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/IBM/sarama/encoder_decoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/encoder_decoder.go rename to vendor/github.com/IBM/sarama/encoder_decoder.go diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/IBM/sarama/end_txn_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/end_txn_request.go rename to 
vendor/github.com/IBM/sarama/end_txn_request.go diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/IBM/sarama/end_txn_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/end_txn_response.go rename to vendor/github.com/IBM/sarama/end_txn_response.go diff --git a/vendor/github.com/Shopify/sarama/entrypoint.sh b/vendor/github.com/IBM/sarama/entrypoint.sh similarity index 94% rename from vendor/github.com/Shopify/sarama/entrypoint.sh rename to vendor/github.com/IBM/sarama/entrypoint.sh index 8cd2efcb9..7b344fae8 100644 --- a/vendor/github.com/Shopify/sarama/entrypoint.sh +++ b/vendor/github.com/IBM/sarama/entrypoint.sh @@ -1,6 +1,6 @@ #!/bin/bash -KAFKA_VERSION="${KAFKA_VERSION:-3.3.1}" +KAFKA_VERSION="${KAFKA_VERSION:-3.3.2}" KAFKA_HOME="/opt/kafka-${KAFKA_VERSION}" if [ ! -d "${KAFKA_HOME}" ]; then diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/IBM/sarama/errors.go similarity index 99% rename from vendor/github.com/Shopify/sarama/errors.go rename to vendor/github.com/IBM/sarama/errors.go index 27977f166..8d1d16834 100644 --- a/vendor/github.com/Shopify/sarama/errors.go +++ b/vendor/github.com/IBM/sarama/errors.go @@ -79,7 +79,7 @@ var ErrTransactionNotReady = errors.New("transaction manager: transaction is not // ErrNonTransactedProducer when calling BeginTxn, CommitTxn or AbortTxn on a non transactional producer. var ErrNonTransactedProducer = errors.New("transaction manager: you need to add TransactionalID to producer") -// ErrTransitionNotAllowed when txnmgr state transiion is not valid. +// ErrTransitionNotAllowed when txnmgr state transition is not valid. var ErrTransitionNotAllowed = errors.New("transaction manager: invalid transition attempted") // ErrCannotTransitionNilError when transition is attempted with an nil error. 
@@ -89,7 +89,7 @@ var ErrCannotTransitionNilError = errors.New("transaction manager: cannot transi var ErrTxnUnableToParseResponse = errors.New("transaction manager: unable to parse response") // MultiErrorFormat specifies the formatter applied to format multierrors. The -// default implementation is a consensed version of the hashicorp/go-multierror +// default implementation is a condensed version of the hashicorp/go-multierror // default one var MultiErrorFormat multierror.ErrorFormatFunc = func(es []error) string { if len(es) == 1 { diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/IBM/sarama/fetch_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/fetch_request.go rename to vendor/github.com/IBM/sarama/fetch_request.go diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/IBM/sarama/fetch_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/fetch_response.go rename to vendor/github.com/IBM/sarama/fetch_response.go diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/IBM/sarama/find_coordinator_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/find_coordinator_request.go rename to vendor/github.com/IBM/sarama/find_coordinator_request.go diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/IBM/sarama/find_coordinator_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/find_coordinator_response.go rename to vendor/github.com/IBM/sarama/find_coordinator_response.go diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/IBM/sarama/gssapi_kerberos.go similarity index 99% rename from vendor/github.com/Shopify/sarama/gssapi_kerberos.go rename to vendor/github.com/IBM/sarama/gssapi_kerberos.go index ab8b70196..8abbcdc38 100644 --- a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go +++ 
b/vendor/github.com/IBM/sarama/gssapi_kerberos.go @@ -23,6 +23,7 @@ const ( GSS_API_GENERIC_TAG = 0x60 KRB5_USER_AUTH = 1 KRB5_KEYTAB_AUTH = 2 + KRB5_CCACHE_AUTH = 3 GSS_API_INITIAL = 1 GSS_API_VERIFY = 2 GSS_API_FINISH = 3 @@ -31,6 +32,7 @@ const ( type GSSAPIConfig struct { AuthType int KeyTabPath string + CCachePath string KerberosConfigPath string ServiceName string Username string diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/IBM/sarama/heartbeat_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/heartbeat_request.go rename to vendor/github.com/IBM/sarama/heartbeat_request.go diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/IBM/sarama/heartbeat_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/heartbeat_response.go rename to vendor/github.com/IBM/sarama/heartbeat_response.go diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go b/vendor/github.com/IBM/sarama/incremental_alter_configs_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go rename to vendor/github.com/IBM/sarama/incremental_alter_configs_request.go diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go b/vendor/github.com/IBM/sarama/incremental_alter_configs_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go rename to vendor/github.com/IBM/sarama/incremental_alter_configs_response.go diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/IBM/sarama/init_producer_id_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/init_producer_id_request.go rename to vendor/github.com/IBM/sarama/init_producer_id_request.go diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go 
b/vendor/github.com/IBM/sarama/init_producer_id_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/init_producer_id_response.go rename to vendor/github.com/IBM/sarama/init_producer_id_response.go diff --git a/vendor/github.com/Shopify/sarama/interceptors.go b/vendor/github.com/IBM/sarama/interceptors.go similarity index 100% rename from vendor/github.com/Shopify/sarama/interceptors.go rename to vendor/github.com/IBM/sarama/interceptors.go diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/IBM/sarama/join_group_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/join_group_request.go rename to vendor/github.com/IBM/sarama/join_group_request.go diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/IBM/sarama/join_group_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/join_group_response.go rename to vendor/github.com/IBM/sarama/join_group_response.go diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/IBM/sarama/kerberos_client.go similarity index 79% rename from vendor/github.com/Shopify/sarama/kerberos_client.go rename to vendor/github.com/IBM/sarama/kerberos_client.go index 01a53193b..289126879 100644 --- a/vendor/github.com/Shopify/sarama/kerberos_client.go +++ b/vendor/github.com/IBM/sarama/kerberos_client.go @@ -3,6 +3,7 @@ package sarama import ( krb5client "github.com/jcmturner/gokrb5/v8/client" krb5config "github.com/jcmturner/gokrb5/v8/config" + "github.com/jcmturner/gokrb5/v8/credentials" "github.com/jcmturner/gokrb5/v8/keytab" "github.com/jcmturner/gokrb5/v8/types" ) @@ -32,13 +33,23 @@ func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) { func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) { var client *krb5client.Client - if config.AuthType == KRB5_KEYTAB_AUTH { + switch config.AuthType { + case KRB5_KEYTAB_AUTH: 
kt, err := keytab.Load(config.KeyTabPath) if err != nil { return nil, err } client = krb5client.NewWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) - } else { + case KRB5_CCACHE_AUTH: + cc, err := credentials.LoadCCache(config.CCachePath) + if err != nil { + return nil, err + } + client, err = krb5client.NewFromCCache(cc, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) + if err != nil { + return nil, err + } + default: client = krb5client.NewWithPassword(config.Username, config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) } diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/IBM/sarama/leave_group_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/leave_group_request.go rename to vendor/github.com/IBM/sarama/leave_group_request.go diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/IBM/sarama/leave_group_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/leave_group_response.go rename to vendor/github.com/IBM/sarama/leave_group_response.go diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/IBM/sarama/length_field.go similarity index 100% rename from vendor/github.com/Shopify/sarama/length_field.go rename to vendor/github.com/IBM/sarama/length_field.go diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/IBM/sarama/list_groups_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/list_groups_request.go rename to vendor/github.com/IBM/sarama/list_groups_request.go diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/IBM/sarama/list_groups_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/list_groups_response.go rename to vendor/github.com/IBM/sarama/list_groups_response.go diff --git 
a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go b/vendor/github.com/IBM/sarama/list_partition_reassignments_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go rename to vendor/github.com/IBM/sarama/list_partition_reassignments_request.go diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go b/vendor/github.com/IBM/sarama/list_partition_reassignments_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go rename to vendor/github.com/IBM/sarama/list_partition_reassignments_response.go diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/IBM/sarama/message.go similarity index 100% rename from vendor/github.com/Shopify/sarama/message.go rename to vendor/github.com/IBM/sarama/message.go diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/IBM/sarama/message_set.go similarity index 100% rename from vendor/github.com/Shopify/sarama/message_set.go rename to vendor/github.com/IBM/sarama/message_set.go diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/IBM/sarama/metadata_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/metadata_request.go rename to vendor/github.com/IBM/sarama/metadata_request.go diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/IBM/sarama/metadata_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/metadata_response.go rename to vendor/github.com/IBM/sarama/metadata_response.go diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/IBM/sarama/metrics.go similarity index 100% rename from vendor/github.com/Shopify/sarama/metrics.go rename to vendor/github.com/IBM/sarama/metrics.go diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go 
b/vendor/github.com/IBM/sarama/mockbroker.go similarity index 100% rename from vendor/github.com/Shopify/sarama/mockbroker.go rename to vendor/github.com/IBM/sarama/mockbroker.go diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/IBM/sarama/mockkerberos.go similarity index 100% rename from vendor/github.com/Shopify/sarama/mockkerberos.go rename to vendor/github.com/IBM/sarama/mockkerberos.go diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/IBM/sarama/mockresponses.go similarity index 100% rename from vendor/github.com/Shopify/sarama/mockresponses.go rename to vendor/github.com/IBM/sarama/mockresponses.go diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/IBM/sarama/offset_commit_request.go similarity index 95% rename from vendor/github.com/Shopify/sarama/offset_commit_request.go rename to vendor/github.com/IBM/sarama/offset_commit_request.go index 5dd88220d..ed0566fe6 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_request.go +++ b/vendor/github.com/IBM/sarama/offset_commit_request.go @@ -220,7 +220,11 @@ func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { } } -func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, leaderEpoch int32, timestamp int64, metadata string) { +func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { + r.AddBlockWithLeaderEpoch(topic, partitionID, offset, 0, timestamp, metadata) +} + +func (r *OffsetCommitRequest) AddBlockWithLeaderEpoch(topic string, partitionID int32, offset int64, leaderEpoch int32, timestamp int64, metadata string) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) } diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/IBM/sarama/offset_commit_response.go similarity index 100% rename from 
vendor/github.com/Shopify/sarama/offset_commit_response.go rename to vendor/github.com/IBM/sarama/offset_commit_response.go diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/IBM/sarama/offset_fetch_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/offset_fetch_request.go rename to vendor/github.com/IBM/sarama/offset_fetch_request.go diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/IBM/sarama/offset_fetch_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/offset_fetch_response.go rename to vendor/github.com/IBM/sarama/offset_fetch_response.go diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/IBM/sarama/offset_manager.go similarity index 98% rename from vendor/github.com/Shopify/sarama/offset_manager.go rename to vendor/github.com/IBM/sarama/offset_manager.go index 1ea15ff93..9b7960599 100644 --- a/vendor/github.com/Shopify/sarama/offset_manager.go +++ b/vendor/github.com/IBM/sarama/offset_manager.go @@ -304,7 +304,7 @@ func (om *offsetManager) constructRequest() *OffsetCommitRequest { for _, pom := range topicManagers { pom.lock.Lock() if pom.dirty { - r.AddBlock(pom.topic, pom.partition, pom.offset, pom.leaderEpoch, perPartitionTimestamp, pom.metadata) + r.AddBlockWithLeaderEpoch(pom.topic, pom.partition, pom.offset, pom.leaderEpoch, perPartitionTimestamp, pom.metadata) } pom.lock.Unlock() } @@ -359,13 +359,13 @@ func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest // nothing wrong but we didn't commit, we'll get it next time round case ErrFencedInstancedId: pom.handleError(err) - // TODO close the whole consumer for instacne fenced.... + // TODO close the whole consumer for instance fenced.... 
om.tryCancelSession() case ErrUnknownTopicOrPartition: // let the user know *and* try redispatching - if topic-auto-create is // enabled, redispatching should trigger a metadata req and create the // topic; if not then re-dispatching won't help, but we've let the user - // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) + // know and it shouldn't hurt either (see https://github.com/IBM/sarama/issues/706) fallthrough default: // dunno, tell the user and try redispatching diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/IBM/sarama/offset_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/offset_request.go rename to vendor/github.com/IBM/sarama/offset_request.go diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/IBM/sarama/offset_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/offset_response.go rename to vendor/github.com/IBM/sarama/offset_response.go diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/IBM/sarama/packet_decoder.go similarity index 98% rename from vendor/github.com/Shopify/sarama/packet_decoder.go rename to vendor/github.com/IBM/sarama/packet_decoder.go index b8cae5350..526e0f42f 100644 --- a/vendor/github.com/Shopify/sarama/packet_decoder.go +++ b/vendor/github.com/IBM/sarama/packet_decoder.go @@ -55,7 +55,7 @@ type pushDecoder interface { // Saves the offset into the input buffer as the location to actually read the calculated value when able. saveOffset(in int) - // Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32). + // Returns the length of data to reserve for the input of this encoder (e.g. 4 bytes for a CRC32). reserveLength() int // Indicates that all required data is now available to calculate and check the field. 
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/IBM/sarama/packet_encoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/packet_encoder.go rename to vendor/github.com/IBM/sarama/packet_encoder.go diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/IBM/sarama/partitioner.go similarity index 100% rename from vendor/github.com/Shopify/sarama/partitioner.go rename to vendor/github.com/IBM/sarama/partitioner.go diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/IBM/sarama/prep_encoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/prep_encoder.go rename to vendor/github.com/IBM/sarama/prep_encoder.go diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/IBM/sarama/produce_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/produce_request.go rename to vendor/github.com/IBM/sarama/produce_request.go diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/IBM/sarama/produce_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/produce_response.go rename to vendor/github.com/IBM/sarama/produce_response.go diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/IBM/sarama/produce_set.go similarity index 100% rename from vendor/github.com/Shopify/sarama/produce_set.go rename to vendor/github.com/IBM/sarama/produce_set.go diff --git a/vendor/github.com/Shopify/sarama/quota_types.go b/vendor/github.com/IBM/sarama/quota_types.go similarity index 100% rename from vendor/github.com/Shopify/sarama/quota_types.go rename to vendor/github.com/IBM/sarama/quota_types.go diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/IBM/sarama/real_decoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/real_decoder.go rename to vendor/github.com/IBM/sarama/real_decoder.go 
diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/IBM/sarama/real_encoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/real_encoder.go rename to vendor/github.com/IBM/sarama/real_encoder.go diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/IBM/sarama/record.go similarity index 100% rename from vendor/github.com/Shopify/sarama/record.go rename to vendor/github.com/IBM/sarama/record.go diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/IBM/sarama/record_batch.go similarity index 100% rename from vendor/github.com/Shopify/sarama/record_batch.go rename to vendor/github.com/IBM/sarama/record_batch.go diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/IBM/sarama/records.go similarity index 100% rename from vendor/github.com/Shopify/sarama/records.go rename to vendor/github.com/IBM/sarama/records.go diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/IBM/sarama/request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/request.go rename to vendor/github.com/IBM/sarama/request.go diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/IBM/sarama/response_header.go similarity index 100% rename from vendor/github.com/Shopify/sarama/response_header.go rename to vendor/github.com/IBM/sarama/response_header.go diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/IBM/sarama/sarama.go similarity index 99% rename from vendor/github.com/Shopify/sarama/sarama.go rename to vendor/github.com/IBM/sarama/sarama.go index a42bc075a..4d5f60a66 100644 --- a/vendor/github.com/Shopify/sarama/sarama.go +++ b/vendor/github.com/IBM/sarama/sarama.go @@ -91,7 +91,7 @@ import ( var ( // Logger is the instance of a StdLogger interface that Sarama writes connection - // management events to. 
By default it is set to discard all log messages via ioutil.Discard, + // management events to. By default it is set to discard all log messages via io.Discard, // but you can set it to redirect wherever you want. Logger StdLogger = log.New(io.Discard, "[Sarama] ", log.LstdFlags) diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/IBM/sarama/sasl_authenticate_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sasl_authenticate_request.go rename to vendor/github.com/IBM/sarama/sasl_authenticate_request.go diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/IBM/sarama/sasl_authenticate_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sasl_authenticate_response.go rename to vendor/github.com/IBM/sarama/sasl_authenticate_response.go diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/IBM/sarama/sasl_handshake_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sasl_handshake_request.go rename to vendor/github.com/IBM/sarama/sasl_handshake_request.go diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/IBM/sarama/sasl_handshake_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sasl_handshake_response.go rename to vendor/github.com/IBM/sarama/sasl_handshake_response.go diff --git a/vendor/github.com/Shopify/sarama/scram_formatter.go b/vendor/github.com/IBM/sarama/scram_formatter.go similarity index 100% rename from vendor/github.com/Shopify/sarama/scram_formatter.go rename to vendor/github.com/IBM/sarama/scram_formatter.go diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/IBM/sarama/sticky_assignor_user_data.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go rename to 
vendor/github.com/IBM/sarama/sticky_assignor_user_data.go diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/IBM/sarama/sync_group_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sync_group_request.go rename to vendor/github.com/IBM/sarama/sync_group_request.go diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/IBM/sarama/sync_group_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sync_group_response.go rename to vendor/github.com/IBM/sarama/sync_group_response.go diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/IBM/sarama/sync_producer.go similarity index 98% rename from vendor/github.com/Shopify/sarama/sync_producer.go rename to vendor/github.com/IBM/sarama/sync_producer.go index 8765ac336..3119baa6d 100644 --- a/vendor/github.com/Shopify/sarama/sync_producer.go +++ b/vendor/github.com/IBM/sarama/sync_producer.go @@ -33,7 +33,7 @@ type SyncProducer interface { // TxnStatus return current producer transaction status. TxnStatus() ProducerTxnStatusFlag - // IsTransactional return true when current producer is is transactional. + // IsTransactional return true when current producer is transactional. IsTransactional() bool // BeginTxn mark current transaction as ready. 
diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/IBM/sarama/timestamp.go similarity index 100% rename from vendor/github.com/Shopify/sarama/timestamp.go rename to vendor/github.com/IBM/sarama/timestamp.go diff --git a/vendor/github.com/Shopify/sarama/transaction_manager.go b/vendor/github.com/IBM/sarama/transaction_manager.go similarity index 99% rename from vendor/github.com/Shopify/sarama/transaction_manager.go rename to vendor/github.com/IBM/sarama/transaction_manager.go index e18abecd3..e1bcda3f9 100644 --- a/vendor/github.com/Shopify/sarama/transaction_manager.go +++ b/vendor/github.com/IBM/sarama/transaction_manager.go @@ -14,7 +14,7 @@ type ProducerTxnStatusFlag int16 const ( // ProducerTxnFlagUninitialized when txnmgr is created ProducerTxnFlagUninitialized ProducerTxnStatusFlag = 1 << iota - // ProducerTxnFlagInitializing when txnmgr is initilizing + // ProducerTxnFlagInitializing when txnmgr is initializing ProducerTxnFlagInitializing // ProducerTxnFlagReady when is ready to receive transaction ProducerTxnFlagReady @@ -22,7 +22,7 @@ const ( ProducerTxnFlagInTransaction // ProducerTxnFlagEndTransaction when transaction will be committed ProducerTxnFlagEndTransaction - // ProducerTxnFlagInError whan having abortable or fatal error + // ProducerTxnFlagInError when having abortable or fatal error ProducerTxnFlagInError // ProducerTxnFlagCommittingTransaction when committing txn ProducerTxnFlagCommittingTransaction @@ -117,13 +117,13 @@ var producerTxnTransitions = map[ProducerTxnStatusFlag][]ProducerTxnStatusFlag{ ProducerTxnFlagReady, ProducerTxnFlagInError, }, - // When we need are initilizing + // When we need are initializing ProducerTxnFlagInitializing: { ProducerTxnFlagInitializing, ProducerTxnFlagReady, ProducerTxnFlagInError, }, - // When we have initilized transactional producer + // When we have initialized transactional producer ProducerTxnFlagReady: { ProducerTxnFlagInTransaction, }, @@ -660,7 +660,7 @@ func (t 
*transactionManager) finishTransaction(commit bool) error { t.mutex.Lock() defer t.mutex.Unlock() - // Ensure no error when committing or abording + // Ensure no error when committing or aborting if commit && t.currentTxnStatus()&ProducerTxnFlagInError != 0 { return t.lastError } else if !commit && t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 { diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/IBM/sarama/txn_offset_commit_request.go similarity index 100% rename from vendor/github.com/Shopify/sarama/txn_offset_commit_request.go rename to vendor/github.com/IBM/sarama/txn_offset_commit_request.go diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/IBM/sarama/txn_offset_commit_response.go similarity index 100% rename from vendor/github.com/Shopify/sarama/txn_offset_commit_response.go rename to vendor/github.com/IBM/sarama/txn_offset_commit_response.go diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/IBM/sarama/utils.go similarity index 98% rename from vendor/github.com/Shopify/sarama/utils.go rename to vendor/github.com/IBM/sarama/utils.go index 819b6597c..4526543d6 100644 --- a/vendor/github.com/Shopify/sarama/utils.go +++ b/vendor/github.com/IBM/sarama/utils.go @@ -193,6 +193,7 @@ var ( V3_2_3_0 = newKafkaVersion(3, 2, 3, 0) V3_3_0_0 = newKafkaVersion(3, 3, 0, 0) V3_3_1_0 = newKafkaVersion(3, 3, 1, 0) + V3_3_2_0 = newKafkaVersion(3, 3, 2, 0) SupportedVersions = []KafkaVersion{ V0_8_2_0, @@ -248,9 +249,10 @@ var ( V3_2_3_0, V3_3_0_0, V3_3_1_0, + V3_3_2_0, } MinVersion = V0_8_2_0 - MaxVersion = V3_3_1_0 + MaxVersion = V3_3_2_0 DefaultVersion = V1_0_0_0 // reduced set of versions to matrix test @@ -266,7 +268,7 @@ var ( V2_8_2_0, V3_1_2_0, V3_2_3_0, - V3_3_1_0, + V3_3_2_0, } ) diff --git a/vendor/github.com/Shopify/sarama/version.go b/vendor/github.com/IBM/sarama/version.go similarity index 100% rename from vendor/github.com/Shopify/sarama/version.go 
rename to vendor/github.com/IBM/sarama/version.go diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/IBM/sarama/zstd.go similarity index 100% rename from vendor/github.com/Shopify/sarama/zstd.go rename to vendor/github.com/IBM/sarama/zstd.go diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md deleted file mode 100644 index c2f92ec9a..000000000 --- a/vendor/github.com/Shopify/sarama/CHANGELOG.md +++ /dev/null @@ -1,1187 +0,0 @@ -# Changelog - -## Version 1.31.1 (2022-02-01) - -- #2126 - @bai - Populate missing kafka versions -- #2124 - @bai - Add Kafka 3.1.0 to CI matrix, migrate to bitnami kafka image -- #2123 - @bai - Update klauspost/compress to 0.14 -- #2122 - @dnwe - fix(test): make it simpler to re-use toxiproxy -- #2119 - @bai - Add Kafka 3.1.0 version number -- #2005 - @raulnegreiros - feat: add methods to pause/resume consumer's consumption -- #2051 - @seveas - Expose the TLS connection state of a broker connection -- #2117 - @wuhuizuo - feat: add method MockApiVersionsResponse.SetApiKeys -- #2110 - @dnwe - fix: ensure heartbeats only stop after cleanup -- #2113 - @mosceo - Fix typo - -## Version 1.31.0 (2022-01-18) - -## What's Changed -### :tada: New Features / Improvements -* feat: expose IncrementalAlterConfigs API in admin.go by @fengyinqiao in https://github.com/Shopify/sarama/pull/2088 -* feat: allow AsyncProducer to have MaxOpenRequests inflight produce requests per broker by @xujianhai666 in https://github.com/Shopify/sarama/pull/1686 -* Support request pipelining in AsyncProducer by @slaunay in https://github.com/Shopify/sarama/pull/2094 -### :bug: Fixes -* fix(test): add fluent interface for mocks where missing by @grongor in https://github.com/Shopify/sarama/pull/2080 -* fix(test): test for ConsumePartition with OffsetOldest by @grongor in https://github.com/Shopify/sarama/pull/2081 -* fix: set HWMO during creation of partitionConsumer (fix incorrect HWMO before first 
fetch) by @grongor in https://github.com/Shopify/sarama/pull/2082 -* fix: ignore non-nil but empty error strings in Describe/Alter client quotas responses by @agriffaut in https://github.com/Shopify/sarama/pull/2096 -* fix: skip over KIP-482 tagged fields by @dnwe in https://github.com/Shopify/sarama/pull/2107 -* fix: clear preferredReadReplica if broker shutdown by @dnwe in https://github.com/Shopify/sarama/pull/2108 -* fix(test): correct wrong offsets in mock Consumer by @grongor in https://github.com/Shopify/sarama/pull/2078 -* fix: correct bugs in DescribeGroupsResponse by @dnwe in https://github.com/Shopify/sarama/pull/2111 -### :wrench: Maintenance -* chore: bump runtime and test dependencies by @dnwe in https://github.com/Shopify/sarama/pull/2100 -### :memo: Documentation -* docs: refresh README.md for Kafka 3.0.0 by @dnwe in https://github.com/Shopify/sarama/pull/2099 -### :heavy_plus_sign: Other Changes -* Fix typo by @mosceo in https://github.com/Shopify/sarama/pull/2084 - -## New Contributors -* @grongor made their first contribution in https://github.com/Shopify/sarama/pull/2080 -* @fengyinqiao made their first contribution in https://github.com/Shopify/sarama/pull/2088 -* @xujianhai666 made their first contribution in https://github.com/Shopify/sarama/pull/1686 -* @mosceo made their first contribution in https://github.com/Shopify/sarama/pull/2084 - -**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.30.1...v1.31.0 - -## Version 1.30.1 (2021-12-04) - -## What's Changed -### :tada: New Features / Improvements -* feat(zstd): pass level param through to compress/zstd encoder by @lizthegrey in https://github.com/Shopify/sarama/pull/2045 -### :bug: Fixes -* fix: set min-go-version to 1.16 by @troyanov in https://github.com/Shopify/sarama/pull/2048 -* logger: fix debug logs' formatting directives by @utrack in https://github.com/Shopify/sarama/pull/2054 -* fix: stuck on the batch with zero records length by @pachmu in 
https://github.com/Shopify/sarama/pull/2057 -* fix: only update preferredReadReplica if valid by @dnwe in https://github.com/Shopify/sarama/pull/2076 -### :wrench: Maintenance -* chore: add release notes configuration by @dnwe in https://github.com/Shopify/sarama/pull/2046 -* chore: confluent platform version bump by @lizthegrey in https://github.com/Shopify/sarama/pull/2070 - -## Notes -* ℹ️ from Sarama 1.30.x onward the minimum version of Go toolchain required is 1.16.x - -## New Contributors -* @troyanov made their first contribution in https://github.com/Shopify/sarama/pull/2048 -* @lizthegrey made their first contribution in https://github.com/Shopify/sarama/pull/2045 -* @utrack made their first contribution in https://github.com/Shopify/sarama/pull/2054 -* @pachmu made their first contribution in https://github.com/Shopify/sarama/pull/2057 - -**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.30.0...v1.30.1 - -## Version 1.30.0 (2021-09-29) - -⚠️ This release has been superseded by v1.30.1 and should _not_ be used. 
- -**regression**: enabling rackawareness causes severe throughput drops (#2071) — fixed in v1.30.1 via #2076 - ---- - -ℹ️ **Note: from Sarama 1.30.0 the minimum version of Go toolchain required is 1.16.x** - ---- - -# New Features / Improvements - -- #1983 - @zifengyu - allow configure AllowAutoTopicCreation argument in metadata refresh -- #2000 - @matzew - Using xdg-go module for SCRAM -- #2003 - @gdm85 - feat: add counter metrics for consumer group join/sync and their failures -- #1992 - @zhaomoran - feat: support SaslHandshakeRequest v0 for SCRAM -- #2006 - @faillefer - Add support for DeleteOffsets operation -- #1909 - @agriffaut - KIP-546 Client quota APIs -- #1633 - @aldelucca1 - feat: allow balance strategies to provide initial state -- #1275 - @dnwe - log: add a DebugLogger that proxies to Logger -- #2018 - @dnwe - feat: use DebugLogger reference for goldenpath log -- #2019 - @dnwe - feat: add logging & a metric for producer throttle -- #2023 - @dnwe - feat: add Controller() to ClusterAdmin interface -- #2025 - @dnwe - feat: support ApiVersionsRequest V3 protocol -- #2028 - @dnwe - feat: send ApiVersionsRequest on broker open -- #2034 - @bai - Add support for kafka 3.0.0 - -# Fixes - -- #1990 - @doxsch - fix: correctly pass ValidateOnly through to CreatePartitionsRequest -- #1988 - @LubergAlexander - fix: correct WithCustomFallbackPartitioner implementation -- #2001 - @HurSungYun - docs: inform AsyncProducer Close pitfalls -- #1973 - @qiangmzsx - fix: metrics still taking up too much memory when metrics.UseNilMetrics=true -- #2007 - @bai - Add support for Go 1.17 -- #2009 - @dnwe - fix: enable nilerr linter and fix iferr checks -- #2010 - @dnwe - chore: enable exportloopref and misspell linters -- #2013 - @faillefer - fix(test): disable encoded response/request check when map contains multiple elements -- #2015 - @bai - Change default branch to main -- #1718 - @crivera-fastly - fix: correct the error handling in client.InitProducerID() -- #1984 - 
@null-sleep - fix(test): bump confluentPlatformVersion from 6.1.1 to 6.2.0 -- #2016 - @dnwe - chore: replace deprecated Go calls -- #2017 - @dnwe - chore: delete legacy vagrant script -- #2020 - @dnwe - fix(test): remove testLogger from TrackLeader test -- #2024 - @dnwe - chore: bump toxiproxy container to v2.1.5 -- #2033 - @bai - Update dependencies -- #2031 - @gdm85 - docs: do not mention buffered messages in sync producer Close method -- #2035 - @dnwe - chore: populate the missing kafka versions -- #2038 - @dnwe - feat: add a fuzzing workflow to github actions - -## New Contributors -* @zifengyu made their first contribution in https://github.com/Shopify/sarama/pull/1983 -* @doxsch made their first contribution in https://github.com/Shopify/sarama/pull/1990 -* @LubergAlexander made their first contribution in https://github.com/Shopify/sarama/pull/1988 -* @HurSungYun made their first contribution in https://github.com/Shopify/sarama/pull/2001 -* @gdm85 made their first contribution in https://github.com/Shopify/sarama/pull/2003 -* @qiangmzsx made their first contribution in https://github.com/Shopify/sarama/pull/1973 -* @zhaomoran made their first contribution in https://github.com/Shopify/sarama/pull/1992 -* @faillefer made their first contribution in https://github.com/Shopify/sarama/pull/2006 -* @crivera-fastly made their first contribution in https://github.com/Shopify/sarama/pull/1718 -* @null-sleep made their first contribution in https://github.com/Shopify/sarama/pull/1984 - -**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.29.1...v1.30.0 - -## Version 1.29.1 (2021-06-24) - -# New Features / Improvements - -- #1966 - @ajanikow - KIP-339: Add Incremental Config updates API -- #1964 - @ajanikow - Add DelegationToken ResourceType - -# Fixes - -- #1962 - @hanxiaolin - fix(consumer): call interceptors when MaxProcessingTime expire -- #1971 - @KerryJava - fix kafka-producer-performance throughput panic -- #1968 - @dnwe - chore: bump golang.org/x 
versions -- #1956 - @joewreschnig - Allow checking the entire `ProducerMessage` in the mock producers -- #1963 - @dnwe - fix: ensure backoff timer is re-used -- #1949 - @dnwe - fix: explicitly use uint64 for payload length - -## Version 1.29.0 (2021-05-07) - -### New Features / Improvements - -- #1917 - @arkady-emelyanov - KIP-554: Add Broker-side SCRAM Config API -- #1869 - @wyndhblb - zstd: encode+decode performance improvements -- #1541 - @izolight - add String, (Un)MarshalText for acl types. -- #1921 - @bai - Add support for Kafka 2.8.0 - -### Fixes -- #1936 - @dnwe - fix(consumer): follow preferred broker -- #1933 - @ozzieba - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication -- #1929 - @celrenheit - Handle isolation level in Offset(Request|Response) and require stable offset in FetchOffset(Request|Response) -- #1926 - @dnwe - fix: correct initial CodeQL findings -- #1925 - @bai - Test out CodeQL -- #1923 - @bestgopher - Remove redundant switch-case, fix doc typos -- #1922 - @bai - Update go dependencies -- #1898 - @mmaslankaprv - Parsing only known control batches value -- #1887 - @withshubh - Fix: issues affecting code quality - -## Version 1.28.0 (2021-02-15) - -**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. 
See #1788 for details.** - -- #1870 - @kvch - Update Kerberos library to latest major -- #1876 - @bai - Update docs, reference pkg.go.dev -- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close -- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages -- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies -- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy -- #1862 - @bai - Fix CI setenv permissions issues -- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev -- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica - -## Version 1.27.2 (2020-10-21) - -### Improvements - -#1750 - @krantideep95 Adds missing mock responses for mocking consumer group - -## Fixes - -#1817 - reverts #1785 - Add private method to Client interface to prevent implementation - -## Version 1.27.1 (2020-10-07) - -### Improvements - -#1775 - @d1egoaz - Adds a Producer Interceptor example -#1781 - @justin-chen - Refresh brokers given list of seed brokers -#1784 - @justin-chen - Add randomize seed broker method -#1790 - @d1egoaz - remove example binary -#1798 - @bai - Test against Go 1.15 -#1785 - @justin-chen - Add private method to Client interface to prevent implementation -#1802 - @uvw - Support Go 1.13 error unwrapping - -## Fixes - -#1791 - @stanislavkozlovski - bump default version to 1.0.0 - -## Version 1.27.0 (2020-08-11) - -### Improvements - -#1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration -#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests -#1699 - @wclaeys - Consumer group support for manually comitting offsets -#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0 -#1726 - @d1egoaz - Include zstd on the functional tests -#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors -#1738 - @varun06 - fixed variable names that are named same as some std lib 
package names -#1741 - @varun06 - updated zstd dependency to latest v1.10.10 -#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base -#1763 - @alrs - remove deprecated tls options from test -#1769 - @bai - Add support for Kafka 2.6.0 - -## Fixes - -#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication -#1744 - @alrs - Fix isBalanced Function Signature - -## Version 1.26.4 (2020-05-19) - -## Fixes - -- #1701 - @d1egoaz - Set server name only for the current broker -- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka - -## Version 1.26.3 (2020-05-07) - -## Fixes - -- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config - -## Version 1.26.2 (2020-05-06) - -## ⚠️ Known Issues - -This release has been marked as not ready for production and may be unstable, please use v1.26.4. - -### Improvements - -- #1560 - @iyacontrol - add sync pool for gzip 1-9 -- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID -- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignements APIs -- #1632 - @bai - Add support for Go 1.14 -- #1640 - @random-dwi - Feature/fix list partition reassignments -- #1646 - @mimaison - Add DescribeLogDirs to admin client -- #1667 - @bai - Add support for kafka 2.5.0 - -## Fixes - -- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0 -- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine -- #1602 - @d1egoaz - adds a note about consumer groups Consume method -- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly -- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented -- #1614 - @alrs - produce_response.go: Remove Unused Functions -- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag 
variables -- #1639 - @agriffaut - Handle errors with no message but error code -- #1643 - @kzinglzy - fix `config.net.keepalive` -- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs -- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata -- #1650 - @lavoiesl - Return the response error in heartbeatLoop -- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die -- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy. - -## Version 1.26.1 (2020-02-04) - -Improvements: -- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539)) -- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595)) -- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573)) -- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592)) - -Bug Fixes: -- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590)) -- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589)) - -## Version 1.26.0 (2020-01-24) - -New Features: -- Enable zstd compression - ([1574](https://github.com/Shopify/sarama/pull/1574), - [1582](https://github.com/Shopify/sarama/pull/1582)) -- Support headers in tools kafka-console-producer - ([1549](https://github.com/Shopify/sarama/pull/1549)) - -Improvements: -- Add SASL AuthIdentity to SASL frames (authzid) - ([1585](https://github.com/Shopify/sarama/pull/1585)). - -Bug Fixes: -- Sending messages with ZStd compression enabled fails in multiple ways - ([1252](https://github.com/Shopify/sarama/issues/1252)). -- Use the broker for any admin on BrokerConfig - ([1571](https://github.com/Shopify/sarama/pull/1571)). -- Set DescribeConfigRequest Version field - ([1576](https://github.com/Shopify/sarama/pull/1576)). 
-- ConsumerGroup flooding logs with client/metadata update req - ([1578](https://github.com/Shopify/sarama/pull/1578)). -- MetadataRequest version in DescribeCluster - ([1580](https://github.com/Shopify/sarama/pull/1580)). -- Fix deadlock in consumer group handleError - ([1581](https://github.com/Shopify/sarama/pull/1581)) -- Fill in the Fetch{Request,Response} protocol - ([1582](https://github.com/Shopify/sarama/pull/1582)). -- Retry topic request on ControllerNotAvailable - ([1586](https://github.com/Shopify/sarama/pull/1586)). - -## Version 1.25.0 (2020-01-13) - -New Features: -- Support TLS protocol in kafka-producer-performance - ([1538](https://github.com/Shopify/sarama/pull/1538)). -- Add support for kafka 2.4.0 - ([1552](https://github.com/Shopify/sarama/pull/1552)). - -Improvements: -- Allow the Consumer to disable auto-commit offsets - ([1164](https://github.com/Shopify/sarama/pull/1164)). -- Produce records with consistent timestamps - ([1455](https://github.com/Shopify/sarama/pull/1455)). - -Bug Fixes: -- Fix incorrect SetTopicMetadata name mentions - ([1534](https://github.com/Shopify/sarama/pull/1534)). -- Fix client.tryRefreshMetadata Println - ([1535](https://github.com/Shopify/sarama/pull/1535)). -- Fix panic on calling updateMetadata on closed client - ([1531](https://github.com/Shopify/sarama/pull/1531)). -- Fix possible faulty metrics in TestFuncProducing - ([1545](https://github.com/Shopify/sarama/pull/1545)). - -## Version 1.24.1 (2019-10-31) - -New Features: -- Add DescribeLogDirs Request/Response pair - ([1520](https://github.com/Shopify/sarama/pull/1520)). - -Bug Fixes: -- Fix ClusterAdmin returning invalid controller ID on DescribeCluster - ([1518](https://github.com/Shopify/sarama/pull/1518)). -- Fix issue with consumergroup not rebalancing when new partition is added - ([1525](https://github.com/Shopify/sarama/pull/1525)). -- Ensure consistent use of read/write deadlines - ([1529](https://github.com/Shopify/sarama/pull/1529)). 
- -## Version 1.24.0 (2019-10-09) - -New Features: -- Add sticky partition assignor - ([1416](https://github.com/Shopify/sarama/pull/1416)). -- Switch from cgo zstd package to pure Go implementation - ([1477](https://github.com/Shopify/sarama/pull/1477)). - -Improvements: -- Allow creating ClusterAdmin from client - ([1415](https://github.com/Shopify/sarama/pull/1415)). -- Set KafkaVersion in ListAcls method - ([1452](https://github.com/Shopify/sarama/pull/1452)). -- Set request version in CreateACL ClusterAdmin method - ([1458](https://github.com/Shopify/sarama/pull/1458)). -- Set request version in DeleteACL ClusterAdmin method - ([1461](https://github.com/Shopify/sarama/pull/1461)). -- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest - ([1464](https://github.com/Shopify/sarama/pull/1464)). -- Remove direct usage of gofork - ([1465](https://github.com/Shopify/sarama/pull/1465)). -- Add support for Go 1.13 - ([1478](https://github.com/Shopify/sarama/pull/1478)). -- Improve behavior of NewMockListAclsResponse - ([1481](https://github.com/Shopify/sarama/pull/1481)). - -Bug Fixes: -- Fix race condition in consumergroup example - ([1434](https://github.com/Shopify/sarama/pull/1434)). -- Fix brokerProducer goroutine leak - ([1442](https://github.com/Shopify/sarama/pull/1442)). -- Use released version of lz4 library - ([1469](https://github.com/Shopify/sarama/pull/1469)). -- Set correct version in MockDeleteTopicsResponse - ([1484](https://github.com/Shopify/sarama/pull/1484)). -- Fix CLI help message typo - ([1494](https://github.com/Shopify/sarama/pull/1494)). - -Known Issues: -- Please **don't** use Zstd, as it doesn't work right now. - See https://github.com/Shopify/sarama/issues/1252 - -## Version 1.23.1 (2019-07-22) - -Bug Fixes: -- Fix fetch delete bug record - ([1425](https://github.com/Shopify/sarama/pull/1425)). -- Handle SASL/OAUTHBEARER token rejection - ([1428](https://github.com/Shopify/sarama/pull/1428)). 
- -## Version 1.23.0 (2019-07-02) - -New Features: -- Add support for Kafka 2.3.0 - ([1418](https://github.com/Shopify/sarama/pull/1418)). -- Add support for ListConsumerGroupOffsets v2 - ([1374](https://github.com/Shopify/sarama/pull/1374)). -- Add support for DeleteConsumerGroup - ([1417](https://github.com/Shopify/sarama/pull/1417)). -- Add support for SASLVersion configuration - ([1410](https://github.com/Shopify/sarama/pull/1410)). -- Add kerberos support - ([1366](https://github.com/Shopify/sarama/pull/1366)). - -Improvements: -- Improve sasl_scram_client example - ([1406](https://github.com/Shopify/sarama/pull/1406)). -- Fix shutdown and race-condition in consumer-group example - ([1404](https://github.com/Shopify/sarama/pull/1404)). -- Add support for error codes 77—81 - ([1397](https://github.com/Shopify/sarama/pull/1397)). -- Pool internal objects allocated per message - ([1385](https://github.com/Shopify/sarama/pull/1385)). -- Reduce packet decoder allocations - ([1373](https://github.com/Shopify/sarama/pull/1373)). -- Support timeout when fetching metadata - ([1359](https://github.com/Shopify/sarama/pull/1359)). - -Bug Fixes: -- Fix fetch size integer overflow - ([1376](https://github.com/Shopify/sarama/pull/1376)). -- Handle and log throttled FetchResponses - ([1383](https://github.com/Shopify/sarama/pull/1383)). -- Refactor misspelled word Resouce to Resource - ([1368](https://github.com/Shopify/sarama/pull/1368)). - -## Version 1.22.1 (2019-04-29) - -Improvements: -- Use zstd 1.3.8 - ([1350](https://github.com/Shopify/sarama/pull/1350)). -- Add support for SaslHandshakeRequest v1 - ([1354](https://github.com/Shopify/sarama/pull/1354)). - -Bug Fixes: -- Fix V5 MetadataRequest nullable topics array - ([1353](https://github.com/Shopify/sarama/pull/1353)). -- Use a different SCRAM client for each broker connection - ([1349](https://github.com/Shopify/sarama/pull/1349)). 
-- Fix AllowAutoTopicCreation for MetadataRequest greater than v3 - ([1344](https://github.com/Shopify/sarama/pull/1344)). - -## Version 1.22.0 (2019-04-09) - -New Features: -- Add Offline Replicas Operation to Client - ([1318](https://github.com/Shopify/sarama/pull/1318)). -- Allow using proxy when connecting to broker - ([1326](https://github.com/Shopify/sarama/pull/1326)). -- Implement ReadCommitted - ([1307](https://github.com/Shopify/sarama/pull/1307)). -- Add support for Kafka 2.2.0 - ([1331](https://github.com/Shopify/sarama/pull/1331)). -- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanismes - ([1331](https://github.com/Shopify/sarama/pull/1295)). - -Improvements: -- Unregister all broker metrics on broker stop - ([1232](https://github.com/Shopify/sarama/pull/1232)). -- Add SCRAM authentication example - ([1303](https://github.com/Shopify/sarama/pull/1303)). -- Add consumergroup examples - ([1304](https://github.com/Shopify/sarama/pull/1304)). -- Expose consumer batch size metric - ([1296](https://github.com/Shopify/sarama/pull/1296)). -- Add TLS options to console producer and consumer - ([1300](https://github.com/Shopify/sarama/pull/1300)). -- Reduce client close bookkeeping - ([1297](https://github.com/Shopify/sarama/pull/1297)). -- Satisfy error interface in create responses - ([1154](https://github.com/Shopify/sarama/pull/1154)). -- Please lint gods - ([1346](https://github.com/Shopify/sarama/pull/1346)). - -Bug Fixes: -- Fix multi consumer group instance crash - ([1338](https://github.com/Shopify/sarama/pull/1338)). -- Update lz4 to latest version - ([1347](https://github.com/Shopify/sarama/pull/1347)). -- Retry ErrNotCoordinatorForConsumer in new consumergroup session - ([1231](https://github.com/Shopify/sarama/pull/1231)). -- Fix cleanup error handler - ([1332](https://github.com/Shopify/sarama/pull/1332)). -- Fix rate condition in PartitionConsumer - ([1156](https://github.com/Shopify/sarama/pull/1156)). 
- -## Version 1.21.0 (2019-02-24) - -New Features: -- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest - ([1236](https://github.com/Shopify/sarama/pull/1236)). -- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests - ([1178](https://github.com/Shopify/sarama/pull/1178)). -- Implement SASL/OAUTHBEARER - ([1240](https://github.com/Shopify/sarama/pull/1240)). - -Improvements: -- Add Go mod support - ([1282](https://github.com/Shopify/sarama/pull/1282)). -- Add error codes 73—76 - ([1239](https://github.com/Shopify/sarama/pull/1239)). -- Add retry backoff function - ([1160](https://github.com/Shopify/sarama/pull/1160)). -- Maintain metadata in the producer even when retries are disabled - ([1189](https://github.com/Shopify/sarama/pull/1189)). -- Include ReplicaAssignment in ListTopics - ([1274](https://github.com/Shopify/sarama/pull/1274)). -- Add producer performance tool - ([1222](https://github.com/Shopify/sarama/pull/1222)). -- Add support LogAppend timestamps - ([1258](https://github.com/Shopify/sarama/pull/1258)). - -Bug Fixes: -- Fix potential deadlock when a heartbeat request fails - ([1286](https://github.com/Shopify/sarama/pull/1286)). -- Fix consuming compacted topic - ([1227](https://github.com/Shopify/sarama/pull/1227)). -- Set correct Kafka version for DescribeConfigsRequest v1 - ([1277](https://github.com/Shopify/sarama/pull/1277)). -- Update kafka test version - ([1273](https://github.com/Shopify/sarama/pull/1273)). - -## Version 1.20.1 (2019-01-10) - -New Features: -- Add optional replica id in offset request - ([1100](https://github.com/Shopify/sarama/pull/1100)). - -Improvements: -- Implement DescribeConfigs Request + Response v1 & v2 - ([1230](https://github.com/Shopify/sarama/pull/1230)). -- Reuse compression objects - ([1185](https://github.com/Shopify/sarama/pull/1185)). -- Switch from png to svg for GoDoc link in README - ([1243](https://github.com/Shopify/sarama/pull/1243)). 
-- Fix typo in deprecation notice for FetchResponseBlock.Records - ([1242](https://github.com/Shopify/sarama/pull/1242)). -- Fix typos in consumer metadata response file - ([1244](https://github.com/Shopify/sarama/pull/1244)). - -Bug Fixes: -- Revert to individual msg retries for non-idempotent - ([1203](https://github.com/Shopify/sarama/pull/1203)). -- Respect MaxMessageBytes limit for uncompressed messages - ([1141](https://github.com/Shopify/sarama/pull/1141)). - -## Version 1.20.0 (2018-12-10) - -New Features: - - Add support for zstd compression - ([#1170](https://github.com/Shopify/sarama/pull/1170)). - - Add support for Idempotent Producer - ([#1152](https://github.com/Shopify/sarama/pull/1152)). - - Add support support for Kafka 2.1.0 - ([#1229](https://github.com/Shopify/sarama/pull/1229)). - - Add support support for OffsetCommit request/response pairs versions v1 to v5 - ([#1201](https://github.com/Shopify/sarama/pull/1201)). - - Add support support for OffsetFetch request/response pair up to version v5 - ([#1198](https://github.com/Shopify/sarama/pull/1198)). - -Improvements: - - Export broker's Rack setting - ([#1173](https://github.com/Shopify/sarama/pull/1173)). - - Always use latest patch version of Go on CI - ([#1202](https://github.com/Shopify/sarama/pull/1202)). - - Add error codes 61 to 72 - ([#1195](https://github.com/Shopify/sarama/pull/1195)). - -Bug Fixes: - - Fix build without cgo - ([#1182](https://github.com/Shopify/sarama/pull/1182)). - - Fix go vet suggestion in consumer group file - ([#1209](https://github.com/Shopify/sarama/pull/1209)). - - Fix typos in code and comments - ([#1228](https://github.com/Shopify/sarama/pull/1228)). - -## Version 1.19.0 (2018-09-27) - -New Features: - - Implement a higher-level consumer group - ([#1099](https://github.com/Shopify/sarama/pull/1099)). - -Improvements: - - Add support for Go 1.11 - ([#1176](https://github.com/Shopify/sarama/pull/1176)). 
- -Bug Fixes: - - Fix encoding of `MetadataResponse` with version 2 and higher - ([#1174](https://github.com/Shopify/sarama/pull/1174)). - - Fix race condition in mock async producer - ([#1174](https://github.com/Shopify/sarama/pull/1174)). - -## Version 1.18.0 (2018-09-07) - -New Features: - - Make `Partitioner.RequiresConsistency` vary per-message - ([#1112](https://github.com/Shopify/sarama/pull/1112)). - - Add customizable partitioner - ([#1118](https://github.com/Shopify/sarama/pull/1118)). - - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`, - `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL` - ([#1055](https://github.com/Shopify/sarama/pull/1055)). - -Improvements: - - Add support for Kafka 2.0.0 - ([#1149](https://github.com/Shopify/sarama/pull/1149)). - - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts - ([#1123](https://github.com/Shopify/sarama/pull/1123)). - - Simpler offset management - ([#1127](https://github.com/Shopify/sarama/pull/1127)). - -Bug Fixes: - - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka - ([#1110](https://github.com/Shopify/sarama/pull/1110)). - - Fix consumer block when response did not contain all the - expected topic/partition blocks - ([#1086](https://github.com/Shopify/sarama/pull/1086)). - - Fix consumer block when response contains only constrol messages - ([#1115](https://github.com/Shopify/sarama/pull/1115)). - - Add timeout config for ClusterAdmin requests - ([#1142](https://github.com/Shopify/sarama/pull/1142)). - - Add version check when producing message with headers - ([#1117](https://github.com/Shopify/sarama/pull/1117)). - - Fix `MetadataRequest` for empty list of topics - ([#1132](https://github.com/Shopify/sarama/pull/1132)). - - Fix producer topic metadata on-demand fetch when topic error happens in metadata response - ([#1125](https://github.com/Shopify/sarama/pull/1125)). 
- -## Version 1.17.0 (2018-05-30) - -New Features: - - Add support for gzip compression levels - ([#1044](https://github.com/Shopify/sarama/pull/1044)). - - Add support for Metadata request/response pairs versions v1 to v5 - ([#1047](https://github.com/Shopify/sarama/pull/1047), - [#1069](https://github.com/Shopify/sarama/pull/1069)). - - Add versioning to JoinGroup request/response pairs - ([#1098](https://github.com/Shopify/sarama/pull/1098)) - - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs - ([#1065](https://github.com/Shopify/sarama/pull/1065), - [#1096](https://github.com/Shopify/sarama/pull/1096), - [#1027](https://github.com/Shopify/sarama/pull/1027)). - - Add `Controller()` method to Client interface - ([#1063](https://github.com/Shopify/sarama/pull/1063)). - -Improvements: - - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp - ([#1010](https://github.com/Shopify/sarama/pull/1010)). - - Expose missing protocol parts: `msgSet` and `recordBatch` - ([#1049](https://github.com/Shopify/sarama/pull/1049)). - - Add support for v1 DeleteTopics Request - ([#1052](https://github.com/Shopify/sarama/pull/1052)). - - Add support for Go 1.10 - ([#1064](https://github.com/Shopify/sarama/pull/1064)). - - Claim support for Kafka 1.1.0 - ([#1073](https://github.com/Shopify/sarama/pull/1073)). - -Bug Fixes: - - Fix FindCoordinatorResponse.encode to allow nil Coordinator - ([#1050](https://github.com/Shopify/sarama/pull/1050), - [#1051](https://github.com/Shopify/sarama/pull/1051)). - - Clear all metadata when we have the latest topic info - ([#1033](https://github.com/Shopify/sarama/pull/1033)). - - Make `PartitionConsumer.Close` idempotent - ([#1092](https://github.com/Shopify/sarama/pull/1092)). 
- -## Version 1.16.0 (2018-02-12) - -New Features: - - Add support for the Create/Delete Topics request/response pairs - ([#1007](https://github.com/Shopify/sarama/pull/1007), - [#1008](https://github.com/Shopify/sarama/pull/1008)). - - Add support for the Describe/Create/Delete ACL request/response pairs - ([#1009](https://github.com/Shopify/sarama/pull/1009)). - - Add support for the five transaction-related request/response pairs - ([#1016](https://github.com/Shopify/sarama/pull/1016)). - -Improvements: - - Permit setting version on mock producer responses - ([#999](https://github.com/Shopify/sarama/pull/999)). - - Add `NewMockBrokerListener` helper for testing TLS connections - ([#1019](https://github.com/Shopify/sarama/pull/1019)). - - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB - which results in much higher throughput in most cases - ([#1024](https://github.com/Shopify/sarama/pull/1024)). - - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to - reduce CPU and memory usage when processing many partitions - ([#1028](https://github.com/Shopify/sarama/pull/1028)). - - Assign relative offsets to messages in the producer to save the brokers a - recompression pass - ([#1002](https://github.com/Shopify/sarama/pull/1002), - [#1015](https://github.com/Shopify/sarama/pull/1015)). - -Bug Fixes: - - Fix producing uncompressed batches with the new protocol format - ([#1032](https://github.com/Shopify/sarama/issues/1032)). - - Fix consuming compacted topics with the new protocol format - ([#1005](https://github.com/Shopify/sarama/issues/1005)). - - Fix consuming topics with a mix of protocol formats - ([#1021](https://github.com/Shopify/sarama/issues/1021)). - - Fix consuming when the broker includes multiple batches in a single response - ([#1022](https://github.com/Shopify/sarama/issues/1022)). 
- - Fix detection of `PartialTrailingMessage` when the partial message was - truncated before the magic value indicating its version - ([#1030](https://github.com/Shopify/sarama/pull/1030)). - - Fix expectation-checking in the mock of `SyncProducer.SendMessages` - ([#1035](https://github.com/Shopify/sarama/pull/1035)). - -## Version 1.15.0 (2017-12-08) - -New Features: - - Claim official support for Kafka 1.0, though it did already work - ([#984](https://github.com/Shopify/sarama/pull/984)). - - Helper methods for Kafka version numbers to/from strings - ([#989](https://github.com/Shopify/sarama/pull/989)). - - Implement CreatePartitions request/response - ([#985](https://github.com/Shopify/sarama/pull/985)). - -Improvements: - - Add error codes 45-60 - ([#986](https://github.com/Shopify/sarama/issues/986)). - -Bug Fixes: - - Fix slow consuming for certain Kafka 0.11/1.0 configurations - ([#982](https://github.com/Shopify/sarama/pull/982)). - - Correctly determine when a FetchResponse contains the new message format - ([#990](https://github.com/Shopify/sarama/pull/990)). - - Fix producing with multiple headers - ([#996](https://github.com/Shopify/sarama/pull/996)). - - Fix handling of truncated record batches - ([#998](https://github.com/Shopify/sarama/pull/998)). - - Fix leaking metrics when closing brokers - ([#991](https://github.com/Shopify/sarama/pull/991)). - -## Version 1.14.0 (2017-11-13) - -New Features: - - Add support for the new Kafka 0.11 record-batch format, including the wire - protocol and the necessary behavioural changes in the producer and consumer. - Transactions and idempotency are not yet supported, but producing and - consuming should work with all the existing bells and whistles (batching, - compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta - of Arista Networks for this work. Part of - ([#901](https://github.com/Shopify/sarama/issues/901)). 
- -Bug Fixes: - - Fix encoding of ProduceResponse versions in test - ([#970](https://github.com/Shopify/sarama/pull/970)). - - Return partial replicas list when we have it - ([#975](https://github.com/Shopify/sarama/pull/975)). - -## Version 1.13.0 (2017-10-04) - -New Features: - - Support for FetchRequest version 3 - ([#905](https://github.com/Shopify/sarama/pull/905)). - - Permit setting version on mock FetchResponses - ([#939](https://github.com/Shopify/sarama/pull/939)). - - Add a configuration option to support storing only minimal metadata for - extremely large clusters - ([#937](https://github.com/Shopify/sarama/pull/937)). - - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets - ([#932](https://github.com/Shopify/sarama/pull/932)). - -Improvements: - - Provide the block-level timestamp when consuming compressed messages - ([#885](https://github.com/Shopify/sarama/issues/885)). - - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned - by the broker, which can be meaningful - ([#930](https://github.com/Shopify/sarama/pull/930)). - - Use a `Ticker` to reduce consumer timer overhead at the cost of higher - variance in the actual timeout - ([#933](https://github.com/Shopify/sarama/pull/933)). - -Bug Fixes: - - Gracefully handle messages with negative timestamps - ([#907](https://github.com/Shopify/sarama/pull/907)). - - Raise a proper error when encountering an unknown message version - ([#940](https://github.com/Shopify/sarama/pull/940)). - -## Version 1.12.0 (2017-05-08) - -New Features: - - Added support for the `ApiVersions` request and response pair, and Kafka - version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note - that you still need to specify the Kafka version in the Sarama configuration - for the time being. - - Added a `Brokers` method to the Client which returns the complete set of - active brokers ([#813](https://github.com/Shopify/sarama/pull/813)). 
- - Added an `InSyncReplicas` method to the Client which returns the set of all - in-sync broker IDs for the given partition, now that the Kafka versions for - which this was misleading are no longer in our supported set - ([#872](https://github.com/Shopify/sarama/pull/872)). - - Added a `NewCustomHashPartitioner` method which allows constructing a hash - partitioner with a custom hash method in case the default (FNV-1a) is not - suitable - ([#837](https://github.com/Shopify/sarama/pull/837), - [#841](https://github.com/Shopify/sarama/pull/841)). - -Improvements: - - Recognize more Kafka error codes - ([#859](https://github.com/Shopify/sarama/pull/859)). - -Bug Fixes: - - Fix an issue where decoding a malformed FetchRequest would not return the - correct error ([#818](https://github.com/Shopify/sarama/pull/818)). - - Respect ordering of group protocols in JoinGroupRequests. This fix is - transparent if you're using the `AddGroupProtocol` or - `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from - the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` - ([#812](https://github.com/Shopify/sarama/issues/812)). - - Fix an alignment-related issue with atomics on 32-bit architectures - ([#859](https://github.com/Shopify/sarama/pull/859)). - -## Version 1.11.0 (2016-12-20) - -_Important:_ As of Sarama 1.11 it is necessary to set the config value of -`Producer.Return.Successes` to true in order to use the SyncProducer. Previous -versions would silently override this value when instantiating a SyncProducer -which led to unexpected values and data races. - -New Features: - - Metrics! Thanks to Sébastien Launay for all his work on this feature - ([#701](https://github.com/Shopify/sarama/pull/701), - [#746](https://github.com/Shopify/sarama/pull/746), - [#766](https://github.com/Shopify/sarama/pull/766)). - - Add support for LZ4 compression - ([#786](https://github.com/Shopify/sarama/pull/786)). 
- - Add support for ListOffsetRequest v1 and Kafka 0.10.1 - ([#775](https://github.com/Shopify/sarama/pull/775)). - - Added a `HighWaterMarks` method to the Consumer which aggregates the - `HighWaterMarkOffset` values of its child topic/partitions - ([#769](https://github.com/Shopify/sarama/pull/769)). - -Bug Fixes: - - Fixed producing when using timestamps, compression and Kafka 0.10 - ([#759](https://github.com/Shopify/sarama/pull/759)). - - Added missing decoder methods to DescribeGroups response - ([#756](https://github.com/Shopify/sarama/pull/756)). - - Fix producer shutdown when `Return.Errors` is disabled - ([#787](https://github.com/Shopify/sarama/pull/787)). - - Don't mutate configuration in SyncProducer - ([#790](https://github.com/Shopify/sarama/pull/790)). - - Fix crash on SASL initialization failure - ([#795](https://github.com/Shopify/sarama/pull/795)). - -## Version 1.10.1 (2016-08-30) - -Bug Fixes: - - Fix the documentation for `HashPartitioner` which was incorrect - ([#717](https://github.com/Shopify/sarama/pull/717)). - - Permit client creation even when it is limited by ACLs - ([#722](https://github.com/Shopify/sarama/pull/722)). - - Several fixes to the consumer timer optimization code, regressions introduced - in v1.10.0. Go's timers are finicky - ([#730](https://github.com/Shopify/sarama/pull/730), - [#733](https://github.com/Shopify/sarama/pull/733), - [#734](https://github.com/Shopify/sarama/pull/734)). - - Handle consuming compressed relative offsets with Kafka 0.10 - ([#735](https://github.com/Shopify/sarama/pull/735)). - -## Version 1.10.0 (2016-08-02) - -_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of -Kafka you are running against (via the `config.Version` value) in order to use -features that may not be compatible with old Kafka versions. 
If you don't -specify this value it will default to 0.8.2 (the minimum supported), and trying -to use more recent features (like the offset manager) will fail with an error. - -_Also:_ The offset-manager's behaviour has been changed to match the upstream -java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and -[#713](https://github.com/Shopify/sarama/pull/713)). If you use the -offset-manager, please ensure that you are committing one *greater* than the -last consumed message offset or else you may end up consuming duplicate -messages. - -New Features: - - Support for Kafka 0.10 - ([#672](https://github.com/Shopify/sarama/pull/672), - [#678](https://github.com/Shopify/sarama/pull/678), - [#681](https://github.com/Shopify/sarama/pull/681), and others). - - Support for configuring the target Kafka version - ([#676](https://github.com/Shopify/sarama/pull/676)). - - Batch producing support in the SyncProducer - ([#677](https://github.com/Shopify/sarama/pull/677)). - - Extend producer mock to allow setting expectations on message contents - ([#667](https://github.com/Shopify/sarama/pull/667)). - -Improvements: - - Support `nil` compressed messages for deleting in compacted topics - ([#634](https://github.com/Shopify/sarama/pull/634)). - - Pre-allocate decoding errors, greatly reducing heap usage and GC time against - misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)). - - Re-use consumer expiry timers, removing one allocation per consumed message - ([#707](https://github.com/Shopify/sarama/pull/707)). - -Bug Fixes: - - Actually default the client ID to "sarama" like we say we do - ([#664](https://github.com/Shopify/sarama/pull/664)). - - Fix a rare issue where `Client.Leader` could return the wrong error - ([#685](https://github.com/Shopify/sarama/pull/685)). - - Fix a possible tight loop in the consumer - ([#693](https://github.com/Shopify/sarama/pull/693)). 
- - Match upstream's offset-tracking behaviour - ([#705](https://github.com/Shopify/sarama/pull/705)). - - Report UnknownTopicOrPartition errors from the offset manager - ([#706](https://github.com/Shopify/sarama/pull/706)). - - Fix possible negative partition value from the HashPartitioner - ([#709](https://github.com/Shopify/sarama/pull/709)). - -## Version 1.9.0 (2016-05-16) - -New Features: - - Add support for custom offset manager retention durations - ([#602](https://github.com/Shopify/sarama/pull/602)). - - Publish low-level mocks to enable testing of third-party producer/consumer - implementations ([#570](https://github.com/Shopify/sarama/pull/570)). - - Declare support for Golang 1.6 - ([#611](https://github.com/Shopify/sarama/pull/611)). - - Support for SASL plain-text auth - ([#648](https://github.com/Shopify/sarama/pull/648)). - -Improvements: - - Simplified broker locking scheme slightly - ([#604](https://github.com/Shopify/sarama/pull/604)). - - Documentation cleanup - ([#605](https://github.com/Shopify/sarama/pull/605), - [#621](https://github.com/Shopify/sarama/pull/621), - [#654](https://github.com/Shopify/sarama/pull/654)). - -Bug Fixes: - - Fix race condition shutting down the OffsetManager - ([#658](https://github.com/Shopify/sarama/pull/658)). - -## Version 1.8.0 (2016-02-01) - -New Features: - - Full support for Kafka 0.9: - - All protocol messages and fields - ([#586](https://github.com/Shopify/sarama/pull/586), - [#588](https://github.com/Shopify/sarama/pull/588), - [#590](https://github.com/Shopify/sarama/pull/590)). - - Verified that TLS support works - ([#581](https://github.com/Shopify/sarama/pull/581)). - - Fixed the OffsetManager compatibility - ([#585](https://github.com/Shopify/sarama/pull/585)). - -Improvements: - - Optimize for fewer system calls when reading from the network - ([#584](https://github.com/Shopify/sarama/pull/584)). 
- - Automatically retry `InvalidMessage` errors to match upstream behaviour - ([#589](https://github.com/Shopify/sarama/pull/589)). - -## Version 1.7.0 (2015-12-11) - -New Features: - - Preliminary support for Kafka 0.9 - ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several - caveats: - - Protocol-layer support is mostly in place - ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9 - renamed some messages and fields, which we did not in order to preserve API - compatibility. - - The producer and consumer work against 0.9, but the offset manager does - not ([#573](https://github.com/Shopify/sarama/pull/573)). - - TLS support may or may not work - ([#581](https://github.com/Shopify/sarama/pull/581)). - -Improvements: - - Don't wait for request timeouts on dead brokers, greatly speeding recovery - when the TCP connection is left hanging - ([#548](https://github.com/Shopify/sarama/pull/548)). - - Refactored part of the producer. The new version provides a much more elegant - solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also - slightly more efficient, and much more precise in calculating batch sizes - when compression is used - ([#549](https://github.com/Shopify/sarama/pull/549), - [#550](https://github.com/Shopify/sarama/pull/550), - [#551](https://github.com/Shopify/sarama/pull/551)). - -Bug Fixes: - - Fix race condition in consumer test mock - ([#553](https://github.com/Shopify/sarama/pull/553)). - -## Version 1.6.1 (2015-09-25) - -Bug Fixes: - - Fix panic that could occur if a user-supplied message value failed to encode - ([#449](https://github.com/Shopify/sarama/pull/449)). - -## Version 1.6.0 (2015-09-04) - -New Features: - - Implementation of a consumer offset manager using the APIs introduced in - Kafka 0.8.2. The API is designed mainly for integration into a future - high-level consumer, not for direct use, although it is *possible* to use it - directly. 
- ([#461](https://github.com/Shopify/sarama/pull/461)). - -Improvements: - - CRC32 calculation is much faster on machines with SSE4.2 instructions, - removing a major hotspot from most profiles - ([#255](https://github.com/Shopify/sarama/pull/255)). - -Bug Fixes: - - Make protocol decoding more robust against some malformed packets generated - by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523), - [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways - ([#528](https://github.com/Shopify/sarama/pull/528)). - - Fix a potential race condition panic in the consumer on shutdown - ([#529](https://github.com/Shopify/sarama/pull/529)). - -## Version 1.5.0 (2015-08-17) - -New Features: - - TLS-encrypted network connections are now supported. This feature is subject - to change when Kafka releases built-in TLS support, but for now this is - enough to work with TLS-terminating proxies - ([#154](https://github.com/Shopify/sarama/pull/154)). - -Improvements: - - The consumer will not block if a single partition is not drained by the user; - all other partitions will continue to consume normally - ([#485](https://github.com/Shopify/sarama/pull/485)). - - Formatting of error strings has been much improved - ([#495](https://github.com/Shopify/sarama/pull/495)). - - Internal refactoring of the producer for code cleanliness and to enable - future work ([#300](https://github.com/Shopify/sarama/pull/300)). - -Bug Fixes: - - Fix a potential deadlock in the consumer on shutdown - ([#475](https://github.com/Shopify/sarama/pull/475)). - -## Version 1.4.3 (2015-07-21) - -Bug Fixes: - - Don't include the partitioner in the producer's "fetch partitions" - circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)). - - Don't retry messages until the broker is closed when abandoning a broker in - the producer ([#468](https://github.com/Shopify/sarama/pull/468)). 
- - Update the import path for snappy-go, it has moved again and the API has - changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)). - -## Version 1.4.2 (2015-05-27) - -Bug Fixes: - - Update the import path for snappy-go, it has moved from google code to github - ([#456](https://github.com/Shopify/sarama/pull/456)). - -## Version 1.4.1 (2015-05-25) - -Improvements: - - Optimizations when decoding snappy messages, thanks to John Potocny - ([#446](https://github.com/Shopify/sarama/pull/446)). - -Bug Fixes: - - Fix hypothetical race conditions on producer shutdown - ([#450](https://github.com/Shopify/sarama/pull/450), - [#451](https://github.com/Shopify/sarama/pull/451)). - -## Version 1.4.0 (2015-05-01) - -New Features: - - The consumer now implements `Topics()` and `Partitions()` methods to enable - users to dynamically choose what topics/partitions to consume without - instantiating a full client - ([#431](https://github.com/Shopify/sarama/pull/431)). - - The partition-consumer now exposes the high water mark offset value returned - by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)). - - Added a `kafka-console-consumer` tool capable of handling multiple - partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` - ([#439](https://github.com/Shopify/sarama/pull/439), - [#442](https://github.com/Shopify/sarama/pull/442)). - -Improvements: - - The producer's logging during retry scenarios is more consistent, more - useful, and slightly less verbose - ([#429](https://github.com/Shopify/sarama/pull/429)). - - The client now shuffles its initial list of seed brokers in order to prevent - thundering herd on the first broker in the list - ([#441](https://github.com/Shopify/sarama/pull/441)). 
- -Bug Fixes: - - The producer now correctly manages its state if retries occur when it is - shutting down, fixing several instances of confusing behaviour and at least - one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)). - - The consumer now handles messages for different partitions asynchronously, - making it much more resilient to specific user code ordering - ([#325](https://github.com/Shopify/sarama/pull/325)). - -## Version 1.3.0 (2015-04-16) - -New Features: - - The client now tracks consumer group coordinators using - ConsumerMetadataRequests similar to how it tracks partition leadership using - regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)). - This adds two methods to the client API: - - `Coordinator(consumerGroup string) (*Broker, error)` - - `RefreshCoordinator(consumerGroup string) error` - -Improvements: - - ConsumerMetadataResponses now automatically create a Broker object out of the - ID/address/port combination for the Coordinator; accessing the fields - individually has been deprecated - ([#413](https://github.com/Shopify/sarama/pull/413)). - - Much improved handling of `OffsetOutOfRange` errors in the consumer. - Consumers will fail to start if the provided offset is out of range - ([#418](https://github.com/Shopify/sarama/pull/418)) - and they will automatically shut down if the offset falls out of range - ([#424](https://github.com/Shopify/sarama/pull/424)). - - Small performance improvement in encoding and decoding protocol messages - ([#427](https://github.com/Shopify/sarama/pull/427)). - -Bug Fixes: - - Fix a rare race condition in the client's background metadata refresher if - it happens to be activated while the client is being closed - ([#422](https://github.com/Shopify/sarama/pull/422)). - -## Version 1.2.0 (2015-04-07) - -Improvements: - - The producer's behaviour when `Flush.Frequency` is set is now more intuitive - ([#389](https://github.com/Shopify/sarama/pull/389)). 
- - The producer is now somewhat more memory-efficient during and after retrying - messages due to an improved queue implementation - ([#396](https://github.com/Shopify/sarama/pull/396)). - - The consumer produces much more useful logging output when leadership - changes ([#385](https://github.com/Shopify/sarama/pull/385)). - - The client's `GetOffset` method will now automatically refresh metadata and - retry once in the event of stale information or similar - ([#394](https://github.com/Shopify/sarama/pull/394)). - - Broker connections now have support for using TCP keepalives - ([#407](https://github.com/Shopify/sarama/issues/407)). - -Bug Fixes: - - The OffsetCommitRequest message now correctly implements all three possible - API versions ([#390](https://github.com/Shopify/sarama/pull/390), - [#400](https://github.com/Shopify/sarama/pull/400)). - -## Version 1.1.0 (2015-03-20) - -Improvements: - - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly - broken topics don't choke throughput - ([#373](https://github.com/Shopify/sarama/pull/373)). - -Bug Fixes: - - Fix the producer's internal reference counting in certain unusual scenarios - ([#367](https://github.com/Shopify/sarama/pull/367)). - - Fix the consumer's internal reference counting in certain unusual scenarios - ([#369](https://github.com/Shopify/sarama/pull/369)). - - Fix a condition where the producer's internal control messages could have - gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)). - - Fix an issue where invalid partition lists would be cached when asking for - metadata for a non-existant topic ([#372](https://github.com/Shopify/sarama/pull/372)). - - -## Version 1.0.0 (2015-03-17) - -Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are: - -- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking. 
-- The consumer has been rewritten to only open one connection per broker instead of one connection per partition. -- The main types of Sarama are now interfaces to make depedency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package. -- For most uses cases, it is no longer necessary to open a `Client`; this will be done for you. -- All the configuration values have been unified in the `Config` struct. -- Much improved test suite. diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go deleted file mode 100644 index aa7fb7498..000000000 --- a/vendor/github.com/Shopify/sarama/decompress.go +++ /dev/null @@ -1,61 +0,0 @@ -package sarama - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "sync" - - snappy "github.com/eapache/go-xerial-snappy" - "github.com/pierrec/lz4/v4" -) - -var ( - lz4ReaderPool = sync.Pool{ - New: func() interface{} { - return lz4.NewReader(nil) - }, - } - - gzipReaderPool sync.Pool -) - -func decompress(cc CompressionCodec, data []byte) ([]byte, error) { - switch cc { - case CompressionNone: - return data, nil - case CompressionGZIP: - var err error - reader, ok := gzipReaderPool.Get().(*gzip.Reader) - if !ok { - reader, err = gzip.NewReader(bytes.NewReader(data)) - } else { - err = reader.Reset(bytes.NewReader(data)) - } - - if err != nil { - return nil, err - } - - defer gzipReaderPool.Put(reader) - - return io.ReadAll(reader) - case CompressionSnappy: - return snappy.Decode(data) - case CompressionLZ4: - reader, ok := lz4ReaderPool.Get().(*lz4.Reader) - if !ok { - reader = lz4.NewReader(bytes.NewReader(data)) - } else { - reader.Reset(bytes.NewReader(data)) - } - defer lz4ReaderPool.Put(reader) - - return io.ReadAll(reader) - case CompressionZSTD: - return zstdDecompress(ZstdDecoderParams{}, nil, data) - default: - return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified 
(%d)", cc)} - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/.gitignore b/vendor/github.com/aws/aws-sdk-go-v2/.gitignore deleted file mode 100644 index 5f8b8c94f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -dist -/doc -/doc-staging -.yardoc -Gemfile.lock -/internal/awstesting/integration/smoke/**/importmarker__.go -/internal/awstesting/integration/smoke/_test/ -/vendor -/private/model/cli/gen-api/gen-api -.gradle/ -build/ -.idea/ -bin/ -.vscode/ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/.golangci.toml b/vendor/github.com/aws/aws-sdk-go-v2/.golangci.toml deleted file mode 100644 index 8792d0ca6..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/.golangci.toml +++ /dev/null @@ -1,27 +0,0 @@ -[run] -concurrency = 4 -timeout = "1m" -issues-exit-code = 0 -modules-download-mode = "readonly" -allow-parallel-runners = true -skip-dirs = ["internal/repotools"] -skip-dirs-use-default = true -skip-files = ["service/transcribestreaming/eventstream_test.go"] -[output] -format = "github-actions" - -[linters-settings.cyclop] -skip-tests = false - -[linters-settings.errcheck] -check-blank = true - -[linters] -disable-all = true -enable = ["errcheck"] -fast = false - -[issues] -exclude-use-default = false - -# Refer config definitions at https://golangci-lint.run/usage/configuration/#config-file diff --git a/vendor/github.com/aws/aws-sdk-go-v2/.travis.yml b/vendor/github.com/aws/aws-sdk-go-v2/.travis.yml deleted file mode 100644 index 4b498a7a2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/.travis.yml +++ /dev/null @@ -1,31 +0,0 @@ -language: go -sudo: true -dist: bionic - -branches: - only: - - main - -os: - - linux - - osx - # Travis doesn't work with windows and Go tip - #- windows - -go: - - tip - -matrix: - allow_failures: - - go: tip - -before_install: - - if [ "$TRAVIS_OS_NAME" = "windows" ]; then choco install make; fi - - (cd /tmp/; go get golang.org/x/lint/golint) - -env: - - EACHMODULE_CONCURRENCY=4 
- -script: - - make ci-test-no-generate; - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md deleted file mode 100644 index 48105a44b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md +++ /dev/null @@ -1,12065 +0,0 @@ -# Release (2023-11-01) - -## General Highlights -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.74.0](service/connect/CHANGELOG.md#v1740-2023-11-01) - * **Feature**: Adds the BatchGetFlowAssociation API which returns flow associations (flow-resource) corresponding to the list of resourceArns supplied in the request. This release also adds IsDefault, LastModifiedRegion and LastModifiedTime fields to the responses of several Describe and List APIs. -* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.19.0](service/globalaccelerator/CHANGELOG.md#v1190-2023-11-01) - * **Feature**: Global Accelerator now support accelerators with cross account endpoints. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.61.0](service/rds/CHANGELOG.md#v1610-2023-11-01) - * **Feature**: This release adds support for customized networking resources to Amazon RDS Custom. -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.35.0](service/redshift/CHANGELOG.md#v1350-2023-11-01) - * **Feature**: Added support for Multi-AZ deployments for Provisioned RA3 clusters that provide 99.99% SLA availability. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.115.0](service/sagemaker/CHANGELOG.md#v11150-2023-11-01) - * **Feature**: Support for batch transform input in Model dashboard - -# Release (2023-10-31) - -## General Highlights -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.16.0](service/amplify/CHANGELOG.md#v1160-2023-10-31) - * **Feature**: Add backend field to CreateBranch and UpdateBranch requests. Add pagination support for ListApps, ListDomainAssociations, ListBranches, and ListJobs -* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.20.0](service/applicationinsights/CHANGELOG.md#v1200-2023-10-31) - * **Feature**: Automate attaching managed policies -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.129.0](service/ec2/CHANGELOG.md#v11290-2023-10-31) - * **Feature**: Capacity Blocks for ML are a new EC2 purchasing option for reserving GPU instances on a future date to support short duration machine learning (ML) workloads. Capacity Blocks automatically place instances close together inside Amazon EC2 UltraClusters for low-latency, high-throughput networking. 
-* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.8.0](service/m2/CHANGELOG.md#v180-2023-10-31) - * **Feature**: Added name filter ability for ListDataSets API, added ForceUpdate for Updating environment and BatchJob submission using S3BatchJobIdentifier -* `github.com/aws/aws-sdk-go-v2/service/neptunedata`: [v1.1.0](service/neptunedata/CHANGELOG.md#v110-2023-10-31) - * **Feature**: Minor change to not retry CancelledByUserException -* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.20.0](service/translate/CHANGELOG.md#v1200-2023-10-31) - * **Feature**: Added support for Brevity translation settings feature. - -# Release (2023-10-30) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.72.0](service/connect/CHANGELOG.md#v1720-2023-10-30) - * **Feature**: This release adds InstanceId field for phone number APIs. -* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.22.0](service/dataexchange/CHANGELOG.md#v1220-2023-10-30) - * **Feature**: We added a new API action: SendDataSetNotification. -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.30.0](service/datasync/CHANGELOG.md#v1300-2023-10-30) - * **Feature**: Platform version changes to support AL1 deprecation initiative. -* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.14.0](service/finspace/CHANGELOG.md#v1140-2023-10-30) - * **Feature**: Introducing new API UpdateKxClusterCodeConfiguration, introducing new cache types for clusters and introducing new deployment modes for updating clusters. -* `github.com/aws/aws-sdk-go-v2/service/mediapackagev2`: [v1.4.0](service/mediapackagev2/CHANGELOG.md#v140-2023-10-30) - * **Feature**: This feature allows customers to create a combination of manifest filtering, startover and time delay configuration that applies to all egress requests by default. 
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.59.0](service/rds/CHANGELOG.md#v1590-2023-10-30) - * **Feature**: This release launches the CreateIntegration, DeleteIntegration, and DescribeIntegrations APIs to manage zero-ETL Integrations. -* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.9.0](service/redshiftserverless/CHANGELOG.md#v190-2023-10-30) - * **Feature**: Added support for custom domain names for Amazon Redshift Serverless workgroups. This feature enables customers to create a custom domain name and use ACM to generate fully secure connections to it. -* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.14.0](service/resiliencehub/CHANGELOG.md#v1140-2023-10-30) - * **Feature**: Introduced the ability to filter applications by their last assessment date and time and have included metrics for the application's estimated workload Recovery Time Objective (RTO) and estimated workload Recovery Point Objective (RPO). -* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.19.0](service/s3outposts/CHANGELOG.md#v1190-2023-10-30) - * **Feature**: Updated ListOutpostsWithS3 API response to include S3OutpostArn for use with AWS RAM. -* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.16.1](service/wisdom/CHANGELOG.md#v1161-2023-10-30) - * **Documentation**: This release added necessary API documents on creating a Wisdom knowledge base to integrate with S3. - -# Release (2023-10-27) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.30.0](service/emr/CHANGELOG.md#v1300-2023-10-27) - * **Feature**: Updated CreateCluster API request and DescribeCluster API responses to include EbsRootVolumeIops, and EbsRootVolumeThroughput attributes that specify the user configured root volume IOPS and throughput for Amazon EBS root device volume. 
This feature will be available from Amazon EMR releases 6.15.0 -* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.24.0](service/neptune/CHANGELOG.md#v1240-2023-10-27) - * **Feature**: Update TdeCredentialPassword type to SensitiveString -* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.23.1](service/pinpoint/CHANGELOG.md#v1231-2023-10-27) - * **Documentation**: Updated documentation to describe the case insensitivity for EndpointIds. -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.33.0](service/redshift/CHANGELOG.md#v1330-2023-10-27) - * **Feature**: added support to create a dual stack cluster -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.40.1](service/wafv2/CHANGELOG.md#v1401-2023-10-27) - * **Documentation**: Updates the descriptions for the calls that manage web ACL associations, to provide information for customer-managed IAM policies. - -# Release (2023-10-26) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.26.0](service/appstream/CHANGELOG.md#v1260-2023-10-26) - * **Feature**: This release introduces multi-session fleets, allowing customers to provision more than one user session on a single fleet instance. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.128.0](service/ec2/CHANGELOG.md#v11280-2023-10-26) - * **Feature**: Launching GetSecurityGroupsForVpc API. This API gets security groups that can be associated by the AWS account making the request with network interfaces in the specified VPC. -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.32.0](service/networkfirewall/CHANGELOG.md#v1320-2023-10-26) - * **Feature**: Network Firewall now supports inspection of outbound SSL/TLS traffic. -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.23.0](service/opensearch/CHANGELOG.md#v1230-2023-10-26) - * **Feature**: You can specify ipv4 or dualstack IPAddressType for cluster endpoints. 
If you specify IPAddressType as dualstack, the new endpoint will be visible under the 'EndpointV2' parameter and will support IPv4 and IPv6 requests. Whereas, the 'Endpoint' will continue to serve IPv4 requests. -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.32.0](service/redshift/CHANGELOG.md#v1320-2023-10-26) - * **Feature**: Add Redshift APIs GetResourcePolicy, DeleteResourcePolicy, PutResourcePolicy and DescribeInboundIntegrations for the new Amazon Redshift Zero-ETL integration feature, which can be used to control data ingress into Redshift namespace, and view inbound integrations. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.113.0](service/sagemaker/CHANGELOG.md#v11130-2023-10-26) - * **Feature**: Amazon Sagemaker Autopilot now supports Text Generation jobs. -* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.23.0](service/sns/CHANGELOG.md#v1230-2023-10-26) - * **Feature**: Message Archiving and Replay is now supported in Amazon SNS for FIFO topics. -* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.6.0](service/ssmsap/CHANGELOG.md#v160-2023-10-26) - * **Feature**: AWS Systems Manager for SAP added support for registration and discovery of SAP ABAP applications -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.35.0](service/transfer/CHANGELOG.md#v1350-2023-10-26) - * **Feature**: No API changes from previous release. This release migrated the model to Smithy keeping all features unchanged. 
- -# Release (2023-10-25) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.8.0](service/connectcases/CHANGELOG.md#v180-2023-10-25) - * **Feature**: Increase maximum length of CommentBody to 3000, and increase maximum length of StringValue to 1500 -* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.20.0](service/groundstation/CHANGELOG.md#v1200-2023-10-25) - * **Feature**: This release will allow KMS alias names to be used when creating Mission Profiles -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.24.0](service/iam/CHANGELOG.md#v1240-2023-10-25) - * **Feature**: Updates to GetAccessKeyLastUsed action to replace NoSuchEntity error with AccessDeniedException error. - -# Release (2023-10-24) - -## General Highlights -* **Feature**: **BREAKFIX**: Correct nullability and default value representation of various input fields across a large number of services. Calling code that references one or more of the affected fields will need to update usage accordingly. See [2162](https://github.com/aws/aws-sdk-go-v2/issues/2162). -* **Feature**: **BREAKFIX**: Correct nullability representation of APIGateway-based services. -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.18.0](service/codepipeline/CHANGELOG.md#v1180-2023-10-24) - * **Feature**: Add ability to trigger pipelines from git tags, define variables at pipeline level and new pipeline type V2. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.127.0](service/ec2/CHANGELOG.md#v11270-2023-10-24) - * **Feature**: This release updates the documentation for InstanceInterruptionBehavior and HibernationOptionsRequest to more accurately describe the behavior of these two parameters when using Spot hibernation. 
-* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.30.0](service/eks/CHANGELOG.md#v1300-2023-10-24) - * **Feature**: Added support for Cluster Subnet and Security Group mutability. -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.23.0](service/iam/CHANGELOG.md#v1230-2023-10-24) - * **Feature**: Add the partitional endpoint for IAM in iso-f. -* `github.com/aws/aws-sdk-go-v2/service/migrationhubconfig`: [v1.16.0](service/migrationhubconfig/CHANGELOG.md#v1160-2023-10-24) - * **Feature**: This release introduces DeleteHomeRegionControl API that customers can use to delete the Migration Hub Home Region configuration -* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.12.0](service/migrationhubstrategy/CHANGELOG.md#v1120-2023-10-24) - * **Feature**: This release introduces multi-data-source feature in Migration Hub Strategy Recommendations. This feature now supports vCenter as a data source to fetch inventory in addition to ADS and Import from file workflow that is currently supported with MHSR collector. -* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.6.0](service/opensearchserverless/CHANGELOG.md#v160-2023-10-24) - * **Feature**: This release includes the following new APIs: CreateLifecyclePolicy, UpdateLifecyclePolicy, BatchGetLifecyclePolicy, DeleteLifecyclePolicy, ListLifecyclePolicies and BatchGetEffectiveLifecyclePolicy to support the data lifecycle management feature. - -# Release (2023-10-23) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/marketplacecommerceanalytics`: [v1.15.0](service/marketplacecommerceanalytics/CHANGELOG.md#v1150-2023-10-23) - * **Feature**: The StartSupportDataExport operation has been deprecated as part of the Product Support Connection deprecation. As of December 2022, Product Support Connection is no longer supported. 
-* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.20.0](service/networkmanager/CHANGELOG.md#v1200-2023-10-23) - * **Feature**: This release adds API support for Tunnel-less Connect (NoEncap Protocol) for AWS Cloud WAN -* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.8.0](service/redshiftserverless/CHANGELOG.md#v180-2023-10-23) - * **Feature**: This release adds support for customers to see the patch version and workgroup version in Amazon Redshift Serverless. -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.32.0](service/rekognition/CHANGELOG.md#v1320-2023-10-23) - * **Feature**: Amazon Rekognition introduces StartMediaAnalysisJob, GetMediaAnalysisJob, and ListMediaAnalysisJobs operations to run a bulk analysis of images with a Detect Moderation model. - -# Release (2023-10-20) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.22.0](service/appconfig/CHANGELOG.md#v1220-2023-10-20) - * **Feature**: Update KmsKeyIdentifier constraints to support AWS KMS multi-Region keys. -* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.19.0](service/appintegrations/CHANGELOG.md#v1190-2023-10-20) - * **Feature**: Updated ScheduleConfig to be an optional input to CreateDataIntegration to support event driven downloading of files from sources such as Amazon s3 using Amazon Connect AppIntegrations. -* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.19.0](service/applicationdiscoveryservice/CHANGELOG.md#v1190-2023-10-20) - * **Feature**: This release introduces three new APIs: StartBatchDeleteConfigurationTask, DescribeBatchDeleteConfigurationTask, and BatchDeleteAgents. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.70.0](service/connect/CHANGELOG.md#v1700-2023-10-20) - * **Feature**: This release adds support for updating phone number metadata, such as phone number description. 
-* `github.com/aws/aws-sdk-go-v2/service/medicalimaging`: [v1.2.3](service/medicalimaging/CHANGELOG.md#v123-2023-10-20) - * **Documentation**: Updates on documentation links -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.39.0](service/ssm/CHANGELOG.md#v1390-2023-10-20) - * **Feature**: This release introduces a new API: DeleteOpsItem. This allows deletion of an OpsItem. - -# Release (2023-10-19) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.126.0](service/ec2/CHANGELOG.md#v11260-2023-10-19) - * **Feature**: Amazon EC2 C7a instances, powered by 4th generation AMD EPYC processors, are ideal for high performance, compute-intensive workloads such as high performance computing. Amazon EC2 R7i instances are next-generation memory optimized and powered by custom 4th Generation Intel Xeon Scalable processors. -* `github.com/aws/aws-sdk-go-v2/service/managedblockchainquery`: [v1.3.0](service/managedblockchainquery/CHANGELOG.md#v130-2023-10-19) - * **Feature**: This release adds support for Ethereum Sepolia network -* `github.com/aws/aws-sdk-go-v2/service/neptunedata`: [v1.0.4](service/neptunedata/CHANGELOG.md#v104-2023-10-19) - * **Documentation**: Doc changes to add IAM action mappings for the data actions. 
-* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.11.0](service/omics/CHANGELOG.md#v1110-2023-10-19) - * **Feature**: This change enables customers to retrieve failure reasons with detailed status messages for their failed runs -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.21.0](service/opensearch/CHANGELOG.md#v1210-2023-10-19) - * **Feature**: Added Cluster Administrative options for node restart, opensearch process restart and opensearch dashboard restart for Multi-AZ without standby domains -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.46.0](service/quicksight/CHANGELOG.md#v1460-2023-10-19) - * **Feature**: This release adds the following: 1) Trino and Starburst Database Connectors 2) Custom total for tables and pivot tables 3) Enable restricted folders 4) Add rolling dates for time equality filters 5) Refine DataPathValue and introduce DataPathType 6) Add SeriesType to ReferenceLineDataConfiguration -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.21.6](service/secretsmanager/CHANGELOG.md#v1216-2023-10-19) - * **Documentation**: Documentation updates for Secrets Manager -* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.22.0](service/servicecatalog/CHANGELOG.md#v1220-2023-10-19) - * **Feature**: Introduce support for EXTERNAL product and provisioning artifact type in CreateProduct and CreateProvisioningArtifact APIs. -* `github.com/aws/aws-sdk-go-v2/service/verifiedpermissions`: [v1.3.0](service/verifiedpermissions/CHANGELOG.md#v130-2023-10-19) - * **Feature**: Improving Amazon Verified Permissions Create experience -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.31.3](service/workspaces/CHANGELOG.md#v1313-2023-10-19) - * **Documentation**: Documentation updates for WorkSpaces - -# Release (2023-10-18) - -## General Highlights -* **Feature**: Add handwritten paginators that were present in some services in the v1 SDK. 
-* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.18.11](service/cloud9/CHANGELOG.md#v11811-2023-10-18) - * **Documentation**: Update to imageId parameter behavior and dates updated. -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.23.0](service/dynamodb/CHANGELOG.md#v1230-2023-10-18) - * **Documentation**: Updating descriptions for several APIs. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.44.0](service/kendra/CHANGELOG.md#v1440-2023-10-18) - * **Feature**: Changes for a new feature in Amazon Kendra's Query API to Collapse/Expand query results -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.57.0](service/rds/CHANGELOG.md#v1570-2023-10-18) - * **Feature**: This release adds support for upgrading the storage file system configuration on the DB instance using a blue/green deployment or a read replica. -* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.16.0](service/wisdom/CHANGELOG.md#v1160-2023-10-18) - * **Feature**: This release adds an max limit of 25 recommendation ids for NotifyRecommendationsReceived API. - -# Release (2023-10-17) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.18.0](service/applicationdiscoveryservice/CHANGELOG.md#v1180-2023-10-17) - * **Feature**: This release introduces three new APIs: StartBatchDeleteConfigurationTask, DescribeBatchDeleteConfigurationTask, and BatchDeleteAgents. -* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.17.0](service/codepipeline/CHANGELOG.md#v1170-2023-10-17) - * **Feature**: Add retryMode ALL_ACTIONS to RetryStageExecution API that retries a failed stage starting from first action in the stage -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.30.4](service/ecs/CHANGELOG.md#v1304-2023-10-17) - * **Documentation**: Documentation only updates to address Amazon ECS tickets. 
-* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.29.0](service/guardduty/CHANGELOG.md#v1290-2023-10-17) - * **Feature**: Add domainWithSuffix finding field to dnsRequestAction -* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.23.0](service/kafka/CHANGELOG.md#v1230-2023-10-17) - * **Feature**: AWS Managed Streaming for Kafka is launching MSK Replicator, a new feature that enables customers to reliably replicate data across Amazon MSK clusters in same or different AWS regions. You can now use SDK to create, list, describe, delete, update, and manage tags of MSK Replicators. -* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.14.0](service/route53recoverycluster/CHANGELOG.md#v1140-2023-10-17) - * **Feature**: Adds Owner field to ListRoutingControls API. -* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.14.0](service/route53recoverycontrolconfig/CHANGELOG.md#v1140-2023-10-17) - * **Feature**: Adds permissions for GetResourcePolicy to support returning details about AWS Resource Access Manager resource policies for shared resources. - -# Release (2023-10-16) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/config`: [v1.19.0](config/CHANGELOG.md#v1190-2023-10-16) - * **Feature**: Modify logic of retrieving user agent appID from env config -* `github.com/aws/aws-sdk-go-v2/feature/cloudfront/sign`: [v1.4.0](feature/cloudfront/sign/CHANGELOG.md#v140-2023-10-16) - * **Feature**: Add support for loading PKCS8-formatted private keys. 
-* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.35.0](service/cloudformation/CHANGELOG.md#v1350-2023-10-16) - * **Feature**: SDK and documentation updates for UpdateReplacePolicy -* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.17.0](service/drs/CHANGELOG.md#v1170-2023-10-16) - * **Feature**: Updated existing API to allow AWS Elastic Disaster Recovery support of launching recovery into existing EC2 instances. -* `github.com/aws/aws-sdk-go-v2/service/entityresolution`: [v1.3.0](service/entityresolution/CHANGELOG.md#v130-2023-10-16) - * **Feature**: This launch expands our matching techniques to include provider-based matching to help customer match, link, and enhance records with minimal data movement. With data service providers, we have removed the need for customers to build bespoke integrations. -* `github.com/aws/aws-sdk-go-v2/service/managedblockchainquery`: [v1.2.0](service/managedblockchainquery/CHANGELOG.md#v120-2023-10-16) - * **Feature**: This release introduces two new APIs: GetAssetContract and ListAssetContracts. This release also adds support for Bitcoin Testnet. -* `github.com/aws/aws-sdk-go-v2/service/mediapackagev2`: [v1.3.0](service/mediapackagev2/CHANGELOG.md#v130-2023-10-16) - * **Feature**: This release allows customers to manage MediaPackage v2 resource using CloudFormation. -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.20.0](service/opensearch/CHANGELOG.md#v1200-2023-10-16) - * **Feature**: This release allows customers to list and associate optional plugin packages with compatible Amazon OpenSearch Service clusters for enhanced functionality. -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.30.0](service/redshift/CHANGELOG.md#v1300-2023-10-16) - * **Feature**: Added support for managing credentials of provisioned cluster admin using AWS Secrets Manager. 
-* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.7.0](service/redshiftserverless/CHANGELOG.md#v170-2023-10-16) - * **Feature**: Added support for managing credentials of serverless namespace admin using AWS Secrets Manager. -* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.21.0](service/sesv2/CHANGELOG.md#v1210-2023-10-16) - * **Feature**: This release provides enhanced visibility into your SES identity verification status. This will offer you more actionable insights, enabling you to promptly address any verification-related issues. -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.34.2](service/transfer/CHANGELOG.md#v1342-2023-10-16) - * **Documentation**: Documentation updates for AWS Transfer Family -* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.19.0](service/xray/CHANGELOG.md#v1190-2023-10-16) - * **Feature**: This release enhances GetTraceSummaries API to support new TimeRangeType Service to query trace summaries by segment end time. - -# Release (2023-10-12) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.21.2 - * **Bug Fix**: Improve recognition of retryable DNS errors. -* `github.com/aws/aws-sdk-go-v2/config`: [v1.18.45](config/CHANGELOG.md#v11845-2023-10-12) - * **Bug Fix**: Fail to load config if an explicitly provided profile doesn't exist. -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.27.0](service/auditmanager/CHANGELOG.md#v1270-2023-10-12) - * **Feature**: This release introduces a new limit to the awsAccounts parameter. When you create or update an assessment, there is now a limit of 200 AWS accounts that can be specified in the assessment scope. -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.31.0](service/autoscaling/CHANGELOG.md#v1310-2023-10-12) - * **Feature**: Update the NotificationMetadata field to only allow visible ascii characters. 
Add paginators to DescribeInstanceRefreshes, DescribeLoadBalancers, and DescribeLoadBalancerTargetGroups -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.37.0](service/configservice/CHANGELOG.md#v1370-2023-10-12) - * **Feature**: Add enums for resource types supported by Config -* `github.com/aws/aws-sdk-go-v2/service/controltower`: [v1.4.0](service/controltower/CHANGELOG.md#v140-2023-10-12) - * **Feature**: Added new EnabledControl resource details to ListEnabledControls API and added new GetEnabledControl API. -* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.29.0](service/customerprofiles/CHANGELOG.md#v1290-2023-10-12) - * **Feature**: Adds sensitive trait to various shapes in Customer Profiles Calculated Attribute API model. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.125.0](service/ec2/CHANGELOG.md#v11250-2023-10-12) - * **Feature**: This release adds Ubuntu Pro as a supported platform for On-Demand Capacity Reservations and adds support for setting an Amazon Machine Image (AMI) to disabled state. Disabling the AMI makes it private if it was previously shared, and prevents new EC2 instance launches from it. -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.21.6](service/elasticloadbalancingv2/CHANGELOG.md#v1216-2023-10-12) - * **Documentation**: This release enables routing policies with Availability Zone affinity for Network Load Balancers. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.63.0](service/glue/CHANGELOG.md#v1630-2023-10-12) - * **Feature**: Extending version control support to GitLab and Bitbucket from AWSGlue -* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.17.0](service/inspector2/CHANGELOG.md#v1170-2023-10-12) - * **Feature**: Add MacOs ec2 platform support -* `github.com/aws/aws-sdk-go-v2/service/ivsrealtime`: [v1.5.0](service/ivsrealtime/CHANGELOG.md#v150-2023-10-12) - * **Feature**: Update GetParticipant to return additional metadata. 
-* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.40.0](service/lambda/CHANGELOG.md#v1400-2023-10-12) - * **Feature**: Adds support for Lambda functions to access Dual-Stack subnets over IPv6, via an opt-in flag in CreateFunction and UpdateFunctionConfiguration APIs -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.28.0](service/location/CHANGELOG.md#v1280-2023-10-12) - * **Feature**: This release adds endpoint updates for all AWS Location resource operations. -* `github.com/aws/aws-sdk-go-v2/service/machinelearning`: [v1.18.0](service/machinelearning/CHANGELOG.md#v1180-2023-10-12) - * **Feature**: This release marks Password field as sensitive -* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.21.9](service/pricing/CHANGELOG.md#v1219-2023-10-12) - * **Documentation**: Documentation updates for Price List -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.56.0](service/rds/CHANGELOG.md#v1560-2023-10-12) - * **Feature**: This release adds support for adding a dedicated log volume to open-source RDS instances. -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.31.0](service/rekognition/CHANGELOG.md#v1310-2023-10-12) - * **Feature**: Amazon Rekognition introduces support for Custom Moderation. This allows the enhancement of accuracy for detect moderation labels operations by creating custom adapters tuned on customer data. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.111.0](service/sagemaker/CHANGELOG.md#v11110-2023-10-12) - * **Feature**: Amazon SageMaker Canvas adds KendraSettings and DirectDeploySettings support for CanvasAppSettings -* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.25.0](service/textract/CHANGELOG.md#v1250-2023-10-12) - * **Feature**: This release adds 9 new APIs for adapter and adapter version management, 3 new APIs for tagging, and updates AnalyzeDocument and StartDocumentAnalysis API parameters for using adapters. 
-* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.29.0](service/transcribe/CHANGELOG.md#v1290-2023-10-12) - * **Feature**: This release is to enable m4a format to customers -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.31.2](service/workspaces/CHANGELOG.md#v1312-2023-10-12) - * **Documentation**: Updated the CreateWorkspaces action documentation to clarify that the PCoIP protocol is only available for Windows bundles. - -# Release (2023-10-06) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.124.0](service/ec2/CHANGELOG.md#v11240-2023-10-06) - * **Feature**: Documentation updates for Elastic Compute Cloud (EC2). -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.33.0](service/fsx/CHANGELOG.md#v1330-2023-10-06) - * **Feature**: After performing steps to repair the Active Directory configuration of a file system, use this action to initiate the process of attempting to recover to the file system. 
-* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.18.0](service/marketplacecatalog/CHANGELOG.md#v1180-2023-10-06) - * **Feature**: This release adds support for Document type as an alternative for stringified JSON for StartChangeSet, DescribeChangeSet and DescribeEntity APIs -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.45.0](service/quicksight/CHANGELOG.md#v1450-2023-10-06) - * **Feature**: NullOption in FilterListConfiguration; Dataset schema/table max length increased; Support total placement for pivot table visual; Lenient mode relaxes the validation to create resources with definition; Data sources can be added to folders; Redshift data sources support IAM Role-based authentication -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.34.0](service/transfer/CHANGELOG.md#v1340-2023-10-06) - * **Feature**: This release updates the max character limit of PreAuthenticationLoginBanner and PostAuthenticationLoginBanner to 4096 characters - -# Release (2023-10-05) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.10.0](service/omics/CHANGELOG.md#v1100-2023-10-05) - * **Feature**: Add Etag Support for Omics Storage in ListReadSets and GetReadSetMetadata API -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.55.1](service/rds/CHANGELOG.md#v1551-2023-10-05) - * **Documentation**: Updates Amazon RDS documentation for corrections and minor improvements. -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.30.0](service/route53/CHANGELOG.md#v1300-2023-10-05) - * **Feature**: Add hostedzonetype filter to ListHostedZones API. 
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.37.0](service/securityhub/CHANGELOG.md#v1370-2023-10-05) - * **Feature**: Added new resource detail objects to ASFF, including resources for AwsEventsEventbus, AwsEventsEndpoint, AwsDmsEndpoint, AwsDmsReplicationTask, AwsDmsReplicationInstance, AwsRoute53HostedZone, and AwsMskCluster -* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.21.0](service/storagegateway/CHANGELOG.md#v1210-2023-10-05) - * **Feature**: Add SoftwareVersion to response of DescribeGatewayInformation. -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.31.0](service/workspaces/CHANGELOG.md#v1310-2023-10-05) - * **Feature**: This release introduces Manage applications. This feature allows users to manage their WorkSpaces applications by associating or disassociating their WorkSpaces with applications. The DescribeWorkspaces API will now additionally return OperatingSystemName in its responses. - -# Release (2023-10-04) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.21.0](service/appconfig/CHANGELOG.md#v1210-2023-10-04) - * **Feature**: AWS AppConfig introduces KMS customer-managed key (CMK) encryption support for data saved to AppConfig's hosted configuration store. -* `github.com/aws/aws-sdk-go-v2/service/datazone`: [v1.0.0](service/datazone/CHANGELOG.md#v100-2023-10-04) - * **Release**: New AWS service client module - * **Feature**: Initial release of Amazon DataZone -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.28.0](service/mediatailor/CHANGELOG.md#v1280-2023-10-04) - * **Feature**: Updates DescribeVodSource to include a list of ad break opportunities in the response -* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.21.0](service/mgn/CHANGELOG.md#v1210-2023-10-04) - * **Feature**: This release includes the following new APIs: ListConnectors, CreateConnector, UpdateConnector, DeleteConnector and UpdateSourceServer to support the source action framework feature. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.110.0](service/sagemaker/CHANGELOG.md#v11100-2023-10-04) - * **Feature**: Adding support for AdditionalS3DataSource, a data source used for training or inference that is in addition to the input dataset or model data. - -# Release (2023-10-03) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.69.0](service/connect/CHANGELOG.md#v1690-2023-10-03) - * **Feature**: GetMetricDataV2 API: Update to include new metrics CONTACTS_RESOLVED_IN_X , AVG_HOLD_TIME_ALL_CONTACTS , AVG_RESOLUTION_TIME , ABANDONMENT_RATE , AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS with added features: Interval Period, TimeZone, Negate MetricFilters, Extended date time range. -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.27.0](service/location/CHANGELOG.md#v1270-2023-10-03) - * **Feature**: Amazon Location Service adds support for bounding polygon queries. Additionally, the GeofenceCount field has been added to the DescribeGeofenceCollection API response. -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.43.0](service/mediaconvert/CHANGELOG.md#v1430-2023-10-03) - * **Feature**: This release adds the ability to replace video frames without modifying the audio essence. -* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.4.0](service/oam/CHANGELOG.md#v140-2023-10-03) - * **Feature**: This release adds support for sharing AWS::ApplicationInsights::Application resources. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.109.0](service/sagemaker/CHANGELOG.md#v11090-2023-10-03) - * **Feature**: This release allows users to run Selective Execution in SageMaker Pipelines without SourcePipelineExecutionArn if selected steps do not have any dependent steps. 
-* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.23.0](service/wellarchitected/CHANGELOG.md#v1230-2023-10-03) - * **Feature**: AWS Well-Architected now supports Review Templates that allows you to create templates with pre-filled answers for Well-Architected and Custom Lens best practices. - -# Release (2023-10-02) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/bedrock`: [v1.1.0](service/bedrock/CHANGELOG.md#v110-2023-10-02) - * **Feature**: Provisioned throughput feature with Amazon and third-party base models, and update validators for model identifier and taggable resource ARNs. -* `github.com/aws/aws-sdk-go-v2/service/bedrockruntime`: [v1.1.0](service/bedrockruntime/CHANGELOG.md#v110-2023-10-02) - * **Feature**: Add model timeout exception for InvokeModelWithResponseStream API and update validator for invoke model identifier. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.123.0](service/ec2/CHANGELOG.md#v11230-2023-10-02) - * **Feature**: Introducing Amazon EC2 R7iz instances with 3.9 GHz sustained all-core turbo frequency and deliver up to 20% better performance than previous generation z1d instances. -* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.16.6](service/managedblockchain/CHANGELOG.md#v1166-2023-10-02) - * **Documentation**: Remove Rinkeby as option from Ethereum APIs -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.55.0](service/rds/CHANGELOG.md#v1550-2023-10-02) - * **Feature**: Adds DefaultCertificateForNewLaunches field in the DescribeCertificates API response. -* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.15.0](service/sso/CHANGELOG.md#v1150-2023-10-02) - * **Feature**: Fix FIPS Endpoints in aws-us-gov. 
-* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.23.0](service/sts/CHANGELOG.md#v1230-2023-10-02) - * **Feature**: STS API updates for assumeRole -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.33.9](service/transfer/CHANGELOG.md#v1339-2023-10-02) - * **Documentation**: Documentation updates for AWS Transfer Family - -# Release (2023-09-28) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/bedrock`: [v1.0.0](service/bedrock/CHANGELOG.md#v100-2023-09-28) - * **Release**: New AWS service client module - * **Feature**: Model Invocation logging added to enable or disable logs in customer account. Model listing and description support added. Provisioned Throughput feature added. Custom model support added for creating custom models. Also includes list, and delete functions for custom model. -* `github.com/aws/aws-sdk-go-v2/service/bedrockruntime`: [v1.0.0](service/bedrockruntime/CHANGELOG.md#v100-2023-09-28) - * **Release**: New AWS service client module - * **Feature**: Run Inference: Added support to run the inference on models. Includes set of APIs for running inference in streaming and non-streaming mode. -* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.17.0](service/budgets/CHANGELOG.md#v1170-2023-09-28) - * **Feature**: Update DescribeBudgets and DescribeBudgetNotificationsForAccount MaxResults limit to 1000. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.122.0](service/ec2/CHANGELOG.md#v11220-2023-09-28) - * **Feature**: Adds support for Customer Managed Key encryption for Amazon Verified Access resources -* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.6.0](service/iotfleetwise/CHANGELOG.md#v160-2023-09-28) - * **Feature**: AWS IoT FleetWise now supports encryption through a customer managed AWS KMS key. The PutEncryptionConfiguration and GetEncryptionConfiguration APIs were added. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.108.0](service/sagemaker/CHANGELOG.md#v11080-2023-09-28) - * **Feature**: Online store feature groups supports Standard and InMemory tier storage types for low latency storage for real-time data retrieval. The InMemory tier supports collection types List, Set, and Vector. -* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.18.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1180-2023-09-28) - * **Feature**: Feature Store supports read/write of records with collection type features. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.39.1](service/wafv2/CHANGELOG.md#v1391-2023-09-28) - * **Documentation**: Correct and improve the documentation for the FieldToMatch option JA3 fingerprint. - -# Release (2023-09-27) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.27.0](service/cognitoidentityprovider/CHANGELOG.md#v1270-2023-09-27) - * **Feature**: The UserPoolType Status field is no longer used. -* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.19.0](service/firehose/CHANGELOG.md#v1190-2023-09-27) - * **Feature**: Features : Adding support for new data ingestion source to Kinesis Firehose - AWS Managed Services Kafka. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.40.0](service/iot/CHANGELOG.md#v1400-2023-09-27) - * **Feature**: Added support for IoT Rules Engine Kafka Action Headers -* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.24.0](service/textract/CHANGELOG.md#v1240-2023-09-27) - * **Feature**: This release adds new feature - Layout to Analyze Document API which can automatically extract layout elements such as titles, paragraphs, headers, section headers, lists, page numbers, footers, table areas, key-value areas and figure areas and order the elements as a human would read. 
- -# Release (2023-09-26) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.18.0](service/appintegrations/CHANGELOG.md#v1180-2023-09-26) - * **Feature**: The Amazon AppIntegrations service adds a set of APIs (in preview) to manage third party applications to be used in Amazon Connect agent workspace. -* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.21.0](service/apprunner/CHANGELOG.md#v1210-2023-09-26) - * **Feature**: This release allows an App Runner customer to specify a custom source directory to run the build & start command. This change allows App Runner to support monorepo based repositories -* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.18.1](service/codedeploy/CHANGELOG.md#v1181-2023-09-26) - * **Documentation**: CodeDeploy now supports In-place and Blue/Green EC2 deployments with multiple Classic Load Balancers and multiple Target Groups. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.68.0](service/connect/CHANGELOG.md#v1680-2023-09-26) - * **Feature**: This release updates a set of Amazon Connect APIs that provides the ability to integrate third party applications in the Amazon Connect agent workspace. 
-* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.22.0](service/dynamodb/CHANGELOG.md#v1220-2023-09-26) - * **Feature**: Amazon DynamoDB now supports Incremental Export as an enhancement to the existing Export Table -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.121.0](service/ec2/CHANGELOG.md#v11210-2023-09-26) - * **Feature**: The release includes AWS verified access to support FIPs compliance in North America regions -* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.24.0](service/lakeformation/CHANGELOG.md#v1240-2023-09-26) - * **Feature**: This release adds three new API support "CreateLakeFormationOptIn", "DeleteLakeFormationOptIn" and "ListLakeFormationOptIns", and also updates the corresponding documentation. -* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.22.6](service/pinpoint/CHANGELOG.md#v1226-2023-09-26) - * **Documentation**: Update documentation for RemoveAttributes to more accurately reflect its behavior when attributes are deleted. -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.40.0](service/s3/CHANGELOG.md#v1400-2023-09-26) - * **Feature**: This release adds a new field COMPLETED to the ReplicationStatus Enum. You can now use this field to validate the replication status of S3 objects using the AWS SDK. - -# Release (2023-09-25) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.13.0](service/amplifyuibuilder/CHANGELOG.md#v1130-2023-09-25) - * **Feature**: Support for generating code that is compatible with future versions of amplify project dependencies. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.9.0](service/chimesdkmediapipelines/CHANGELOG.md#v190-2023-09-25) - * **Feature**: Adds support for sending WebRTC audio to Amazon Kineses Video Streams. 
-* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.11.0](service/emrserverless/CHANGELOG.md#v1110-2023-09-25) - * **Feature**: This release adds support for application-wide default job configurations. -* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.17.0](service/finspacedata/CHANGELOG.md#v1170-2023-09-25) - * **Feature**: Adding sensitive trait to attributes. Change max SessionDuration from 720 to 60. Correct "ApiAccess" attribute to "apiAccess" to maintain consistency between APIs. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.44.0](service/quicksight/CHANGELOG.md#v1440-2023-09-25) - * **Feature**: Added ability to tag users upon creation. -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.38.0](service/ssm/CHANGELOG.md#v1380-2023-09-25) - * **Feature**: This release updates the enum values for ResourceType in SSM DescribeInstanceInformation input and ConnectionStatus in GetConnectionStatus output. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.39.0](service/wafv2/CHANGELOG.md#v1390-2023-09-25) - * **Feature**: You can now perform an exact match against the web request's JA3 fingerprint. - -# Release (2023-09-22) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/config`: [v1.18.42](config/CHANGELOG.md#v11842-2023-09-22) - * **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. - * **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. 
-* `github.com/aws/aws-sdk-go-v2/internal/ini`: [v1.3.43](internal/ini/CHANGELOG.md#v1343-2023-09-22) - * **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. - * **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. -* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.20.0](service/braket/CHANGELOG.md#v1200-2023-09-22) - * **Feature**: This release adds support to view the device queue depth (the number of queued quantum tasks and hybrid jobs on a device) and queue position for a quantum task and hybrid job. -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.18.0](service/cloudwatchevents/CHANGELOG.md#v1180-2023-09-22) - * **Feature**: Adds sensitive trait to various shapes in Jetstream Connections API model. -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.31.0](service/databasemigrationservice/CHANGELOG.md#v1310-2023-09-22) - * **Feature**: new vendors for DMS CSF: MongoDB, MariaDB, DocumentDb and Redshift -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.120.0](service/ec2/CHANGELOG.md#v11200-2023-09-22) - * **Feature**: EC2 M2 Pro Mac instances are powered by Apple M2 Pro Mac Mini computers featuring 12 core CPU, 19 core GPU, 32 GiB of memory, and 16 core Apple Neural Engine and uniquely enabled by the AWS Nitro System through high-speed Thunderbolt connections. -* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.21.7](service/efs/CHANGELOG.md#v1217-2023-09-22) - * **Documentation**: Documentation updates for Elastic File System -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.28.0](service/guardduty/CHANGELOG.md#v1280-2023-09-22) - * **Feature**: Add `EKS_CLUSTER_NAME` to filter and sort key. 
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.42.0](service/mediaconvert/CHANGELOG.md#v1420-2023-09-22) - * **Feature**: This release supports the creation of of audio-only tracks in CMAF output groups. - -# Release (2023-09-20) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.20.0](service/appconfig/CHANGELOG.md#v1200-2023-09-20) - * **Feature**: Enabling boto3 paginators for list APIs and adding documentation around ServiceQuotaExceededException errors -* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.20.0](service/apprunner/CHANGELOG.md#v1200-2023-09-20) - * **Feature**: This release adds improvements for managing App Runner auto scaling configuration resources. New APIs: UpdateDefaultAutoScalingConfiguration and ListServicesForAutoScalingConfiguration. Updated API: DeleteAutoScalingConfiguration. -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.24.0](service/cloudwatchlogs/CHANGELOG.md#v1240-2023-09-20) - * **Feature**: Add ClientToken to QueryDefinition CFN Handler in CWL -* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.20.0](service/codeartifact/CHANGELOG.md#v1200-2023-09-20) - * **Feature**: Add support for the Swift package format. -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.18.4](service/kinesisvideo/CHANGELOG.md#v1184-2023-09-20) - * **Documentation**: Updated DescribeMediaStorageConfiguration, StartEdgeConfigurationUpdate, ImageGenerationConfiguration$SamplingInterval, and UpdateMediaStorageConfiguration to match AWS Docs. 
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.39.0](service/s3/CHANGELOG.md#v1390-2023-09-20) - * **Feature**: Fix an issue where the SDK can fail to unmarshall response due to NumberFormatException -* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.24.0](service/servicediscovery/CHANGELOG.md#v1240-2023-09-20) - * **Feature**: Adds a new DiscoverInstancesRevision API and also adds InstanceRevision field to the DiscoverInstances API response. -* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.17.0](service/ssooidc/CHANGELOG.md#v1170-2023-09-20) - * **Feature**: Update FIPS endpoints in aws-us-gov. - -# Release (2023-09-19) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.119.0](service/ec2/CHANGELOG.md#v11190-2023-09-19) - * **Feature**: This release adds support for C7i, and R7a instance types. -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.30.0](service/outposts/CHANGELOG.md#v1300-2023-09-19) - * **Feature**: This release adds the InstanceFamilies field to the ListAssets response. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.107.0](service/sagemaker/CHANGELOG.md#v11070-2023-09-19) - * **Feature**: This release adds support for one-time model monitoring schedules that are executed immediately without delay, explicit data analysis windows for model monitoring schedules and exclude features attributes to remove features from model monitor analysis. - -# Release (2023-09-18) - -## General Highlights -* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. 
-* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.21.0](service/accessanalyzer/CHANGELOG.md#v1210-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.19.0](service/acm/CHANGELOG.md#v1190-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.15.0](service/amplify/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.13.0](service/apigatewaymanagementapi/CHANGELOG.md#v1130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.19.0](service/appconfig/CHANGELOG.md#v1190-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.8.0](service/appconfigdata/CHANGELOG.md#v180-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/appfabric`: [v1.2.0](service/appfabric/CHANGELOG.md#v120-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.17.0](service/appintegrations/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.12.0](service/applicationcostprofiler/CHANGELOG.md#v1120-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.19.0](service/appmesh/CHANGELOG.md#v1190-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/arczonalshift`: [v1.3.0](service/arczonalshift/CHANGELOG.md#v130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.15.0](service/autoscalingplans/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.11.0](service/backupgateway/CHANGELOG.md#v1110-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/backupstorage`: [v1.3.0](service/backupstorage/CHANGELOG.md#v130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.19.0](service/braket/CHANGELOG.md#v1190-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.9.0](service/chimesdkvoice/CHANGELOG.md#v190-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.15.0](service/clouddirectory/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.16.0](service/cloudhsmv2/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.16.0](service/cloudsearch/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/cloudsearchdomain`: [v1.14.0](service/cloudsearchdomain/CHANGELOG.md#v1140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/cloudtraildata`: [v1.2.0](service/cloudtraildata/CHANGELOG.md#v120-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.22.0](service/codebuild/CHANGELOG.md#v1220-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.18.0](service/codedeploy/CHANGELOG.md#v1180-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.15.0](service/codeguruprofiler/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.19.0](service/codegurureviewer/CHANGELOG.md#v1190-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/codegurusecurity`: [v1.2.0](service/codegurusecurity/CHANGELOG.md#v120-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.15.0](service/codestar/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.16.0](service/codestarnotifications/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.17.0](service/cognitoidentity/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/cognitosync`: [v1.14.0](service/cognitosync/CHANGELOG.md#v1140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.7.0](service/connectcases/CHANGELOG.md#v170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.15.0](service/connectcontactlens/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/controltower`: [v1.3.0](service/controltower/CHANGELOG.md#v130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.23.0](service/databrew/CHANGELOG.md#v1230-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.21.0](service/dataexchange/CHANGELOG.md#v1210-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/datapipeline`: [v1.16.0](service/datapipeline/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.14.0](service/dax/CHANGELOG.md#v1140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.17.0](service/devicefarm/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/docdbelastic`: [v1.3.0](service/docdbelastic/CHANGELOG.md#v130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.17.0](service/ec2instanceconnect/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.18.0](service/ecrpublic/CHANGELOG.md#v1180-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.17.0](service/elasticbeanstalk/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.17.0](service/elasticloadbalancing/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/elastictranscoder`: [v1.16.0](service/elastictranscoder/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.13.0](service/evidently/CHANGELOG.md#v1130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.16.0](service/finspacedata/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.16.0](service/fis/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.27.0](service/forecast/CHANGELOG.md#v1270-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.15.0](service/forecastquery/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.4.0](service/gamesparks/CHANGELOG.md#v140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.16.0](service/glacier/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.17.0](service/greengrass/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.24.0](service/greengrassv2/CHANGELOG.md#v1240-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.15.0](service/honeycode/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/inspector`: [v1.15.0](service/inspector/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.13.0](service/iot1clickdevicesservice/CHANGELOG.md#v1130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/iot1clickprojects`: [v1.14.0](service/iot1clickprojects/CHANGELOG.md#v1140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.16.0](service/iotanalytics/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.17.0](service/iotevents/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.15.0](service/ioteventsdata/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.15.0](service/iotfleethub/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.14.0](service/iotjobsdataplane/CHANGELOG.md#v1140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/iotroborunner`: [v1.3.0](service/iotroborunner/CHANGELOG.md#v130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.17.0](service/iotsecuretunneling/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.16.0](service/iotthingsgraph/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.6.0](service/ivschat/CHANGELOG.md#v160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/kendraranking`: [v1.2.0](service/kendraranking/CHANGELOG.md#v120-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.19.0](service/kinesis/CHANGELOG.md#v1190-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.16.0](service/kinesisanalytics/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.17.0](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia`: [v1.13.0](service/kinesisvideomedia/CHANGELOG.md#v1130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideosignaling`: [v1.13.0](service/kinesisvideosignaling/CHANGELOG.md#v1130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.4.0](service/kinesisvideowebrtcstorage/CHANGELOG.md#v140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.19.0](service/lexmodelbuildingservice/CHANGELOG.md#v1190-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/lexruntimeservice`: [v1.15.0](service/lexruntimeservice/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.19.0](service/lexruntimev2/CHANGELOG.md#v1190-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.20.0](service/licensemanager/CHANGELOG.md#v1200-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/licensemanagerlinuxsubscriptions`: [v1.3.0](service/licensemanagerlinuxsubscriptions/CHANGELOG.md#v130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.4.0](service/licensemanagerusersubscriptions/CHANGELOG.md#v140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.21.0](service/lookoutmetrics/CHANGELOG.md#v1210-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.17.0](service/lookoutvision/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/machinelearning`: [v1.17.0](service/machinelearning/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.17.0](service/macie/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.29.7](service/macie2/CHANGELOG.md#v1297-2023-09-18) - * **Documentation**: This release changes the default managedDataIdentifierSelector setting for new classification jobs to RECOMMENDED. By default, new classification jobs now use the recommended set of managed data identifiers. -* `github.com/aws/aws-sdk-go-v2/service/marketplacecommerceanalytics`: [v1.14.0](service/marketplacecommerceanalytics/CHANGELOG.md#v1140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/marketplaceentitlementservice`: [v1.14.0](service/marketplaceentitlementservice/CHANGELOG.md#v1140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.16.0](service/marketplacemetering/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/mediapackagev2`: [v1.2.0](service/mediapackagev2/CHANGELOG.md#v120-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.24.0](service/mediapackagevod/CHANGELOG.md#v1240-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/mediastore`: [v1.15.0](service/mediastore/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/mediastoredata`: [v1.15.0](service/mediastoredata/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/medicalimaging`: [v1.2.0](service/medicalimaging/CHANGELOG.md#v120-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/migrationhub`: [v1.15.0](service/migrationhub/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/migrationhubconfig`: [v1.15.0](service/migrationhubconfig/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/migrationhuborchestrator`: [v1.3.0](service/migrationhuborchestrator/CHANGELOG.md#v130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.11.0](service/migrationhubstrategy/CHANGELOG.md#v1110-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.14.0](service/mobile/CHANGELOG.md#v1140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/mturk`: [v1.16.0](service/mturk/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.22.0](service/neptune/CHANGELOG.md#v1220-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.19.0](service/networkmanager/CHANGELOG.md#v1190-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.18.0](service/nimble/CHANGELOG.md#v1180-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.3.0](service/oam/CHANGELOG.md#v130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.5.0](service/opensearchserverless/CHANGELOG.md#v150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/opsworks`: [v1.16.0](service/opsworks/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.17.0](service/opsworkscm/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/osis`: [v1.2.0](service/osis/CHANGELOG.md#v120-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.13.0](service/panorama/CHANGELOG.md#v1130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/paymentcryptography`: [v1.2.0](service/paymentcryptography/CHANGELOG.md#v120-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.15.0](service/personalizeevents/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.15.0](service/personalizeruntime/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/pinpointemail`: [v1.14.0](service/pinpointemail/CHANGELOG.md#v1140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.13.0](service/pinpointsmsvoice/CHANGELOG.md#v1130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2`: [v1.3.0](service/pinpointsmsvoicev2/CHANGELOG.md#v130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.4.0](service/pipes/CHANGELOG.md#v140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.16.0](service/qldbsession/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.10.0](service/rbin/CHANGELOG.md#v1100-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.15.0](service/rdsdata/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.6.0](service/redshiftserverless/CHANGELOG.md#v160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.4.0](service/resourceexplorer2/CHANGELOG.md#v140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.16.0](service/resourcegroups/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.16.0](service/resourcegroupstaggingapi/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.20.0](service/robomaker/CHANGELOG.md#v1200-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.13.0](service/route53recoverycluster/CHANGELOG.md#v1130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.13.0](service/route53recoverycontrolconfig/CHANGELOG.md#v1130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.11.0](service/route53recoveryreadiness/CHANGELOG.md#v1110-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.20.0](service/route53resolver/CHANGELOG.md#v1200-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.12.0](service/rum/CHANGELOG.md#v1120-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.18.0](service/s3outposts/CHANGELOG.md#v1180-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.17.0](service/sagemakera2iruntime/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.15.0](service/sagemakeredge/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.5.0](service/sagemakergeospatial/CHANGELOG.md#v150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/sagemakermetrics`: [v1.2.0](service/sagemakermetrics/CHANGELOG.md#v120-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/scheduler`: [v1.3.0](service/scheduler/CHANGELOG.md#v130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.17.0](service/schemas/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository`: [v1.14.0](service/serverlessapplicationrepository/CHANGELOG.md#v1140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.19.0](service/servicecatalogappregistry/CHANGELOG.md#v1190-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.23.0](service/servicediscovery/CHANGELOG.md#v1230-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.20.0](service/shield/CHANGELOG.md#v1200-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.15.0](service/sms/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.11.0](service/snowdevicemanagement/CHANGELOG.md#v1110-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.22.0](service/sns/CHANGELOG.md#v1220-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.17.0](service/ssmcontacts/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.23.0](service/ssmincidents/CHANGELOG.md#v1230-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.5.0](service/ssmsap/CHANGELOG.md#v150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.14.0](service/sso/CHANGELOG.md#v1140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.16.0](service/ssooidc/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.20.0](service/storagegateway/CHANGELOG.md#v1200-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.22.0](service/sts/CHANGELOG.md#v1220-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/supportapp`: [v1.4.0](service/supportapp/CHANGELOG.md#v140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.19.0](service/synthetics/CHANGELOG.md#v1190-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.23.0](service/textract/CHANGELOG.md#v1230-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.17.0](service/timestreamquery/CHANGELOG.md#v1170-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.19.0](service/timestreamwrite/CHANGELOG.md#v1190-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/tnb`: [v1.3.0](service/tnb/CHANGELOG.md#v130-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.11.0](service/transcribestreaming/CHANGELOG.md#v1110-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.15.0](service/voiceid/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.14.0](service/waf/CHANGELOG.md#v1140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.15.0](service/wafregional/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
-* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.16.0](service/workdocs/CHANGELOG.md#v1160-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.15.0](service/worklink/CHANGELOG.md#v1150-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.20.0](service/workmail/CHANGELOG.md#v1200-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. - * **Feature**: This release includes four new APIs UpdateUser, UpdateGroup, ListGroupsForEntity and DescribeEntity, along with RemoteUsers and some enhancements to existing APIs. -* `github.com/aws/aws-sdk-go-v2/service/workmailmessageflow`: [v1.14.0](service/workmailmessageflow/CHANGELOG.md#v1140-2023-09-18) - * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. - -# Release (2023-09-15) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.24.0](service/appstream/CHANGELOG.md#v1240-2023-09-15) - * **Feature**: This release introduces app block builder, allowing customers to provision a resource to package applications into an app block -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.67.0](service/connect/CHANGELOG.md#v1670-2023-09-15) - * **Feature**: New rule type (OnMetricDataUpdate) has been added -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.29.1](service/datasync/CHANGELOG.md#v1291-2023-09-15) - * **Documentation**: Documentation-only updates for AWS DataSync. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.106.0](service/sagemaker/CHANGELOG.md#v11060-2023-09-15) - * **Feature**: This release introduces Skip Model Validation for Model Packages - -# Release (2023-09-14) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.23.0](service/appstream/CHANGELOG.md#v1230-2023-09-14) - * **Feature**: This release introduces multi-session fleets, allowing customers to provision more than one user session on a single fleet instance. -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.34.6](service/cloudformation/CHANGELOG.md#v1346-2023-09-14) - * **Documentation**: Documentation updates for AWS CloudFormation -* `github.com/aws/aws-sdk-go-v2/service/entityresolution`: [v1.2.0](service/entityresolution/CHANGELOG.md#v120-2023-09-14) - * **Feature**: Changed "ResolutionTechniques" and "MappedInputFields" in workflow and schema mapping operations to be required fields. -* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.19.0](service/lookoutequipment/CHANGELOG.md#v1190-2023-09-14) - * **Feature**: This release adds APIs for the new scheduled retraining feature. - -# Release (2023-09-13) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.18.8](service/cloud9/CHANGELOG.md#v1188-2023-09-13) - * **Documentation**: Update to include information on Ubuntu 18 deprecation. -* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.16.0](service/drs/CHANGELOG.md#v1160-2023-09-13) - * **Feature**: Updated existing APIs and added new ones to support using AWS Elastic Disaster Recovery post-launch actions. Added support for new regions. -* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.18.0](service/firehose/CHANGELOG.md#v1180-2023-09-13) - * **Feature**: DocumentIdOptions has been added for the Amazon OpenSearch destination. 
-* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.27.0](service/guardduty/CHANGELOG.md#v1270-2023-09-13) - * **Feature**: Add `managementType` field to ListCoverage API response. -* `github.com/aws/aws-sdk-go-v2/service/internetmonitor`: [v1.6.0](service/internetmonitor/CHANGELOG.md#v160-2023-09-13) - * **Feature**: This release updates the Amazon CloudWatch Internet Monitor API domain name. -* `github.com/aws/aws-sdk-go-v2/service/ivsrealtime`: [v1.4.4](service/ivsrealtime/CHANGELOG.md#v144-2023-09-13) - * **Documentation**: Doc only update that changes description for ParticipantToken. -* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.5.1](service/simspaceweaver/CHANGELOG.md#v151-2023-09-13) - * **Documentation**: Edited the introductory text for the API reference. -* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.18.0](service/xray/CHANGELOG.md#v1180-2023-09-13) - * **Feature**: Add StartTime field in GetTraceSummaries API response for each TraceSummary. - -# Release (2023-09-12) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.118.0](service/ec2/CHANGELOG.md#v11180-2023-09-12) - * **Feature**: This release adds support for restricting public sharing of AMIs through AMI Block Public Access -* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.22.0](service/eventbridge/CHANGELOG.md#v1220-2023-09-12) - * **Feature**: Adds sensitive trait to various shapes in Jetstream Connections API model. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.43.0](service/kendra/CHANGELOG.md#v1430-2023-09-12) - * **Feature**: Amazon Kendra now supports confidence score buckets for retrieved passage results using the Retrieve API. 
- -# Release (2023-09-11) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.20.0](service/ecr/CHANGELOG.md#v1200-2023-09-11) - * **Feature**: This release will have ValidationException be thrown from ECR LifecyclePolicy APIs in regions LifecyclePolicy is not supported, this includes existing Amazon Dedicated Cloud (ADC) regions. This release will also change Tag: TagValue and Tag: TagKey to required. -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.37.0](service/medialive/CHANGELOG.md#v1370-2023-09-11) - * **Feature**: AWS Elemental Link now supports attaching a Link UHD device to a MediaConnect flow. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.43.0](service/quicksight/CHANGELOG.md#v1430-2023-09-11) - * **Feature**: This release launches new updates to QuickSight KPI visuals - support for sparklines, new templated layout and new targets for conditional formatting rules. - -# Release (2023-09-08) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.32.6](service/fsx/CHANGELOG.md#v1326-2023-09-08) - * **Documentation**: Amazon FSx documentation fixes -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.105.0](service/sagemaker/CHANGELOG.md#v11050-2023-09-08) - * **Feature**: Autopilot APIs will now support holiday featurization for Timeseries models. The models will now hold holiday metadata and should be able to accommodate holiday effect during inference. -* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.18.0](service/ssoadmin/CHANGELOG.md#v1180-2023-09-08) - * **Feature**: Content updates to IAM Identity Center API for China Regions. -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.30.0](service/workspaces/CHANGELOG.md#v1300-2023-09-08) - * **Feature**: A new field "ErrorDetails" will be added to the output of "DescribeWorkspaceImages" API call. This field provides in-depth details about the error occurred during image import process. 
These details include the possible causes of the errors and troubleshooting information. - -# Release (2023-09-07) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.36.2](service/securityhub/CHANGELOG.md#v1362-2023-09-07) - * **Documentation**: Documentation updates for AWS Security Hub -* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.5.0](service/simspaceweaver/CHANGELOG.md#v150-2023-09-07) - * **Feature**: BucketName and ObjectKey are now required for the S3Location data type. BucketName is now required for the S3Destination data type. - -# Release (2023-09-06) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.35.0](service/appflow/CHANGELOG.md#v1350-2023-09-06) - * **Feature**: Adding OAuth2.0 support for servicenow connector. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.117.0](service/ec2/CHANGELOG.md#v11170-2023-09-06) - * **Feature**: This release adds 'outpost' location type to the DescribeInstanceTypeOfferings API, allowing customers that have been allowlisted for outpost to query their offerings in the API. -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.21.4](service/elasticloadbalancingv2/CHANGELOG.md#v1214-2023-09-06) - * **Documentation**: This release enables default UDP connection termination and disabling unhealthy target connection termination for Network Load Balancers. -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.36.0](service/medialive/CHANGELOG.md#v1360-2023-09-06) - * **Feature**: Adds advanced Output Locking options for Epoch Locking: Custom Epoch and Jam Sync Time -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.38.0](service/wafv2/CHANGELOG.md#v1380-2023-09-06) - * **Feature**: The targeted protection level of the Bot Control managed rule group now provides optional, machine-learning analysis of traffic statistics to detect some bot-related activity. 
You can enable or disable the machine learning functionality through the API. - -# Release (2023-09-05) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.9.0](service/billingconductor/CHANGELOG.md#v190-2023-09-05) - * **Feature**: This release adds support for line item filtering for the custom line item resource. -* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.18.7](service/cloud9/CHANGELOG.md#v1187-2023-09-05) - * **Documentation**: Added support for Ubuntu 22.04 that was not picked up in a previous Trebuchet request. Doc-only update. -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.27.0](service/computeoptimizer/CHANGELOG.md#v1270-2023-09-05) - * **Feature**: This release adds support to provide recommendations for G4dn and P3 instances that use NVIDIA GPUs. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.116.0](service/ec2/CHANGELOG.md#v11160-2023-09-05) - * **Feature**: Introducing Amazon EC2 C7gd, M7gd, and R7gd Instances with up to 3.8 TB of local NVMe-based SSD block-level storage. These instances are powered by AWS Graviton3 processors, delivering up to 25% better performance over Graviton2-based instances. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.30.1](service/ecs/CHANGELOG.md#v1301-2023-09-05) - * **Documentation**: Documentation only update for Amazon ECS. -* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.21.0](service/eventbridge/CHANGELOG.md#v1210-2023-09-05) - * **Feature**: Improve Endpoint Ruleset test coverage. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.54.0](service/rds/CHANGELOG.md#v1540-2023-09-05) - * **Feature**: Add support for feature integration with AWS Backup. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.104.0](service/sagemaker/CHANGELOG.md#v11040-2023-09-05) - * **Feature**: SageMaker Neo now supports data input shape derivation for Pytorch 2.0 and XGBoost compilation job for cloud instance targets. You can skip DataInputConfig field during compilation job creation. You can also access derived information from model in DescribeCompilationJob response. -* `github.com/aws/aws-sdk-go-v2/service/vpclattice`: [v1.2.0](service/vpclattice/CHANGELOG.md#v120-2023-09-05) - * **Feature**: This release adds Lambda event structure version config support for LAMBDA target groups. It also adds newline support for auth policies. - -# Release (2023-09-01) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.8.0](service/chimesdkmediapipelines/CHANGELOG.md#v180-2023-09-01) - * **Feature**: This release adds support for the Voice Analytics feature for customer-owned KVS streams as part of the Amazon Chime SDK call analytics. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.66.0](service/connect/CHANGELOG.md#v1660-2023-09-01) - * **Feature**: Amazon Connect adds the ability to read, create, update, delete, and list view resources, and adds the ability to read, create, delete, and list view versions. -* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.18.0](service/identitystore/CHANGELOG.md#v1180-2023-09-01) - * **Feature**: New Identity Store content for China Region launch -* `github.com/aws/aws-sdk-go-v2/service/neptunedata`: [v1.0.1](service/neptunedata/CHANGELOG.md#v101-2023-09-01) - * **Documentation**: Removed the descriptive text in the introduction. 
- -# Release (2023-08-31) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.7.0](service/chimesdkmediapipelines/CHANGELOG.md#v170-2023-08-31) - * **Feature**: This release adds support for feature Voice Enhancement for Call Recording as part of Amazon Chime SDK call analytics. -* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.15.0](service/cloudhsm/CHANGELOG.md#v1150-2023-08-31) - * **Feature**: Deprecating CloudHSM Classic API Service. -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.17.0](service/cloudwatchevents/CHANGELOG.md#v1170-2023-08-31) - * **Feature**: Documentation updates for CloudWatch Events. -* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.4.0](service/connectcampaigns/CHANGELOG.md#v140-2023-08-31) - * **Feature**: Amazon Connect outbound campaigns has launched agentless dialing mode which enables customers to make automated outbound calls without agent engagement. This release updates three of the campaign management APIs to support the new agentless dialing mode and the new dialing capacity field. -* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.17.0](service/connectparticipant/CHANGELOG.md#v1170-2023-08-31) - * **Feature**: Amazon Connect Participant Service adds the ability to get a view resource using a view token, which is provided in a participant message, with the release of the DescribeView API. -* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.28.0](service/customerprofiles/CHANGELOG.md#v1280-2023-08-31) - * **Feature**: Adds sensitive trait to various shapes in Customer Profiles API model. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.30.0](service/ecs/CHANGELOG.md#v1300-2023-08-31) - * **Feature**: This release adds support for an account-level setting that you can use to configure the number of days for AWS Fargate task retirement. 
-* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.19.0](service/health/CHANGELOG.md#v1190-2023-08-31) - * **Feature**: Adds new API DescribeEntityAggregatesForOrganization that retrieves entity aggregates across your organization. Also adds support for resource status filtering in DescribeAffectedEntitiesForOrganization, resource status aggregates in the DescribeEntityAggregates response, and new resource statuses. -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.26.0](service/ivs/CHANGELOG.md#v1260-2023-08-31) - * **Feature**: Updated "type" description for CreateChannel, UpdateChannel, Channel, and ChannelSummary. -* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.11.0](service/kafkaconnect/CHANGELOG.md#v1110-2023-08-31) - * **Feature**: Minor model changes for Kafka Connect as well as endpoint updates. -* `github.com/aws/aws-sdk-go-v2/service/paymentcryptographydata`: [v1.2.0](service/paymentcryptographydata/CHANGELOG.md#v120-2023-08-31) - * **Feature**: Make KeyCheckValue field optional when using asymmetric keys as Key Check Values typically only apply to symmetric keys -* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.21.0](service/sagemakerruntime/CHANGELOG.md#v1210-2023-08-31) - * **Feature**: This release adds a new InvokeEndpointWithResponseStream API to support streaming of model responses. - -# Release (2023-08-30) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.34.0](service/appflow/CHANGELOG.md#v1340-2023-08-30) - * **Feature**: Add SAP source connector parallel and pagination feature -* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.19.0](service/apprunner/CHANGELOG.md#v1190-2023-08-30) - * **Feature**: App Runner adds support for Bitbucket. You can now create App Runner connection that connects to your Bitbucket repositories and deploy App Runner service with the source code stored in a Bitbucket repository. 
-* `github.com/aws/aws-sdk-go-v2/service/cleanrooms`: [v1.4.0](service/cleanrooms/CHANGELOG.md#v140-2023-08-30) - * **Feature**: This release decouples member abilities in a collaboration. With this change, the member who can run queries no longer needs to be the same as the member who can receive results. -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.29.0](service/datasync/CHANGELOG.md#v1290-2023-08-30) - * **Feature**: AWS DataSync introduces Task Reports, a new feature that provides detailed reports of data transfer operations for each task execution. -* `github.com/aws/aws-sdk-go-v2/service/neptunedata`: [v1.0.0](service/neptunedata/CHANGELOG.md#v100-2023-08-30) - * **Release**: New AWS service client module - * **Feature**: Allows customers to execute data plane actions like bulk loading graphs, issuing graph queries using Gremlin and openCypher directly from the SDK. -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.30.0](service/networkfirewall/CHANGELOG.md#v1300-2023-08-30) - * **Feature**: Network Firewall increasing pagination token string length -* `github.com/aws/aws-sdk-go-v2/service/pcaconnectorad`: [v1.0.0](service/pcaconnectorad/CHANGELOG.md#v100-2023-08-30) - * **Release**: New AWS service client module - * **Feature**: The Connector for AD allows you to use a fully-managed AWS Private CA as a drop-in replacement for your self-managed enterprise CAs without local agents or proxy servers. Enterprises that use AD to manage Windows environments can reduce their private certificate authority (CA) costs and complexity. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.103.0](service/sagemaker/CHANGELOG.md#v11030-2023-08-30) - * **Feature**: Amazon SageMaker Canvas adds IdentityProviderOAuthSettings support for CanvasAppSettings - -# Release (2023-08-29) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.26.0](service/cognitoidentityprovider/CHANGELOG.md#v1260-2023-08-29) - * **Feature**: Added API example requests and responses for several operations. Fixed the validation regex for user pools Identity Provider name. -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.32.5](service/fsx/CHANGELOG.md#v1325-2023-08-29) - * **Documentation**: Documentation updates for project quotas. -* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.9.0](service/omics/CHANGELOG.md#v190-2023-08-29) - * **Feature**: Add RetentionMode support for Runs. -* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.20.0](service/sesv2/CHANGELOG.md#v1200-2023-08-29) - * **Feature**: Adds support for the new Export and Message Insights features: create, get, list and cancel export jobs; get message insights. - -# Release (2023-08-28) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.25.0](service/backup/CHANGELOG.md#v1250-2023-08-28) - * **Feature**: Add support for customizing time zone for backup window in backup plan rules. -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.26.0](service/computeoptimizer/CHANGELOG.md#v1260-2023-08-28) - * **Feature**: This release enables AWS Compute Optimizer to analyze and generate licensing optimization recommendations for sql server running on EC2 instances. -* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.20.6](service/organizations/CHANGELOG.md#v1206-2023-08-28) - * **Documentation**: Documentation updates for permissions and links. 
-* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.7.0](service/securitylake/CHANGELOG.md#v170-2023-08-28) - * **Feature**: Remove incorrect regex enforcement on pagination tokens. -* `github.com/aws/aws-sdk-go-v2/service/servicequotas`: [v1.16.0](service/servicequotas/CHANGELOG.md#v1160-2023-08-28) - * **Feature**: Service Quotas now supports viewing the applied quota value and requesting a quota increase for a specific resource in an AWS account. -* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.12.0](service/workspacesweb/CHANGELOG.md#v1120-2023-08-28) - * **Feature**: WorkSpaces Web now enables Admins to configure which cookies are synchronized from an end-user's local browser to the in-session browser. In conjunction with a browser extension, this feature enables enhanced Single-Sign On capability by reducing the number of times an end-user has to authenticate. - -# Release (2023-08-25) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.29.0](service/cloudtrail/CHANGELOG.md#v1290-2023-08-25) - * **Feature**: Add ThrottlingException with error code 429 to handle CloudTrail Delegated Admin request rate exceeded on organization resources. -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.27.7](service/cloudwatch/CHANGELOG.md#v1277-2023-08-25) - * **Documentation**: Doc-only update to get doc bug fixes into the SDK docs - -# Release (2023-08-24) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.115.0](service/ec2/CHANGELOG.md#v11150-2023-08-24) - * **Feature**: Amazon EC2 M7a instances, powered by 4th generation AMD EPYC processors, deliver up to 50% higher performance compared to M6a instances. Amazon EC2 Hpc7a instances, powered by 4th Gen AMD EPYC processors, deliver up to 2.5x better performance compared to Amazon EC2 Hpc6a instances. 
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.62.0](service/glue/CHANGELOG.md#v1620-2023-08-24) - * **Feature**: Added API attributes that help in the monitoring of sessions. -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.41.0](service/mediaconvert/CHANGELOG.md#v1410-2023-08-24) - * **Feature**: This release includes additional audio channel tags in Quicktime outputs, support for film grain synthesis for AV1 outputs, ability to create audio-only FLAC outputs, and ability to specify Amazon S3 destination storage class. -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.35.0](service/medialive/CHANGELOG.md#v1350-2023-08-24) - * **Feature**: MediaLive now supports passthrough of KLV data to a HLS output group with a TS container. MediaLive now supports setting an attenuation mode for AC3 audio when the coding mode is 3/2 LFE. MediaLive now supports specifying whether to include filler NAL units in RTMP output group settings. -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.27.0](service/mediatailor/CHANGELOG.md#v1270-2023-08-24) - * **Feature**: Adds new source location AUTODETECT_SIGV4 access type. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.42.0](service/quicksight/CHANGELOG.md#v1420-2023-08-24) - * **Feature**: Excel support in Snapshot Export APIs. Removed Required trait for some insight Computations. Namespace-shared Folders support. Global Filters support. Table pin Column support. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.53.0](service/rds/CHANGELOG.md#v1530-2023-08-24) - * **Feature**: This release updates the supported versions for Percona XtraBackup in Aurora MySQL. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.33.0](service/s3control/CHANGELOG.md#v1330-2023-08-24) - * **Feature**: Updates to endpoint ruleset tests to address Smithy validation issues and standardize the capitalization of DualStack. 
-* `github.com/aws/aws-sdk-go-v2/service/verifiedpermissions`: [v1.2.1](service/verifiedpermissions/CHANGELOG.md#v121-2023-08-24) - * **Documentation**: Documentation updates for Amazon Verified Permissions. - -# Release (2023-08-23) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.18.0](service/apigateway/CHANGELOG.md#v1180-2023-08-23) - * **Feature**: This release adds RootResourceId to GetRestApi response. -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.31.0](service/polly/CHANGELOG.md#v1310-2023-08-23) - * **Feature**: Amazon Polly adds 1 new voice - Zayd (ar-AE) - -# Release (2023-08-22) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.28.0](service/costexplorer/CHANGELOG.md#v1280-2023-08-22) - * **Feature**: This release adds the LastUpdatedDate and LastUsedDate timestamps to help you manage your cost allocation tags. -* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.17.7](service/globalaccelerator/CHANGELOG.md#v1177-2023-08-22) - * **Documentation**: Global Accelerator now supports Client Ip Preservation for Network Load Balancer endpoints. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.52.0](service/rds/CHANGELOG.md#v1520-2023-08-22) - * **Feature**: Adding parameters to CreateCustomDbEngineVersion reserved for future use. -* `github.com/aws/aws-sdk-go-v2/service/verifiedpermissions`: [v1.2.0](service/verifiedpermissions/CHANGELOG.md#v120-2023-08-22) - * **Feature**: Documentation updates for Amazon Verified Permissions. Increases max results per page for ListPolicyStores, ListPolicies, and ListPolicyTemplates APIs from 20 to 50. 
- -# Release (2023-08-21) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.21.0 - * **Feature**: Add support for polly SynthesizeSpeech GET request presigner -* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.18.6](service/cloud9/CHANGELOG.md#v1186-2023-08-21) - * **Documentation**: Doc only update to add Ubuntu 22.04 as an Image ID option for Cloud9 -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.114.0](service/ec2/CHANGELOG.md#v11140-2023-08-21) - * **Feature**: The DeleteKeyPair API has been updated to return the keyPairId when an existing key pair is deleted. -* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.12.0](service/finspace/CHANGELOG.md#v1120-2023-08-21) - * **Feature**: Allow customers to manage outbound traffic from their Kx Environment when attaching a transit gateway by providing network acl entries. Allow the customer to choose how they want to update the databases on a cluster allowing updates to possibly be faster than usual. -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.30.0](service/polly/CHANGELOG.md#v1300-2023-08-21) - * **Feature**: Add support for polly SynthesizeSpeech GET request presigner -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.51.0](service/rds/CHANGELOG.md#v1510-2023-08-21) - * **Feature**: Adding support for RDS Aurora Global Database Unplanned Failover -* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.17.3](service/route53domains/CHANGELOG.md#v1173-2023-08-21) - * **Documentation**: Fixed typos in description fields - -# Release (2023-08-18) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.16.0](service/codecommit/CHANGELOG.md#v1160-2023-08-18) - * **Feature**: Add new ListFileCommitHistory operation to retrieve commits which introduced changes to a specific file. 
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.36.0](service/securityhub/CHANGELOG.md#v1360-2023-08-18) - * **Feature**: Added Inspector Lambda code Vulnerability section to ASFF, including GeneratorDetails, EpssScore, ExploitAvailable, and CodeVulnerabilities. - -# Release (2023-08-17) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.20.2 - * **Bug Fix**: Sign `X-Amz-Server-Side-Encryption-Context` header to fix signing for PutObject requests that set `SSEKMSEncryptionContext`. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.113.0](service/ec2/CHANGELOG.md#v11130-2023-08-17) - * **Feature**: Adds support for SubnetConfigurations to allow users to select their own IPv4 and IPv6 addresses for Interface VPC endpoints -* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.22.0](service/gamelift/CHANGELOG.md#v1220-2023-08-17) - * **Feature**: Amazon GameLift updates its instance types support. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.32.3](service/s3control/CHANGELOG.md#v1323-2023-08-17) - * **Announcement**: BREAKFIX: corrected function spelling in environment config from GetS3DisableMultRegionAccessPoints to GetS3DisableMultiRegionAccessPoints - * **Bug Fix**: Adds DisableMRAP option to config loader, and DisableMRAP client resolver to achieve parity with other S3 options in the config loader. Additionally, added breakfix to correct spelling. - -# Release (2023-08-16) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.27.3](service/cloudwatch/CHANGELOG.md#v1273-2023-08-16) - * **Documentation**: Doc-only update to incorporate several doc bug fixes - -# Release (2023-08-15) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.17.0](service/chimesdkmeetings/CHANGELOG.md#v1170-2023-08-15) - * **Feature**: Updated API documentation to include additional exceptions. 
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.112.0](service/ec2/CHANGELOG.md#v11120-2023-08-15) - * **Feature**: Documentation updates for Elastic Compute Cloud (EC2). -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.61.0](service/glue/CHANGELOG.md#v1610-2023-08-15) - * **Feature**: AWS Glue Crawlers can now accept SerDe overrides from a custom csv classifier. The two SerDe options are LazySimpleSerDe and OpenCSVSerDe. In case, the user wants crawler to do the selection, "None" can be selected for this purpose. -* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.19.0](service/pi/CHANGELOG.md#v1190-2023-08-15) - * **Feature**: AWS Performance Insights for Amazon RDS is launching Performance Analysis On Demand, a new feature that allows you to analyze database performance metrics and find out the performance issues. You can now use SDK to create, list, get, delete, and manage tags of performance analysis reports. -* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.17.0](service/route53domains/CHANGELOG.md#v1170-2023-08-15) - * **Feature**: Provide explanation if CheckDomainTransferability return false. Provide requestId if a request is already submitted. Add sensitive protection for customer information -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.102.0](service/sagemaker/CHANGELOG.md#v11020-2023-08-15) - * **Feature**: SageMaker Inference Recommender now provides SupportedResponseMIMETypes from DescribeInferenceRecommendationsJob response - -# Release (2023-08-14) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.23.0](service/mediapackage/CHANGELOG.md#v1230-2023-08-14) - * **Feature**: Fix SDK logging of certain fields. 
-* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.8.0](service/omics/CHANGELOG.md#v180-2023-08-14) - * **Feature**: This release provides support for annotation store versioning and cross account sharing for Omics Analytics -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.33.4](service/transfer/CHANGELOG.md#v1334-2023-08-14) - * **Documentation**: Documentation updates for AWS Transfer Family - -# Release (2023-08-11) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.16.0](service/amplifybackend/CHANGELOG.md#v1160-2023-08-11) - * **Feature**: Adds sensitive trait to required input shapes. -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.36.0](service/configservice/CHANGELOG.md#v1360-2023-08-11) - * **Feature**: Updated ResourceType enum with new resource types onboarded by AWS Config in July 2023. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.111.0](service/ec2/CHANGELOG.md#v11110-2023-08-11) - * **Feature**: Amazon EC2 P5 instances, powered by the latest NVIDIA H100 Tensor Core GPUs, deliver the highest performance in EC2 for deep learning (DL) and HPC applications. M7i-flex and M7i instances are next-generation general purpose instances powered by custom 4th Generation Intel Xeon Scalable processors. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.41.0](service/quicksight/CHANGELOG.md#v1410-2023-08-11) - * **Feature**: New Authentication method for Account subscription - IAM Identity Center. Hierarchy layout support, default column width support and related style properties for pivot table visuals. Non-additive topic field aggregations for Topic API -* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.16.3](service/ses/CHANGELOG.md#v1163-2023-08-11) - * **Documentation**: Doc only updates to include: 1) Clarified which part of an email address where it's okay to have Punycode when it contains non-ASCII characters for the SendRawEmail action and other actions where this is applicable. 
2) Updated S3Action description with new MB max bucket size from 30 to 40. -* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.17.0](service/swf/CHANGELOG.md#v1170-2023-08-11) - * **Feature**: This release adds new API parameters to override workflow task list for workflow executions. - -# Release (2023-08-10) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.28.3](service/cloudtrail/CHANGELOG.md#v1283-2023-08-10) - * **Documentation**: Documentation updates for CloudTrail. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.65.0](service/connect/CHANGELOG.md#v1650-2023-08-10) - * **Feature**: This release adds APIs to provision agents that are global / available in multiple AWS regions and distribute them across these regions by percentage. -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.21.0](service/elasticloadbalancingv2/CHANGELOG.md#v1210-2023-08-10) - * **Feature**: This release enables configuring security groups for Network Load Balancers -* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.7.0](service/omics/CHANGELOG.md#v170-2023-08-10) - * **Feature**: This release adds instanceType to GetRunTask & ListRunTasks responses. -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.21.0](service/secretsmanager/CHANGELOG.md#v1210-2023-08-10) - * **Feature**: Add additional InvalidRequestException to list of possible exceptions for ListSecret. 
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.33.3](service/transfer/CHANGELOG.md#v1333-2023-08-10) - * **Documentation**: Documentation updates for AWS Transfer Family - -# Release (2023-08-09) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.8.0](service/chimesdkvoice/CHANGELOG.md#v180-2023-08-09) - * **Feature**: Updating CreatePhoneNumberOrder, UpdatePhoneNumber and BatchUpdatePhoneNumbers APIs, adding phone number name -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.32.0](service/fsx/CHANGELOG.md#v1320-2023-08-09) - * **Feature**: For FSx for Lustre, add new data repository task type, RELEASE_DATA_FROM_FILESYSTEM, to release files that have been archived to S3. For FSx for Windows, enable support for configuring and updating SSD IOPS, and for updating storage type. For FSx for OpenZFS, add new deployment type, MULTI_AZ_1. -* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.17.3](service/globalaccelerator/CHANGELOG.md#v1173-2023-08-09) - * **Documentation**: Documentation update for dualstack EC2 endpoint support -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.26.0](service/guardduty/CHANGELOG.md#v1260-2023-08-09) - * **Feature**: Added autoEnable ALL to UpdateOrganizationConfiguration and DescribeOrganizationConfiguration APIs. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.101.0](service/sagemaker/CHANGELOG.md#v11010-2023-08-09) - * **Feature**: This release adds support for cross account access for SageMaker Model Cards through AWS RAM. - -# Release (2023-08-08) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.24.0](service/backup/CHANGELOG.md#v1240-2023-08-08) - * **Feature**: This release introduces a new logically air-gapped vault (Preview) in AWS Backup that stores immutable backup copies, which are locked by default and isolated with encryption using AWS owned keys. 
Logically air-gapped vault (Preview) allows secure recovery of application data across accounts. -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.29.0](service/elasticache/CHANGELOG.md#v1290-2023-08-08) - * **Feature**: Added support for cluster mode in online migration and test migration API -* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.21.0](service/servicecatalog/CHANGELOG.md#v1210-2023-08-08) - * **Feature**: Introduce support for HashiCorp Terraform Cloud in Service Catalog by adding TERRAFORM_CLOUD product type in CreateProduct and CreateProvisioningArtifact API. - -# Release (2023-08-07) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.21.0](service/detective/CHANGELOG.md#v1210-2023-08-07) - * **Feature**: Updated the email validation regex to be in line with the TLD name specifications. -* `github.com/aws/aws-sdk-go-v2/service/ivsrealtime`: [v1.4.0](service/ivsrealtime/CHANGELOG.md#v140-2023-08-07) - * **Feature**: Add QUOTA_EXCEEDED and PUBLISHER_NOT_FOUND to EventErrorCode for stage health events. -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.18.0](service/kinesisvideo/CHANGELOG.md#v1180-2023-08-07) - * **Feature**: This release enables minimum of Images SamplingInterval to be as low as 200 milliseconds in Kinesis Video Stream Image feature. -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.16.0](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1160-2023-08-07) - * **Feature**: This release enables minimum of Images SamplingInterval to be as low as 200 milliseconds in Kinesis Video Stream Image feature. -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.30.2](service/rekognition/CHANGELOG.md#v1302-2023-08-07) - * **Documentation**: This release adds code snippets for Amazon Rekognition Custom Labels. 
- -# Release (2023-08-04) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.22.2](service/acmpca/CHANGELOG.md#v1222-2023-08-04) - * **Documentation**: Documentation correction for AWS Private CA -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.64.0](service/connect/CHANGELOG.md#v1640-2023-08-04) - * **Feature**: Added a new API UpdateRoutingProfileAgentAvailabilityTimer to update agent availability timer of a routing profile. -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.28.0](service/datasync/CHANGELOG.md#v1280-2023-08-04) - * **Feature**: Display cloud storage used capacity at a cluster level. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.29.2](service/ecs/CHANGELOG.md#v1292-2023-08-04) - * **Documentation**: This is a documentation update to address various tickets. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.100.0](service/sagemaker/CHANGELOG.md#v11000-2023-08-04) - * **Feature**: Including DataCaptureConfig key in the Amazon Sagemaker Search's transform job object - -# Release (2023-08-03) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.30.2](service/autoscaling/CHANGELOG.md#v1302-2023-08-03) - * **Documentation**: Documentation changes related to Amazon EC2 Auto Scaling APIs. -* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.18.2](service/cloud9/CHANGELOG.md#v1182-2023-08-03) - * **Documentation**: Updated the deprecation date for Amazon Linux. Doc only update. -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.30.0](service/databasemigrationservice/CHANGELOG.md#v1300-2023-08-03) - * **Feature**: The release makes public API for DMS Schema Conversion feature. 
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.110.0](service/ec2/CHANGELOG.md#v11100-2023-08-03) - * **Feature**: This release adds new parameter isPrimaryIPv6 to allow assigning an IPv6 address as a primary IPv6 address to a network interface which cannot be changed to give equivalent functionality available for network interfaces with primary IPv4 address. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.99.0](service/sagemaker/CHANGELOG.md#v1990-2023-08-03) - * **Feature**: Amazon SageMaker now supports running training jobs on p5.48xlarge instance types. - -# Release (2023-08-02) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.16.0](service/budgets/CHANGELOG.md#v1160-2023-08-02) - * **Feature**: As part of CAE tagging integration we need to update our budget names regex filter to prevent customers from using "/action/" in their budget names. -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.25.0](service/cognitoidentityprovider/CHANGELOG.md#v1250-2023-08-02) - * **Feature**: New feature that logs Cognito user pool error messages to CloudWatch logs. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.60.0](service/glue/CHANGELOG.md#v1600-2023-08-02) - * **Feature**: This release includes additional Glue Streaming Kafka SASL property types. -* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.13.0](service/resiliencehub/CHANGELOG.md#v1130-2023-08-02) - * **Feature**: Drift Detection capability added when applications policy has moved from a meet to breach state. Customers will be able to exclude operational recommendations and receive credit in their resilience score. Customers can now add ARH permissions to an existing or new role. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.98.0](service/sagemaker/CHANGELOG.md#v1980-2023-08-02) - * **Feature**: SageMaker Inference Recommender introduces a new API GetScalingConfigurationRecommendation to recommend auto scaling policies based on completed Inference Recommender jobs. - -# Release (2023-08-01) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.26.0](service/batch/CHANGELOG.md#v1260-2023-08-01) - * **Feature**: This release adds support for price capacity optimized allocation strategy for Spot Instances. -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.29.0](service/databasemigrationservice/CHANGELOG.md#v1290-2023-08-01) - * **Feature**: Adding new API describe-engine-versions which provides information about the lifecycle of a replication instance's version. -* `github.com/aws/aws-sdk-go-v2/service/internetmonitor`: [v1.5.0](service/internetmonitor/CHANGELOG.md#v150-2023-08-01) - * **Feature**: This release adds a new feature for Amazon CloudWatch Internet Monitor that enables customers to set custom thresholds, for performance and availability drops, for impact limited to a single city-network to trigger creation of a health event. -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.34.0](service/medialive/CHANGELOG.md#v1340-2023-08-01) - * **Feature**: AWS Elemental Link devices now report their Availability Zone. Link devices now support the ability to change their Availability Zone. -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.29.0](service/polly/CHANGELOG.md#v1290-2023-08-01) - * **Feature**: Amazon Polly adds new French Belgian voice - Isabelle. Isabelle is available as Neural voice only. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.50.0](service/rds/CHANGELOG.md#v1500-2023-08-01) - * **Feature**: Added support for deleted clusters PiTR. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.97.0](service/sagemaker/CHANGELOG.md#v1970-2023-08-01) - * **Feature**: Add Stairs TrafficPattern and FlatInvocations to RecommendationJobStoppingConditions - -# Release (2023-07-31) - -## General Highlights -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supersede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.12.0](service/amplifyuibuilder/CHANGELOG.md#v1120-2023-07-31) - * **Feature**: Amplify Studio releases GraphQL support for codegen job action. -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.30.0](service/autoscaling/CHANGELOG.md#v1300-2023-07-31) - * **Feature**: You can now configure an instance refresh to set its status to 'failed' when it detects that a specified CloudWatch alarm has gone into the ALARM state. You can also choose to roll back the instance refresh automatically when the alarm threshold is met. -* `github.com/aws/aws-sdk-go-v2/service/cleanrooms`: [v1.3.0](service/cleanrooms/CHANGELOG.md#v130-2023-07-31) - * **Feature**: This release introduces custom SQL queries - an expanded set of SQL you can run. This release adds analysis templates, a new resource for storing pre-defined custom SQL queries ahead of time. This release also adds the Custom analysis rule, which lets you approve analysis templates for querying. -* `github.com/aws/aws-sdk-go-v2/service/codestarconnections`: [v1.15.0](service/codestarconnections/CHANGELOG.md#v1150-2023-07-31) - * **Feature**: New integration with the Gitlab provider type. 
-* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.15.0](service/drs/CHANGELOG.md#v1150-2023-07-31) - * **Feature**: Add support for in-aws right sizing -* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.16.0](service/inspector2/CHANGELOG.md#v1160-2023-07-31) - * **Feature**: This release adds 1 new API: BatchGetFindingDetails to retrieve enhanced vulnerability intelligence details for findings. -* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.18.0](service/lookoutequipment/CHANGELOG.md#v1180-2023-07-31) - * **Feature**: This release includes new import resource, model versioning and resource policy features. -* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.6.0](service/omics/CHANGELOG.md#v160-2023-07-31) - * **Feature**: Add CreationType filter for ListReadSets -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.49.0](service/rds/CHANGELOG.md#v1490-2023-07-31) - * **Feature**: This release adds support for Aurora MySQL local write forwarding, which allows for forwarding of write operations from reader DB instances to the writer DB instance. -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.29.0](service/route53/CHANGELOG.md#v1290-2023-07-31) - * **Feature**: Amazon Route 53 now supports the Israel (Tel Aviv) Region (il-central-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. -* `github.com/aws/aws-sdk-go-v2/service/scheduler`: [v1.2.0](service/scheduler/CHANGELOG.md#v120-2023-07-31) - * **Feature**: This release introduces automatic deletion of schedules in EventBridge Scheduler. If configured, EventBridge Scheduler automatically deletes a schedule after the schedule has completed its last invocation. 
- -# Release (2023-07-28.2) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.18.0](service/applicationinsights/CHANGELOG.md#v1180-2023-07-282) - * **Feature**: This release enable customer to add/remove/update more than one workload for a component -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.33.0](service/cloudformation/CHANGELOG.md#v1330-2023-07-282) - * **Feature**: This SDK release is for the feature launch of AWS CloudFormation RetainExceptOnCreate. It adds a new parameter retainExceptOnCreate in the following APIs: CreateStack, UpdateStack, RollbackStack, ExecuteChangeSet. -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.27.0](service/cloudfront/CHANGELOG.md#v1270-2023-07-282) - * **Feature**: Add a new JavaScript runtime version for CloudFront Functions. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.62.0](service/connect/CHANGELOG.md#v1620-2023-07-282) - * **Feature**: This release adds support for new number types. -* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.21.0](service/kafka/CHANGELOG.md#v1210-2023-07-282) - * **Feature**: Amazon MSK has introduced new versions of ListClusterOperations and DescribeClusterOperation APIs. These v2 APIs provide information and insights into the ongoing operations of both MSK Provisioned and MSK Serverless clusters. -* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.21.0](service/pinpoint/CHANGELOG.md#v1210-2023-07-282) - * **Feature**: Added support for sending push notifications using the FCM v1 API with json credentials. 
Amazon Pinpoint customers can now deliver messages to Android devices using both FCM v1 API and the legacy FCM/GCM API - -# Release (2023-07-28) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.23.4](service/sqs/CHANGELOG.md#v1234-2023-07-28) - * **Documentation**: Documentation changes related to SQS APIs. - -# Release (2023-07-27) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.29.0](service/autoscaling/CHANGELOG.md#v1290-2023-07-27) - * **Feature**: This release updates validation for instance types used in the AllowedInstanceTypes and ExcludedInstanceTypes parameters of the InstanceRequirements property of a MixedInstancesPolicy. -* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.17.0](service/ebs/CHANGELOG.md#v1170-2023-07-27) - * **Feature**: SDK and documentation updates for Amazon Elastic Block Store API -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.108.0](service/ec2/CHANGELOG.md#v11080-2023-07-27) - * **Feature**: SDK and documentation updates for Amazon Elastic Block Store APIs -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.28.0](service/eks/CHANGELOG.md#v1280-2023-07-27) - * **Feature**: Add multiple customer error code to handle customer caused failure when managing EKS node groups -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.95.0](service/sagemaker/CHANGELOG.md#v1950-2023-07-27) - * **Feature**: Expose ProfilerConfig attribute in SageMaker Search API response. 
- -# Release (2023-07-26) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/entityresolution`: [v1.0.0](service/entityresolution/CHANGELOG.md#v100-2023-07-26) - * **Release**: New AWS service client module - * **Feature**: AWS Entity Resolution can effectively match a source record from a customer relationship management (CRM) system with a source record from a marketing system containing campaign information. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.58.0](service/glue/CHANGELOG.md#v1580-2023-07-26) - * **Feature**: Release Glue Studio Snowflake Connector Node for SDK/CLI -* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.16.4](service/healthlake/CHANGELOG.md#v1164-2023-07-26) - * **Documentation**: Updating the HealthLake service documentation. -* `github.com/aws/aws-sdk-go-v2/service/managedblockchainquery`: [v1.0.0](service/managedblockchainquery/CHANGELOG.md#v100-2023-07-26) - * **Release**: New AWS service client module - * **Feature**: Amazon Managed Blockchain (AMB) Query provides serverless access to standardized, multi-blockchain datasets with developer-friendly APIs. -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.39.1](service/mediaconvert/CHANGELOG.md#v1391-2023-07-26) - * **Documentation**: This release includes general updates to user documentation. -* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.5.2](service/omics/CHANGELOG.md#v152-2023-07-26) - * **Documentation**: The service is renaming as a part of AWS Health. -* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.3.0](service/opensearchserverless/CHANGELOG.md#v130-2023-07-26) - * **Feature**: This release adds new collection type VectorSearch. 
-* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.27.0](service/polly/CHANGELOG.md#v1270-2023-07-26) - * **Feature**: Amazon Polly adds 1 new voice - Lisa (nl-BE) -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.28.5](service/route53/CHANGELOG.md#v1285-2023-07-26) - * **Documentation**: Update that corrects the documents for received feedback. - -# Release (2023-07-25) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.7.0](service/billingconductor/CHANGELOG.md#v170-2023-07-25) - * **Feature**: Added support for Auto-Assocate Billing Groups for CreateBillingGroup, UpdateBillingGroup, and ListBillingGroups. -* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.26.0](service/customerprofiles/CHANGELOG.md#v1260-2023-07-25) - * **Feature**: Amazon Connect Customer Profiles now supports rule-based resolution to match and merge similar profiles into unified profiles, helping companies deliver faster and more personalized customer service by providing access to relevant customer information for agents and automated experiences. -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.26.0](service/datasync/CHANGELOG.md#v1260-2023-07-25) - * **Feature**: AWS DataSync now supports Microsoft Azure Blob Storage locations. -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.20.2](service/dynamodb/CHANGELOG.md#v1202-2023-07-25) - * **Documentation**: Documentation updates for DynamoDB -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.107.0](service/ec2/CHANGELOG.md#v11070-2023-07-25) - * **Feature**: This release adds an instance's peak and baseline network bandwidth as well as the memory sizes of an instance's inference accelerators to DescribeInstanceTypes. 
-* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.9.0](service/emrserverless/CHANGELOG.md#v190-2023-07-25) - * **Feature**: This release adds support for publishing application logs to CloudWatch. -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.38.0](service/lambda/CHANGELOG.md#v1380-2023-07-25) - * **Feature**: Add Python 3.11 (python3.11) support to AWS Lambda -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.48.0](service/rds/CHANGELOG.md#v1480-2023-07-25) - * **Feature**: This release adds support for monitoring storage optimization progress on the DescribeDBInstances API. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.94.0](service/sagemaker/CHANGELOG.md#v1940-2023-07-25) - * **Feature**: Mark ContentColumn and TargetLabelColumn as required Targets in TextClassificationJobConfig in CreateAutoMLJobV2API -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.34.0](service/securityhub/CHANGELOG.md#v1340-2023-07-25) - * **Feature**: Add support for CONTAINS and NOT_CONTAINS comparison operators for Automation Rules string filters and map filters -* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.20.0](service/sts/CHANGELOG.md#v1200-2023-07-25) - * **Feature**: API updates for the AWS Security Token Service -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.32.0](service/transfer/CHANGELOG.md#v1320-2023-07-25) - * **Feature**: This release adds support for SFTP Connectors. -* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.14.0](service/wisdom/CHANGELOG.md#v1140-2023-07-25) - * **Feature**: This release added two new data types: AssistantIntegrationConfiguration, and SessionIntegrationConfiguration to support Wisdom integration with Amazon Connect Chat - -# Release (2023-07-24) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.13.15](service/apigatewayv2/CHANGELOG.md#v11315-2023-07-24) - * **Documentation**: Documentation updates for Amazon API Gateway. 
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.5.0](service/chimesdkmediapipelines/CHANGELOG.md#v150-2023-07-24) - * **Feature**: AWS Media Pipeline compositing enhancement and Media Insights Pipeline auto language identification. -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.32.0](service/cloudformation/CHANGELOG.md#v1320-2023-07-24) - * **Feature**: This release supports filtering by DRIFT_STATUS for existing API ListStackInstances and adds support for a new API ListStackInstanceResourceDrifts. Customers can now view resource drift information from their StackSet management accounts. -* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.26.0](service/costexplorer/CHANGELOG.md#v1260-2023-07-24) - * **Feature**: This release introduces the new API 'GetSavingsPlanPurchaseRecommendationDetails', which retrieves the details for a Savings Plan recommendation. It also updates the existing API 'GetSavingsPlansPurchaseRecommendation' to include the recommendation detail ID. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.106.0](service/ec2/CHANGELOG.md#v11060-2023-07-24) - * **Feature**: Add "disabled" enum value to SpotInstanceState. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.57.0](service/glue/CHANGELOG.md#v1570-2023-07-24) - * **Feature**: Added support for Data Preparation Recipe node in Glue Studio jobs -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.39.0](service/quicksight/CHANGELOG.md#v1390-2023-07-24) - * **Feature**: This release launches new Snapshot APIs for CSV and PDF exports, adds support for info icon for filters and parameters in Exploration APIs, adds modeled exception to the DeleteAccountCustomization API, and introduces AttributeAggregationFunction's ability to add UNIQUE_VALUE aggregation in tooltips. 
- -# Release (2023-07-21) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.56.0](service/glue/CHANGELOG.md#v1560-2023-07-21) - * **Feature**: This release adds support for AWS Glue Crawler with Apache Hudi Tables, allowing Crawlers to discover Hudi Tables in S3 and register them in Glue Data Catalog for query engines to query against. -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.39.0](service/mediaconvert/CHANGELOG.md#v1390-2023-07-21) - * **Feature**: This release includes improvements to Preserve 444 handling, compatibility of HEVC sources without frame rates, and general improvements to MP4 outputs. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.47.0](service/rds/CHANGELOG.md#v1470-2023-07-21) - * **Feature**: Adds support for the DBSystemID parameter of CreateDBInstance to RDS Custom for Oracle. -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.28.17](service/workspaces/CHANGELOG.md#v12817-2023-07-21) - * **Documentation**: Fixed VolumeEncryptionKey descriptions - -# Release (2023-07-20.2) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.4.0](service/codecatalyst/CHANGELOG.md#v140-2023-07-202) - * **Feature**: This release adds support for updating and deleting spaces and projects in Amazon CodeCatalyst. It also adds support for creating, getting, and deleting source repositories in CodeCatalyst projects. -* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.5.0](service/connectcases/CHANGELOG.md#v150-2023-07-202) - * **Feature**: This release adds the ability to assign a case to a queue or user. 
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.31.0](service/lexmodelsv2/CHANGELOG.md#v1310-2023-07-202) - * **Feature**: This release updates type for Channel field in SessionSpecification and UtteranceSpecification -* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.18.0](service/route53resolver/CHANGELOG.md#v1180-2023-07-202) - * **Feature**: This release adds support for Route 53 On Outposts, a new feature that allows customers to run Route 53 Resolver and Resolver endpoints locally on their Outposts. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.93.0](service/sagemaker/CHANGELOG.md#v1930-2023-07-202) - * **Feature**: Cross account support for SageMaker Feature Store -* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.16.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1160-2023-07-202) - * **Feature**: Cross account support for SageMaker Feature Store -* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.5.0](service/securitylake/CHANGELOG.md#v150-2023-07-202) - * **Feature**: Adding support for Tags on Create and Resource Tagging API. -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.27.0](service/transcribe/CHANGELOG.md#v1270-2023-07-202) - * **Feature**: Added API argument --toxicity-detection to startTranscriptionJob API, which allows users to view toxicity scores of submitted audio. 
- -# Release (2023-07-20) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.12.14](service/savingsplans/CHANGELOG.md#v11214-2023-07-20) - * **Documentation**: Savings Plans endpoints update - -# Release (2023-07-19) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.31.0](service/cloudformation/CHANGELOG.md#v1310-2023-07-19) - * **Feature**: SDK and documentation updates for GetTemplateSummary API (unrecognized resources) -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.105.1](service/ec2/CHANGELOG.md#v11051-2023-07-19) - * **Documentation**: Amazon EC2 documentation updates. -* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.14.0](service/grafana/CHANGELOG.md#v1140-2023-07-19) - * **Feature**: Amazon Managed Grafana now supports grafanaVersion update for existing workspaces with UpdateWorkspaceConfiguration API. DescribeWorkspaceConfiguration API additionally returns grafanaVersion. A new ListVersions API lists available versions or, if given a workspaceId, the versions it can upgrade to. -* `github.com/aws/aws-sdk-go-v2/service/medicalimaging`: [v1.0.0](service/medicalimaging/CHANGELOG.md#v100-2023-07-19) - * **Release**: New AWS service client module - * **Feature**: General Availability (GA) release of AWS Health Imaging, enabling customers to store, transform, and analyze medical imaging data at petabyte-scale. -* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.19.0](service/ram/CHANGELOG.md#v1190-2023-07-19) - * **Feature**: This release adds support for securely sharing with AWS service principals. -* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.3.0](service/ssmsap/CHANGELOG.md#v130-2023-07-19) - * **Feature**: Added support for SAP Hana High Availability discovery (primary and secondary nodes) and Backint agent installation with SSM for SAP. 
-* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.36.0](service/wafv2/CHANGELOG.md#v1360-2023-07-19) - * **Feature**: Added the URI path to the custom aggregation keys that you can specify for a rate-based rule. - -# Release (2023-07-18) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/codegurusecurity`: [v1.0.3](service/codegurusecurity/CHANGELOG.md#v103-2023-07-18) - * **Documentation**: Documentation updates for CodeGuru Security. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.61.1](service/connect/CHANGELOG.md#v1611-2023-07-18) - * **Documentation**: GetMetricDataV2 API: Update to include Contact Lens Conversational Analytics Metrics -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.30.0](service/lexmodelsv2/CHANGELOG.md#v1300-2023-07-18) - * **Feature**: This release adds support for Lex Developers to view analytics for their bots. -* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.6.0](service/m2/CHANGELOG.md#v160-2023-07-18) - * **Feature**: Allows UpdateEnvironment to update the environment to 0 host capacity. New GetSignedBluinsightsUrl API -* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.20.0](service/snowball/CHANGELOG.md#v1200-2023-07-18) - * **Feature**: Adds support for RACK_5U_C. This is the first AWS Snow Family device designed to meet U.S. Military Ruggedization Standards (MIL-STD-810H) with 208 vCPU device in a portable, compact 5U, half-rack width form-factor. 
-* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.18.4](service/translate/CHANGELOG.md#v1184-2023-07-18) - * **Documentation**: Added DOCX word document support to TranslateDocument API - -# Release (2023-07-17) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.18.8](service/codeartifact/CHANGELOG.md#v1188-2023-07-17) - * **Documentation**: Doc only update for AWS CodeArtifact -* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.22.0](service/docdb/CHANGELOG.md#v1220-2023-07-17) - * **Feature**: Added major version upgrade option in ModifyDBCluster API -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.105.0](service/ec2/CHANGELOG.md#v11050-2023-07-17) - * **Feature**: Add Nitro TPM support on DescribeInstanceTypes -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.55.0](service/glue/CHANGELOG.md#v1550-2023-07-17) - * **Feature**: Adding new supported permission type flags to get-unfiltered endpoints that callers may pass to indicate support for enforcing Lake Formation fine-grained access control on nested column attributes. -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.24.0](service/ivs/CHANGELOG.md#v1240-2023-07-17) - * **Feature**: This release provides the flexibility to configure what renditions or thumbnail qualities to record when creating recording configuration. -* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.22.0](service/lakeformation/CHANGELOG.md#v1220-2023-07-17) - * **Feature**: Adds supports for ReadOnlyAdmins and AllowFullTableExternalDataAccess. Adds NESTED_PERMISSION and NESTED_CELL_PERMISSION to SUPPORTED_PERMISSION_TYPES enum. Adds CREATE_LF_TAG on catalog resource and ALTER, DROP, and GRANT_WITH_LF_TAG_EXPRESSION on LF Tag resource. 
- -# Release (2023-07-13) - -## General Highlights -* **Feature**: Modify user agent syntax and introduce support for optional app identifier in UA header -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.23.0](service/cognitoidentityprovider/CHANGELOG.md#v1230-2023-07-13) - * **Feature**: API model updated in Amazon Cognito -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.61.0](service/connect/CHANGELOG.md#v1610-2023-07-13) - * **Feature**: Add support for deleting Queues and Routing Profiles. -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.27.0](service/databasemigrationservice/CHANGELOG.md#v1270-2023-07-13) - * **Feature**: Enhanced PostgreSQL target endpoint settings for providing Babelfish support. -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.25.0](service/datasync/CHANGELOG.md#v1250-2023-07-13) - * **Feature**: Added LunCount to the response object of DescribeStorageSystemResourcesResponse, LunCount represents the number of LUNs on a storage system resource. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.104.0](service/ec2/CHANGELOG.md#v11040-2023-07-13) - * **Feature**: This release adds support for the C7gn and Hpc7g instances. C7gn instances are powered by AWS Graviton3 processors and the fifth-generation AWS Nitro Cards. Hpc7g instances are powered by AWS Graviton 3E processors and provide up to 200 Gbps network bandwidth. -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.30.0](service/fsx/CHANGELOG.md#v1300-2023-07-13) - * **Feature**: Amazon FSx for NetApp ONTAP now supports SnapLock, an ONTAP feature that enables you to protect your files in a volume by transitioning them to a write once, read many (WORM) state. 
-* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.21.1](service/iam/CHANGELOG.md#v1211-2023-07-13) - * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM). -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.25.0](service/mediatailor/CHANGELOG.md#v1250-2023-07-13) - * **Feature**: Adds categories to MediaTailor channel assembly alerts -* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.25.0](service/personalize/CHANGELOG.md#v1250-2023-07-13) - * **Feature**: This release provides ability to customers to change schema associated with their datasets in Amazon Personalize -* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.22.0](service/proton/CHANGELOG.md#v1220-2023-07-13) - * **Feature**: This release adds support for deployment history for Proton provisioned resources -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.37.0](service/s3/CHANGELOG.md#v1370-2023-07-13) - * **Feature**: S3 Inventory now supports Object Access Control List and Object Owner as available object metadata fields in inventory reports. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.92.0](service/sagemaker/CHANGELOG.md#v1920-2023-07-13) - * **Feature**: Amazon SageMaker Canvas adds WorkspeceSettings support for CanvasAppSettings -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.19.11](service/secretsmanager/CHANGELOG.md#v11911-2023-07-13) - * **Documentation**: Documentation updates for Secrets Manager - -# Release (2023-07-07) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.22.0](service/cloudwatchlogs/CHANGELOG.md#v1220-2023-07-07) - * **Feature**: Add CMK encryption support for CloudWatch Logs Insights query result data -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.26.0](service/databasemigrationservice/CHANGELOG.md#v1260-2023-07-07) - * **Feature**: Releasing DMS Serverless. Adding support for PostgreSQL 15.x as source and target endpoint. 
Adding support for DocDB Elastic Clusters with sharded collections, PostgreSQL datatype mapping customization and disabling hostname validation of the certificate authority in Kafka endpoint settings -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.54.0](service/glue/CHANGELOG.md#v1540-2023-07-07) - * **Feature**: This release enables customers to create new Apache Iceberg tables and associated metadata in Amazon S3 by using native AWS Glue CreateTable operation. -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.32.0](service/medialive/CHANGELOG.md#v1320-2023-07-07) - * **Feature**: This release enables the use of Thumbnails in AWS Elemental MediaLive. -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.24.0](service/mediatailor/CHANGELOG.md#v1240-2023-07-07) - * **Feature**: The AWS Elemental MediaTailor SDK for Channel Assembly has added support for EXT-X-CUE-OUT and EXT-X-CUE-IN tags to specify ad breaks in HLS outputs, including support for EXT-OATCLS, EXT-X-ASSET, and EXT-X-CUE-OUT-CONT accessory tags. - -# Release (2023-07-06) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.103.0](service/ec2/CHANGELOG.md#v11030-2023-07-06) - * **Feature**: Add Nitro Enclaves support on DescribeInstanceTypes -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.25.0](service/location/CHANGELOG.md#v1250-2023-07-06) - * **Feature**: This release adds support for authenticating with Amazon Location Service's Places & Routes APIs with an API Key. Also, with this release developers can publish tracked device position updates to Amazon EventBridge. -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.28.0](service/outposts/CHANGELOG.md#v1280-2023-07-06) - * **Feature**: Added paginator support to several APIs. Added the ISOLATED enum value to AssetState. 
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.38.0](service/quicksight/CHANGELOG.md#v1380-2023-07-06) - * **Feature**: This release includes below three changes: small multiples axes improvement, field based coloring, removed required trait from Aggregation function for TopBottomFilter. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.46.1](service/rds/CHANGELOG.md#v1461-2023-07-06) - * **Documentation**: Updates Amazon RDS documentation for creating DB instances and creating Aurora global clusters. - -# Release (2023-07-05) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.16.3](service/comprehendmedical/CHANGELOG.md#v1163-2023-07-05) - * **Documentation**: Update to Amazon Comprehend Medical documentation. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.60.1](service/connect/CHANGELOG.md#v1601-2023-07-05) - * **Documentation**: GetMetricDataV2 API: Channels filters do not count towards overall limitation of 100 filter values. -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.23.0](service/kms/CHANGELOG.md#v1230-2023-07-05) - * **Feature**: Added Dry Run Feature to cryptographic and cross-account mutating KMS APIs (14 in all). This feature allows users to test their permissions and parameters before making the actual API call. -* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.19.0](service/mgn/CHANGELOG.md#v1190-2023-07-05) - * **Feature**: This release introduces the Global view feature and new Replication state APIs. 
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.33.2](service/securityhub/CHANGELOG.md#v1332-2023-07-05) - * **Documentation**: Documentation updates for AWS Security Hub - -# Release (2023-07-03) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.24.0](service/batch/CHANGELOG.md#v1240-2023-07-03) - * **Feature**: This feature allows customers to use AWS Batch with Linux with ARM64 CPU Architecture and X86_64 CPU Architecture with Windows OS on Fargate Platform. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.91.0](service/sagemaker/CHANGELOG.md#v1910-2023-07-03) - * **Feature**: SageMaker Inference Recommender now accepts new fields SupportedEndpointType and ServerlessConfiguration to support serverless endpoints. - -# Release (2023-06-30) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.28.0](service/ecs/CHANGELOG.md#v1280-2023-06-30) - * **Feature**: Added new field "credentialspecs" to the ecs task definition to support gMSA of windows/linux in both domainless and domain-joined mode -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.38.1](service/mediaconvert/CHANGELOG.md#v1381-2023-06-30) - * **Documentation**: This release includes improved color handling of overlays and general updates to user documentation. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.90.0](service/sagemaker/CHANGELOG.md#v1900-2023-06-30) - * **Feature**: This release adds support for rolling deployment in SageMaker Inference. -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.31.0](service/transfer/CHANGELOG.md#v1310-2023-06-30) - * **Feature**: Add outbound Basic authentication support to AS2 connectors -* `github.com/aws/aws-sdk-go-v2/service/verifiedpermissions`: [v1.0.4](service/verifiedpermissions/CHANGELOG.md#v104-2023-06-30) - * **Documentation**: This release corrects several broken links in the documentation. 
- -# Release (2023-06-29) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.21.0](service/appstream/CHANGELOG.md#v1210-2023-06-29) - * **Feature**: This release introduces app block builder, allowing customers to provision a resource to package applications into an app block -* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.24.0](service/chime/CHANGELOG.md#v1240-2023-06-29) - * **Feature**: The Amazon Chime SDK APIs in the Chime namespace are no longer supported. Customers should use APIs in the dedicated Amazon Chime SDK namespaces: ChimeSDKIdentity, ChimeSDKMediaPipelines, ChimeSDKMeetings, ChimeSDKMessaging, and ChimeSDKVoice. -* `github.com/aws/aws-sdk-go-v2/service/cleanrooms`: [v1.2.0](service/cleanrooms/CHANGELOG.md#v120-2023-06-29) - * **Feature**: This release adds support for the OR operator in RSQL join match conditions and the ability to control which operators (AND, OR) are allowed in a join match condition. -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.20.0](service/dynamodb/CHANGELOG.md#v1200-2023-06-29) - * **Feature**: This release adds ReturnValuesOnConditionCheckFailure parameter to PutItem, UpdateItem, DeleteItem, ExecuteStatement, BatchExecuteStatement and ExecuteTransaction APIs. When set to ALL_OLD, API returns a copy of the item as it was when a conditional write failed -* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.20.0](service/gamelift/CHANGELOG.md#v1200-2023-06-29) - * **Feature**: Amazon GameLift now supports game builds that use the Amazon Linux 2023 (AL2023) operating system. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.53.0](service/glue/CHANGELOG.md#v1530-2023-06-29) - * **Feature**: This release adds support for AWS Glue Crawler with Iceberg Tables, allowing Crawlers to discover Iceberg Tables in S3 and register them in Glue Data Catalog for query engines to query against. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.89.0](service/sagemaker/CHANGELOG.md#v1890-2023-06-29) - * **Feature**: Adding support for timeseries forecasting in the CreateAutoMLJobV2 API. - -# Release (2023-06-28) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/internetmonitor`: [v1.3.0](service/internetmonitor/CHANGELOG.md#v130-2023-06-28) - * **Feature**: This release adds a new feature for Amazon CloudWatch Internet Monitor that enables customers to set custom thresholds, for performance and availability drops, for triggering when to create a health event. -* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.17.0](service/kinesisanalyticsv2/CHANGELOG.md#v1170-2023-06-28) - * **Feature**: Support for new runtime environment in Kinesis Data Analytics Studio: Zeppelin-0.10, Apache Flink-1.15 -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.37.0](service/lambda/CHANGELOG.md#v1370-2023-06-28) - * **Feature**: Surface ResourceConflictException in DeleteEventSourceMapping -* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.5.0](service/omics/CHANGELOG.md#v150-2023-06-28) - * **Feature**: Add Common Workflow Language (CWL) as a supported language for Omics workflows -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.46.0](service/rds/CHANGELOG.md#v1460-2023-06-28) - * **Feature**: Amazon Relational Database Service (RDS) now supports joining a RDS for SQL Server instance to a self-managed Active Directory. -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.36.0](service/s3/CHANGELOG.md#v1360-2023-06-28) - * **Feature**: The S3 LISTObjects, ListObjectsV2 and ListObjectVersions API now supports a new optional header x-amz-optional-object-attributes. If header contains RestoreStatus as the value, then S3 will include Glacier restore status i.e. isRestoreInProgress and RestoreExpiryDate in List response. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.88.0](service/sagemaker/CHANGELOG.md#v1880-2023-06-28) - * **Feature**: This release adds support for Model Cards Model Registry integration. - -# Release (2023-06-27) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appfabric`: [v1.0.0](service/appfabric/CHANGELOG.md#v100-2023-06-27) - * **Release**: New AWS service client module - * **Feature**: Initial release of AWS AppFabric for connecting SaaS applications for better productivity and security. -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.32.0](service/appflow/CHANGELOG.md#v1320-2023-06-27) - * **Feature**: This release adds support to bypass SSO with the SAPOData connector when connecting to an SAP instance. -* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.8.0](service/emrserverless/CHANGELOG.md#v180-2023-06-27) - * **Feature**: This release adds support to update the release label of an EMR Serverless application to upgrade it to a different version of Amazon EMR via UpdateApplication API. -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.23.0](service/ivs/CHANGELOG.md#v1230-2023-06-27) - * **Feature**: IVS customers can now revoke the viewer session associated with an auth token, to prevent and stop playback using that token. -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.16.0](service/kinesisvideo/CHANGELOG.md#v1160-2023-06-27) - * **Feature**: General Availability (GA) release of Kinesis Video Streams at Edge, enabling customers to provide a configuration for the Kinesis Video Streams EdgeAgent running on an on-premise IoT device. Customers can now locally record from cameras and stream videos to the cloud on a configured schedule. -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.28.0](service/macie2/CHANGELOG.md#v1280-2023-06-27) - * **Feature**: This release adds support for configuring new classification jobs to use the set of managed data identifiers that we recommend for jobs. 
For the managed data identifier selection type (managedDataIdentifierSelector), specify RECOMMENDED. -* `github.com/aws/aws-sdk-go-v2/service/privatenetworks`: [v1.3.0](service/privatenetworks/CHANGELOG.md#v130-2023-06-27) - * **Feature**: This release allows Private5G customers to choose different commitment plans (60-days, 1-year, 3-years) when placing new orders, enables automatic renewal option for 1-year and 3-years commitments. It also allows customers to update the commitment plan of an existing radio unit. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.87.0](service/sagemaker/CHANGELOG.md#v1870-2023-06-27) - * **Feature**: Introducing TTL for online store records in feature groups. -* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.15.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1150-2023-06-27) - * **Feature**: Introducing TTL for online store records for feature groups. -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.36.7](service/ssm/CHANGELOG.md#v1367-2023-06-27) - * **Documentation**: Systems Manager doc-only update for June 2023. -* `github.com/aws/aws-sdk-go-v2/service/verifiedpermissions`: [v1.0.3](service/verifiedpermissions/CHANGELOG.md#v103-2023-06-27) - * **Documentation**: This update fixes several broken links to the Cedar documentation. - -# Release (2023-06-26) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.60.0](service/connect/CHANGELOG.md#v1600-2023-06-26) - * **Feature**: This release provides a way to search for existing tags within an instance. Before tagging a resource, ensure consistency by searching for pre-existing key:value pairs. 
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.52.0](service/glue/CHANGELOG.md#v1520-2023-06-26) - * **Feature**: Timestamp Starting Position For Kinesis and Kafka Data Sources in a Glue Streaming Job -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.24.0](service/guardduty/CHANGELOG.md#v1240-2023-06-26) - * **Feature**: Add support for user.extra.sessionName in Kubernetes Audit Logs Findings. -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.21.0](service/iam/CHANGELOG.md#v1210-2023-06-26) - * **Feature**: Support for a new API "GetMFADevice" to present MFA device metadata such as device certifications -* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.20.0](service/pinpoint/CHANGELOG.md#v1200-2023-06-26) - * **Feature**: Added time zone estimation support for journeys - -# Release (2023-06-23) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.24.0](service/devopsguru/CHANGELOG.md#v1240-2023-06-23) - * **Feature**: This release adds support for encryption via customer managed keys. -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.29.3](service/fsx/CHANGELOG.md#v1293-2023-06-23) - * **Documentation**: Update to Amazon FSx documentation. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.45.3](service/rds/CHANGELOG.md#v1453-2023-06-23) - * **Documentation**: Documentation improvements for create, describe, and modify DB clusters and DB instances. -* `github.com/aws/aws-sdk-go-v2/service/verifiedpermissions`: [v1.0.2](service/verifiedpermissions/CHANGELOG.md#v102-2023-06-23) - * **Documentation**: Added improved descriptions and new code samples to SDK documentation. 
- -# Release (2023-06-22) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.12.0](service/chimesdkidentity/CHANGELOG.md#v1120-2023-06-22) - * **Feature**: AppInstanceBots can be configured to be invoked or not using the Target or the CHIME.mentions attribute for ChannelMessages -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.16.0](service/chimesdkmessaging/CHANGELOG.md#v1160-2023-06-22) - * **Feature**: ChannelMessages can be made visible to sender and intended recipient rather than all channel members with the target attribute. For example, a user can send messages to a bot and receive messages back in a group channel without other members seeing them. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.41.0](service/kendra/CHANGELOG.md#v1410-2023-06-22) - * **Feature**: Introducing Amazon Kendra Retrieve API that can be used to retrieve relevant passages or text excerpts given an input query. -* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.18.0](service/sfn/CHANGELOG.md#v1180-2023-06-22) - * **Feature**: Adds support for Versions and Aliases. Adds 8 operations: PublishStateMachineVersion, DeleteStateMachineVersion, ListStateMachineVersions, CreateStateMachineAlias, DescribeStateMachineAlias, UpdateStateMachineAlias, DeleteStateMachineAlias, ListStateMachineAliases - -# Release (2023-06-21) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.19.11](service/dynamodb/CHANGELOG.md#v11911-2023-06-21) - * **Documentation**: Documentation updates for DynamoDB -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.27.0](service/emr/CHANGELOG.md#v1270-2023-06-21) - * **Feature**: This release introduces a new Amazon EMR API called ListSupportedInstanceTypes that returns a list of all instance types supported by a given EMR release.
-* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.15.0](service/inspector2/CHANGELOG.md#v1150-2023-06-21) - * **Feature**: This release adds support for Software Bill of Materials (SBOM) export and the general availability of code scanning for AWS Lambda functions. -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.38.0](service/mediaconvert/CHANGELOG.md#v1380-2023-06-21) - * **Feature**: This release introduces the bandwidth reduction filter for the HEVC encoder, increases the limits of outputs per job, and updates support for the Nagra SDK to version 1.14.7. -* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.15.0](service/mq/CHANGELOG.md#v1150-2023-06-21) - * **Feature**: The Cross Region Disaster Recovery feature allows replicating a broker's state from one region to another in order to provide customers with multi-region resiliency in the event of a regional outage. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.86.0](service/sagemaker/CHANGELOG.md#v1860-2023-06-21) - * **Feature**: This release provides support in SageMaker for output files in training jobs to be uploaded without compression and enable customer to deploy uncompressed model from S3 to real-time inference Endpoints. In addition, ml.trn1n.32xlarge is added to supported instance type list in training job. -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.30.0](service/transfer/CHANGELOG.md#v1300-2023-06-21) - * **Feature**: This release adds a new parameter StructuredLogDestinations to CreateServer, UpdateServer APIs.
- -# Release (2023-06-20) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.31.0](service/appflow/CHANGELOG.md#v1310-2023-06-20) - * **Feature**: This release adds new API to reset connector metadata cache -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.34.0](service/configservice/CHANGELOG.md#v1340-2023-06-20) - * **Feature**: Updated ResourceType enum with new resource types onboarded by AWS Config in May 2023. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.102.0](service/ec2/CHANGELOG.md#v11020-2023-06-20) - * **Feature**: Adds support for targeting Dedicated Host allocations by assetIds in AWS Outposts -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.36.0](service/lambda/CHANGELOG.md#v1360-2023-06-20) - * **Feature**: This release adds RecursiveInvocationException to the Invoke API and InvokeWithResponseStream API. -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.28.0](service/redshift/CHANGELOG.md#v1280-2023-06-20) - * **Feature**: Added support for custom domain names for Redshift Provisioned clusters. This feature enables customers to create a custom domain name and use ACM to generate fully secure connections to it. - -# Release (2023-06-19) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.30.0](service/cloudformation/CHANGELOG.md#v1300-2023-06-19) - * **Feature**: Specify desired CloudFormation behavior in the event of ChangeSet execution failure using the CreateChangeSet OnStackFailure parameter -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.101.0](service/ec2/CHANGELOG.md#v11010-2023-06-19) - * **Feature**: API changes to AWS Verified Access to include data from trust providers in logs -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.27.4](service/ecs/CHANGELOG.md#v1274-2023-06-19) - * **Documentation**: Documentation only update to address various tickets. 
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.51.0](service/glue/CHANGELOG.md#v1510-2023-06-19) - * **Feature**: This release adds support for creating cross region table/database resource links -* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.20.0](service/pricing/CHANGELOG.md#v1200-2023-06-19) - * **Feature**: This release updates the PriceListArn regex pattern. -* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.15.0](service/route53domains/CHANGELOG.md#v1150-2023-06-19) - * **Feature**: Update MaxItems upper bound to 1000 for ListPricesRequest -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.85.0](service/sagemaker/CHANGELOG.md#v1850-2023-06-19) - * **Feature**: Amazon Sagemaker Autopilot releases CreateAutoMLJobV2 and DescribeAutoMLJobV2 for Autopilot customers with ImageClassification, TextClassification and Tabular problem type config support. - -# Release (2023-06-16) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.16.0](service/applicationdiscoveryservice/CHANGELOG.md#v1160-2023-06-16) - * **Feature**: Add Amazon EC2 instance recommendations export -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.59.0](service/connect/CHANGELOG.md#v1590-2023-06-16) - * **Feature**: Updates the *InstanceStorageConfig APIs to support a new ResourceType: SCREEN_RECORDINGS to enable screen recording and specify the storage configurations for publishing the recordings. Also updates DescribeInstance and ListInstances APIs to include InstanceAccessUrl attribute in the API response. -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.20.3](service/iam/CHANGELOG.md#v1203-2023-06-16) - * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM). 
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.35.0](service/s3/CHANGELOG.md#v1350-2023-06-16) - * **Feature**: This release adds SDK support for request-payer request header and request-charged response header in the "GetBucketAccelerateConfiguration", "ListMultipartUploads", "ListObjects", "ListObjectsV2" and "ListObjectVersions" S3 APIs. - -# Release (2023-06-15) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.25.0](service/auditmanager/CHANGELOG.md#v1250-2023-06-15) - * **Feature**: This release introduces 2 Audit Manager features: CSV exports and new manual evidence options. You can now export your evidence finder results in CSV format. In addition, you can now add manual evidence to a control by entering free-form text or uploading a file from your browser. -* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.20.3](service/efs/CHANGELOG.md#v1203-2023-06-15) - * **Documentation**: Documentation updates for EFS. -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.23.2](service/guardduty/CHANGELOG.md#v1232-2023-06-15) - * **Documentation**: Updated descriptions for some APIs. -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.24.0](service/location/CHANGELOG.md#v1240-2023-06-15) - * **Feature**: Amazon Location Service adds categories to places, including filtering on those categories in searches. Also, you can now add metadata properties to your geofences. - -# Release (2023-06-13) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.27.0](service/cloudtrail/CHANGELOG.md#v1270-2023-06-13) - * **Feature**: This feature allows users to view dashboards for CloudTrail Lake event data stores. 
-* `github.com/aws/aws-sdk-go-v2/service/codegurusecurity`: [v1.0.0](service/codegurusecurity/CHANGELOG.md#v100-2023-06-13) - * **Release**: New AWS service client module - * **Feature**: Initial release of Amazon CodeGuru Security APIs -* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.14.0](service/drs/CHANGELOG.md#v1140-2023-06-13) - * **Feature**: Added APIs to support network replication and recovery using AWS Elastic Disaster Recovery. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.100.0](service/ec2/CHANGELOG.md#v11000-2023-06-13) - * **Feature**: This release introduces a new feature, EC2 Instance Connect Endpoint, that enables you to connect to a resource over TCP, without requiring the resource to have a public IPv4 address. -* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.23.5](service/imagebuilder/CHANGELOG.md#v1235-2023-06-13) - * **Documentation**: Change the Image Builder ImagePipeline dateNextRun field to more accurately describe the data. -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.27.0](service/lightsail/CHANGELOG.md#v1270-2023-06-13) - * **Feature**: This release adds pagination for the Get Certificates API operation. -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.34.0](service/s3/CHANGELOG.md#v1340-2023-06-13) - * **Feature**: Integrate double encryption feature to SDKs. - * **Bug Fix**: Fix HeadObject to return types.NotFound when an object does not exist. Fixes [2084](https://github.com/aws/aws-sdk-go-v2/issues/2084) -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.33.0](service/securityhub/CHANGELOG.md#v1330-2023-06-13) - * **Feature**: Add support for Security Hub Automation Rules -* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.3.0](service/simspaceweaver/CHANGELOG.md#v130-2023-06-13) - * **Feature**: This release fixes using aws-us-gov ARNs in API calls and adds documentation for snapshot APIs.
-* `github.com/aws/aws-sdk-go-v2/service/verifiedpermissions`: [v1.0.0](service/verifiedpermissions/CHANGELOG.md#v100-2023-06-13) - * **Release**: New AWS service client module - * **Feature**: GA release of Amazon Verified Permissions. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.35.0](service/wafv2/CHANGELOG.md#v1350-2023-06-13) - * **Feature**: You can now detect and block fraudulent account creation attempts with the new AWS WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet. -* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.21.0](service/wellarchitected/CHANGELOG.md#v1210-2023-06-13) - * **Feature**: AWS Well-Architected now supports Profiles that help customers prioritize which questions to focus on first by providing a list of prioritized questions that are better aligned with their business goals and outcomes. - -# Release (2023-06-12) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.11.0](service/amplifyuibuilder/CHANGELOG.md#v1110-2023-06-12) - * **Feature**: AWS Amplify UIBuilder is launching Codegen UI, a new feature that enables you to generate your amplify uibuilder components and forms. -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.19.8](service/dynamodb/CHANGELOG.md#v1198-2023-06-12) - * **Documentation**: Documentation updates for DynamoDB -* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.14.12](service/dynamodbstreams/CHANGELOG.md#v11412-2023-06-12) - * **Documentation**: Documentation updates for DynamoDB Streams -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.29.0](service/fsx/CHANGELOG.md#v1290-2023-06-12) - * **Feature**: Amazon FSx for NetApp ONTAP now supports joining a storage virtual machine (SVM) to Active Directory after the SVM has been created. 
-* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.18.0](service/opensearch/CHANGELOG.md#v1180-2023-06-12) - * **Feature**: This release adds support for SkipUnavailable connection property for cross cluster search -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.29.0](service/rekognition/CHANGELOG.md#v1290-2023-06-12) - * **Feature**: This release adds support for improved accuracy with user vector in Amazon Rekognition Face Search. Adds new APIs: AssociateFaces, CreateUser, DeleteUser, DisassociateFaces, ListUsers, SearchUsers, SearchUsersByImage. Also adds new face metadata that can be stored: user vector. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.84.0](service/sagemaker/CHANGELOG.md#v1840-2023-06-12) - * **Feature**: Sagemaker Neo now supports compilation for inferentia2 (ML_INF2) and Trainium1 (ML_TRN1) as available targets. With these devices, you can run your workloads at highest performance with lowest cost. inferentia2 (ML_INF2) is available in CMH and Trainium1 (ML_TRN1) is available in IAD currently - -# Release (2023-06-09) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.21.13](service/acmpca/CHANGELOG.md#v12113-2023-06-09) - * **Documentation**: Document-only update to refresh CLI documentation for AWS Private CA. No change to the service. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.58.0](service/connect/CHANGELOG.md#v1580-2023-06-09) - * **Feature**: This release adds search APIs for Prompts, Quick Connects and Hours of Operations, which can be used to search for those resources within a Connect Instance. - -# Release (2023-06-08) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.30.0](service/athena/CHANGELOG.md#v1300-2023-06-08) - * **Feature**: You can now define custom spark properties at start of the session for use cases like cluster encryption, table formats, and general Spark tuning. 
-* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.16.0](service/comprehendmedical/CHANGELOG.md#v1160-2023-06-08) - * **Feature**: This release supports a new set of entities and traits. -* `github.com/aws/aws-sdk-go-v2/service/paymentcryptography`: [v1.0.0](service/paymentcryptography/CHANGELOG.md#v100-2023-06-08) - * **Release**: New AWS service client module - * **Feature**: Initial release of AWS Payment Cryptography Control Plane service for creating and managing cryptographic keys used during card payment processing. -* `github.com/aws/aws-sdk-go-v2/service/paymentcryptographydata`: [v1.0.0](service/paymentcryptographydata/CHANGELOG.md#v100-2023-06-08) - * **Release**: New AWS service client module - * **Feature**: Initial release of AWS Payment Cryptography DataPlane Plane service for performing cryptographic operations typically used during card payment processing. -* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.19.0](service/servicecatalog/CHANGELOG.md#v1190-2023-06-08) - * **Feature**: New parameter added in ServiceCatalog DescribeProvisioningArtifact api - IncludeProvisioningArtifactParameters. This parameter can be used to return information about the parameters used to provision the product -* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.17.0](service/timestreamwrite/CHANGELOG.md#v1170-2023-06-08) - * **Feature**: This release adds the capability for customers to define how their data should be partitioned, optimizing for certain access patterns. This definition will take place as a part of the table creation. - -# Release (2023-06-07) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.29.0](service/cloudformation/CHANGELOG.md#v1290-2023-06-07) - * **Feature**: AWS CloudFormation StackSets is updating the deployment experience for all stackset operations to skip suspended AWS accounts during deployments. 
StackSets will skip target AWS accounts that are suspended and set the Detailed Status of the corresponding stack instances as SKIPPED_SUSPENDED_ACCOUNT -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.21.0](service/cloudwatchlogs/CHANGELOG.md#v1210-2023-06-07) - * **Feature**: This change adds support for account level data protection policies using 3 new APIs, PutAccountPolicy, DeleteAccountPolicy and DescribeAccountPolicy. DescribeLogGroup API has been modified to indicate if account level policy is applied to the LogGroup via "inheritedProperties" list in the response. -* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.25.0](service/customerprofiles/CHANGELOG.md#v1250-2023-06-07) - * **Feature**: This release introduces event stream related APIs. -* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.15](service/directconnect/CHANGELOG.md#v11815-2023-06-07) - * **Documentation**: This update corrects the jumbo frames mtu values from 9100 to 8500 for transit virtual interfaces. -* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.19.0](service/emrcontainers/CHANGELOG.md#v1190-2023-06-07) - * **Feature**: EMR on EKS adds support for log rotation of Spark container logs with EMR-6.11.0 onwards, to the StartJobRun API. -* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.19.0](service/iotdeviceadvisor/CHANGELOG.md#v1190-2023-06-07) - * **Feature**: AWS IoT Core Device Advisor now supports new Qualification Suite test case list. With this update, customers can more easily create new qualification test suite with an empty rootGroup input. - -# Release (2023-06-06) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.57.1](service/connect/CHANGELOG.md#v1571-2023-06-06) - * **Documentation**: GetMetricDataV2 API is now available in AWS GovCloud(US) region. 
-* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.26.0](service/emr/CHANGELOG.md#v1260-2023-06-06) - * **Feature**: This release provides customers the ability to specify an allocation strategies amongst PRICE_CAPACITY_OPTIMIZED, CAPACITY_OPTIMIZED, LOWEST_PRICE, DIVERSIFIED for Spot instances in Instance Fleet cluster. This enables customers to choose an allocation strategy best suited for their workload. -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.20.0](service/iam/CHANGELOG.md#v1200-2023-06-06) - * **Feature**: This release updates the AccountAlias regex pattern with the same length restrictions enforced by the length constraint. -* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.14.0](service/inspector2/CHANGELOG.md#v1140-2023-06-06) - * **Feature**: Adds new response properties and request parameters for 'last scanned at' on the ListCoverage operation. This feature allows you to search and view the date of which your resources were last scanned by Inspector. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.38.0](service/iot/CHANGELOG.md#v1380-2023-06-06) - * **Feature**: Adding IoT Device Management Software Package Catalog APIs to register, store, and report system software packages, along with their versions and metadata in a centralized location. -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.29.0](service/lexmodelsv2/CHANGELOG.md#v1290-2023-06-06) - * **Feature**: This release adds support for Lex Developers to create test sets and to execute those test-sets against their bots. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.37.0](service/quicksight/CHANGELOG.md#v1370-2023-06-06) - * **Feature**: QuickSight support for pivot table field collapse state, radar chart range scale and multiple scope options in conditional formatting.
-* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.15.0](service/signer/CHANGELOG.md#v1150-2023-06-06) - * **Feature**: AWS Signer is launching Container Image Signing, a new feature that enables you to sign and verify container images. This feature enables you to validate that only container images you approve are used in your enterprise. -* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.23.0](service/sqs/CHANGELOG.md#v1230-2023-06-06) - * **Feature**: Amazon SQS adds three new APIs - StartMessageMoveTask, CancelMessageMoveTask, and ListMessageMoveTasks to automate redriving messages from dead-letter queues to source queues or a custom destination. - -# Release (2023-06-05) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.28.0](service/cloudformation/CHANGELOG.md#v1280-2023-06-05) - * **Feature**: AWS CloudFormation StackSets provides customers with three new APIs to activate, deactivate, and describe AWS Organizations trusted access which is needed to get started with service-managed StackSets. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.99.0](service/ec2/CHANGELOG.md#v1990-2023-06-05) - * **Feature**: Making InstanceTagAttribute as the required parameter for the DeregisterInstanceEventNotificationAttributes and RegisterInstanceEventNotificationAttributes APIs. -* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.10.0](service/finspace/CHANGELOG.md#v1100-2023-06-05) - * **Feature**: Releasing new Managed kdb Insights APIs -* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.25.0](service/frauddetector/CHANGELOG.md#v1250-2023-06-05) - * **Feature**: Added new variable types, new DateTime data type, and new rules engine functions for interacting and working with DateTime data types. 
-* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.3.0](service/keyspaces/CHANGELOG.md#v130-2023-06-05) - * **Feature**: This release adds support for MRR GA launch, and includes multiregion support in create-keyspace, get-keyspace, and list-keyspace. -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.22.0](service/kms/CHANGELOG.md#v1220-2023-06-05) - * **Feature**: This release includes feature to import customer's asymmetric (RSA and ECC) and HMAC keys into KMS. It also includes feature to allow customers to specify number of days to schedule a KMS key deletion as a policy condition key. -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.35.0](service/lambda/CHANGELOG.md#v1350-2023-06-05) - * **Feature**: Add Ruby 3.2 (ruby3.2) Runtime support to AWS Lambda. -* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.16.0](service/mwaa/CHANGELOG.md#v1160-2023-06-05) - * **Feature**: This release adds ROLLING_BACK and CREATING_SNAPSHOT environment statuses for Amazon MWAA environments. - -# Release (2023-06-02) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.29.0](service/athena/CHANGELOG.md#v1290-2023-06-02) - * **Feature**: This release introduces the DeleteCapacityReservation API and the ability to manage capacity reservations using CloudFormation -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.26.0](service/cloudtrail/CHANGELOG.md#v1260-2023-06-02) - * **Feature**: This feature allows users to start and stop event ingestion on a CloudTrail Lake event data store. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.83.0](service/sagemaker/CHANGELOG.md#v1830-2023-06-02) - * **Feature**: This release adds Selective Execution feature that allows SageMaker Pipelines users to run selected steps in a pipeline. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.34.0](service/wafv2/CHANGELOG.md#v1340-2023-06-02) - * **Feature**: Added APIs to describe managed products. 
The APIs retrieve information about rule groups that are managed by AWS and by AWS Marketplace sellers. - -# Release (2023-06-01) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/alexaforbusiness`: [v1.15.11](service/alexaforbusiness/CHANGELOG.md#v11511-2023-06-01) - * **Documentation**: Alexa for Business has been deprecated and is no longer supported. -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.30.0](service/appflow/CHANGELOG.md#v1300-2023-06-01) - * **Feature**: Added ability to select DataTransferApiType for DescribeConnector and CreateFlow requests when using Async supported connectors. Added supportedDataTransferType to DescribeConnector/DescribeConnectors/ListConnector response. -* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.24.0](service/customerprofiles/CHANGELOG.md#v1240-2023-06-01) - * **Feature**: This release introduces calculated attribute related APIs. -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.22.0](service/ivs/CHANGELOG.md#v1220-2023-06-01) - * **Feature**: API Update for IVS Advanced Channel type -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.82.1](service/sagemaker/CHANGELOG.md#v1821-2023-06-01) - * **Documentation**: Amazon Sagemaker Autopilot adds support for Parquet file input to NLP text classification jobs. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.33.1](service/wafv2/CHANGELOG.md#v1331-2023-06-01) - * **Documentation**: Corrected the information for the header order FieldToMatch setting - -# Release (2023-05-31) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.33.0](service/configservice/CHANGELOG.md#v1330-2023-05-31) - * **Feature**: Resource Types Exclusion feature launch by AWS Config -* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.24.0](service/frauddetector/CHANGELOG.md#v1240-2023-05-31) - * **Feature**: This release enables publishing event predictions from Amazon Fraud Detector (AFD) to Amazon EventBridge. 
For example, after getting predictions from AFD, Amazon EventBridge rules can be configured to trigger notification through an SNS topic, send a message with SES, or trigger Lambda workflows. -* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.16.0](service/healthlake/CHANGELOG.md#v1160-2023-05-31) - * **Feature**: This release adds a new request parameter to the CreateFHIRDatastore API operation. IdentityProviderConfiguration specifies how you want to authenticate incoming requests to your Healthlake Data Store. -* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.5.0](service/m2/CHANGELOG.md#v150-2023-05-31) - * **Feature**: Adds an optional create-only 'roleArn' property to Application resources. Enables PS and PO data set org types. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.45.0](service/rds/CHANGELOG.md#v1450-2023-05-31) - * **Feature**: This release adds support for changing the engine for Oracle using the ModifyDbInstance API -* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.18.5](service/servicecatalog/CHANGELOG.md#v1185-2023-05-31) - * **Documentation**: Documentation updates for ServiceCatalog. -* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.10.0](service/workspacesweb/CHANGELOG.md#v1100-2023-05-31) - * **Feature**: WorkSpaces Web now allows you to control which IP addresses your WorkSpaces Web portal may be accessed from. 
- -# Release (2023-05-30) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.6.0](service/chimesdkvoice/CHANGELOG.md#v160-2023-05-30) - * **Feature**: Added optional CallLeg field to StartSpeakerSearchTask API request -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.50.0](service/glue/CHANGELOG.md#v1500-2023-05-30) - * **Feature**: Added Runtime parameter to allow selection of Ray Runtime -* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.18.3](service/groundstation/CHANGELOG.md#v1183-2023-05-30) - * **Documentation**: Updating description of GetMinuteUsage to be clearer. -* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.4.0](service/iotfleetwise/CHANGELOG.md#v140-2023-05-30) - * **Feature**: Campaigns now support selecting Timestream or S3 as the data destination, Signal catalogs now support "Deprecation" keyword released in VSS v2.1 and "Comment" keyword released in VSS v3.0 -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.23.0](service/location/CHANGELOG.md#v1230-2023-05-30) - * **Feature**: This release adds API support for political views for the maps service APIs: CreateMap, UpdateMap, DescribeMap. -* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.13.0](service/memorydb/CHANGELOG.md#v1130-2023-05-30) - * **Feature**: Amazon MemoryDB for Redis now supports AWS Identity and Access Management authentication access to Redis clusters starting with redis-engine version 7.0 -* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.24.0](service/personalize/CHANGELOG.md#v1240-2023-05-30) - * **Feature**: This release provides support for the exclusion of certain columns for training when creating a solution and creating or updating a recommender with Amazon Personalize. 
-* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.26.0](service/polly/CHANGELOG.md#v1260-2023-05-30) - * **Feature**: Amazon Polly adds 2 new voices - Sofie (da-DK) and Niamh (en-IE) -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.32.0](service/securityhub/CHANGELOG.md#v1320-2023-05-30) - * **Feature**: Added new resource detail objects to ASFF, including resources for AwsGuardDutyDetector, AwsAmazonMqBroker, AwsEventSchemasRegistry, AwsAppSyncGraphQlApi and AwsStepFunctionStateMachine. -* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.4.0](service/securitylake/CHANGELOG.md#v140-2023-05-30) - * **Feature**: Log sources are now versioned. AWS log sources and custom sources will now come with a version identifier that enables producers to vend multiple schema versions to subscribers. Security Lake API have been refactored to more closely align with AWS API conventions. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.33.0](service/wafv2/CHANGELOG.md#v1330-2023-05-30) - * **Feature**: This SDK release provides customers the ability to use Header Order as a field to match. - -# Release (2023-05-26) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.57.0](service/connect/CHANGELOG.md#v1570-2023-05-26) - * **Feature**: Documentation update for a new Initiation Method value in DescribeContact API -* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.28.0](service/iotwireless/CHANGELOG.md#v1280-2023-05-26) - * **Feature**: Add Multicast Group support in Network Analyzer Configuration. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.82.0](service/sagemaker/CHANGELOG.md#v1820-2023-05-26) - * **Feature**: Added ml.p4d and ml.inf1 as supported instance type families for SageMaker Notebook Instances. 
- -# Release (2023-05-25) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.21.0](service/applicationautoscaling/CHANGELOG.md#v1210-2023-05-25) - * **Feature**: With this release, ElastiCache customers will be able to use predefined metricType "ElastiCacheDatabaseCapacityUsageCountedForEvictPercentage" for their ElastiCache instances. -* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.15.0](service/codepipeline/CHANGELOG.md#v1150-2023-05-25) - * **Feature**: Add PollingDisabledAt time information in PipelineMetadata object of GetPipeline API. -* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.19.0](service/gamelift/CHANGELOG.md#v1190-2023-05-25) - * **Feature**: GameLift FleetIQ users can now filter game server claim requests to exclude servers on instances that are draining. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.49.0](service/glue/CHANGELOG.md#v1490-2023-05-25) - * **Feature**: Added ability to create data quality rulesets for shared, cross-account Glue Data Catalog tables. Added support for dataset comparison rules through a new parameter called AdditionalDataSources. Enhanced the data quality results with a map containing profiled metric values. -* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.10.0](service/migrationhubrefactorspaces/CHANGELOG.md#v1100-2023-05-25) - * **Feature**: This SDK update allows for path parameter syntax to be passed to the CreateRoute API. Path parameter syntax require parameters to be enclosed in {} characters. This update also includes a new AppendSourcePath field which lets users forward the source path to the Service URL endpoint. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.81.0](service/sagemaker/CHANGELOG.md#v1810-2023-05-25) - * **Feature**: Amazon SageMaker Automatic Model Tuning now supports enabling Autotune for tuning jobs which can choose tuning job configurations. 
- -# Release (2023-05-24) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.21.0](service/appsync/CHANGELOG.md#v1210-2023-05-24) - * **Feature**: This release introduces AppSync Merged APIs, which provide the ability to compose multiple source APIs into a single federated/merged API. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.56.0](service/connect/CHANGELOG.md#v1560-2023-05-24) - * **Feature**: Amazon Connect Evaluation Capabilities: validation improvements -* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: [v1.16.0](service/costandusagereportservice/CHANGELOG.md#v1160-2023-05-24) - * **Feature**: Add support for split cost allocation data on a report. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.80.0](service/sagemaker/CHANGELOG.md#v1800-2023-05-24) - * **Feature**: SageMaker now provides an instantaneous deployment recommendation through the DescribeModel API - -# Release (2023-05-23) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.24.0](service/fms/CHANGELOG.md#v1240-2023-05-23) - * **Feature**: Fixes issue that could cause calls to GetAdminScope and ListAdminAccountsForOrganization to return a 500 Internal Server error. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.79.0](service/sagemaker/CHANGELOG.md#v1790-2023-05-23) - * **Feature**: Added ModelNameEquals, ModelPackageVersionArnEquals in request and ModelName, SamplePayloadUrl, ModelPackageVersionArn in response of ListInferenceRecommendationsJobs API. Added Invocation timestamps in response of DescribeInferenceRecommendationsJob API & ListInferenceRecommendationsJobSteps API. -* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.18.0](service/translate/CHANGELOG.md#v1180-2023-05-23) - * **Feature**: Added support for calling TranslateDocument API. 
- -# Release (2023-05-22) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.22.0](service/backup/CHANGELOG.md#v1220-2023-05-22) - * **Feature**: Added support for tags on restore. -* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.19.2](service/pinpoint/CHANGELOG.md#v1192-2023-05-22) - * **Documentation**: Amazon Pinpoint is deprecating the tags parameter in the UpdateSegment, UpdateCampaign, UpdateEmailTemplate, UpdateSmsTemplate, UpdatePushTemplate, UpdateInAppTemplate and UpdateVoiceTemplate. Amazon Pinpoint will end support for the tags parameter by May 22, 2023. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.36.0](service/quicksight/CHANGELOG.md#v1360-2023-05-22) - * **Feature**: Add support for Asset Bundle, Geospatial Heatmaps. - -# Release (2023-05-19) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.21.0](service/backup/CHANGELOG.md#v1210-2023-05-19) - * **Feature**: Add ResourceArn, ResourceType, and BackupVaultName to ListRecoveryPointsByLegalHold API response. -* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.4.0](service/connectcases/CHANGELOG.md#v140-2023-05-19) - * **Feature**: This release adds the ability to create fields with type Url through the CreateField API. For more information see https://docs.aws.amazon.com/cases/latest/APIReference/Welcome.html -* `github.com/aws/aws-sdk-go-v2/service/mediapackagev2`: [v1.0.0](service/mediapackagev2/CHANGELOG.md#v100-2023-05-19) - * **Release**: New AWS service client module - * **Feature**: Adds support for the MediaPackage Live v2 API -* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.18.0](service/sesv2/CHANGELOG.md#v1180-2023-05-19) - * **Feature**: This release allows customers to update scaling mode property of dedicated IP pools with PutDedicatedIpPoolScalingAttributes call.
- -# Release (2023-05-18) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.28.0](service/athena/CHANGELOG.md#v1280-2023-05-18) - * **Feature**: Removing SparkProperties from EngineConfiguration object for StartSession API call -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.25.0](service/cloudtrail/CHANGELOG.md#v1250-2023-05-18) - * **Feature**: Add ConflictException to PutEventSelectors, add (Channel/EDS)ARNInvalidException to Tag APIs. These exceptions provide customers with more specific error messages instead of internal errors. -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.24.0](service/computeoptimizer/CHANGELOG.md#v1240-2023-05-18) - * **Feature**: In this launch, we add support for showing integration status with external metric providers such as Instana, Datadog ...etc in GetEC2InstanceRecommendations and ExportEC2InstanceRecommendations apis -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.55.0](service/connect/CHANGELOG.md#v1550-2023-05-18) - * **Feature**: You can programmatically create and manage prompts using APIs, for example, to extract prompts stored within Amazon Connect and add them to your Amazon S3 bucket. AWS CloudTrail, AWS CloudFormation and tagging are supported. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.98.0](service/ec2/CHANGELOG.md#v1980-2023-05-18) - * **Feature**: Add support for i4g.large, i4g.xlarge, i4g.2xlarge, i4g.4xlarge, i4g.8xlarge and i4g.16xlarge instances powered by AWS Graviton2 processors that deliver up to 15% better compute performance than our other storage-optimized instances. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.27.1](service/ecs/CHANGELOG.md#v1271-2023-05-18) - * **Documentation**: Documentation only release to address various tickets. 
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.37.0](service/mediaconvert/CHANGELOG.md#v1370-2023-05-18) - * **Feature**: This release introduces a new MXF Profile for XDCAM which is strictly compliant with the SMPTE RDD 9 standard and improved handling of output name modifiers. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.44.1](service/rds/CHANGELOG.md#v1441-2023-05-18) - * **Documentation**: RDS documentation update for the EngineVersion parameter of ModifyDBSnapshot -* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.3.0](service/sagemakergeospatial/CHANGELOG.md#v130-2023-05-18) - * **Feature**: This release makes ExecutionRoleArn a required field in the StartEarthObservationJob API. - -# Release (2023-05-16) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.19.0](service/detective/CHANGELOG.md#v1190-2023-05-16) - * **Feature**: Added and updated API operations in Detective to support the integration of ASFF Security Hub findings. -* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.14](service/directconnect/CHANGELOG.md#v11814-2023-05-16) - * **Documentation**: This release includes an update to the mtu value for CreateTransitVirtualInterface from 9001 mtu to 8500 mtu. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.48.0](service/glue/CHANGELOG.md#v1480-2023-05-16) - * **Feature**: Add Support for Tags for Custom Entity Types -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.19.8](service/secretsmanager/CHANGELOG.md#v1198-2023-05-16) - * **Documentation**: Documentation updates for Secrets Manager -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.32.0](service/wafv2/CHANGELOG.md#v1320-2023-05-16) - * **Feature**: You can now rate limit web requests based on aggregation keys other than IP addresses, and you can aggregate using combinations of keys.
You can also rate limit all requests that match a scope-down statement, without further aggregation. - -# Release (2023-05-15) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.27.0](service/athena/CHANGELOG.md#v1270-2023-05-15) - * **Feature**: You can now define custom spark properties at start of the session for use cases like cluster encryption, table formats, and general Spark tuning. -* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.3.0](service/codecatalyst/CHANGELOG.md#v130-2023-05-15) - * **Feature**: With this release, the users can list the active sessions connected to their Dev Environment on AWS CodeCatalyst -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.28.0](service/rekognition/CHANGELOG.md#v1280-2023-05-15) - * **Feature**: This release adds a new EyeDirection attribute in Amazon Rekognition DetectFaces and IndexFaces APIs which predicts the yaw and pitch angles of a person's eye gaze direction for each face detected in the image. -* `github.com/aws/aws-sdk-go-v2/service/rolesanywhere`: [v1.2.0](service/rolesanywhere/CHANGELOG.md#v120-2023-05-15) - * **Feature**: Adds support for custom notification settings in a trust anchor. Introduces PutNotificationSettings and ResetNotificationSettings APIs. Updates DurationSeconds max value to 3600. -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.29.0](service/transfer/CHANGELOG.md#v1290-2023-05-15) - * **Feature**: This release introduces the ability to require both password and SSH key when users authenticate to your Transfer Family servers that use the SFTP protocol.
- -# Release (2023-05-11) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.54.2](service/connect/CHANGELOG.md#v1542-2023-05-11) - * **Documentation**: This release updates GetMetricDataV2 API, to support metric data up-to last 35 days -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.27.0](service/elasticache/CHANGELOG.md#v1270-2023-05-11) - * **Feature**: Added support to modify the cluster mode configuration for the existing ElastiCache ReplicationGroups. Customers can now modify the configuration from cluster mode disabled to cluster mode enabled. -* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.19.0](service/elasticsearchservice/CHANGELOG.md#v1190-2023-05-11) - * **Feature**: This release fixes DescribePackages API error with null filter value parameter. -* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.17.0](service/health/CHANGELOG.md#v1170-2023-05-11) - * **Feature**: Add support for regional endpoints -* `github.com/aws/aws-sdk-go-v2/service/ivsrealtime`: [v1.2.0](service/ivsrealtime/CHANGELOG.md#v120-2023-05-11) - * **Feature**: Add methods for inspecting and debugging stages: ListStageSessions, GetStageSession, ListParticipants, GetParticipant, and ListParticipantEvents. -* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.4.0](service/omics/CHANGELOG.md#v140-2023-05-11) - * **Feature**: This release provides support for Ready2Run and GPU workflows, an improved read set filter, the direct upload of read sets into Omics Storage, and annotation parsing for analytics stores. -* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.15.0](service/support/CHANGELOG.md#v1150-2023-05-11) - * **Feature**: This release adds 2 new Support APIs, DescribeCreateCaseOptions and DescribeSupportedLanguages. You can use these new APIs to get available support languages. 
- -# Release (2023-05-10) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.25.0](service/emr/CHANGELOG.md#v1250-2023-05-10) - * **Feature**: EMR Studio now supports programmatically executing Notebooks on an EMR on EKS cluster. In addition, notebooks can now be executed by specifying their location in S3. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.44.0](service/rds/CHANGELOG.md#v1440-2023-05-10) - * **Feature**: Amazon Relational Database Service (RDS) updates for the new Aurora I/O-Optimized storage type for Amazon Aurora DB clusters -* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.15.0](service/swf/CHANGELOG.md#v1150-2023-05-10) - * **Feature**: This release adds a new API parameter to exclude old history events from decision tasks. - -# Release (2023-05-09) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.20.0](service/applicationautoscaling/CHANGELOG.md#v1200-2023-05-09) - * **Feature**: With this release, Amazon SageMaker Serverless Inference customers can use Application Auto Scaling to auto scale the provisioned concurrency of their serverless endpoints. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.47.0](service/glue/CHANGELOG.md#v1470-2023-05-09) - * **Feature**: This release adds AmazonRedshift Source and Target nodes in addition to DynamicTransform OutputSchemas -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.78.0](service/sagemaker/CHANGELOG.md#v1780-2023-05-09) - * **Feature**: This release includes support for (1) Provisioned Concurrency for Amazon SageMaker Serverless Inference and (2) UpdateEndpointWeightsAndCapacities API for Serverless endpoints.
- -# Release (2023-05-08) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.46.0](service/glue/CHANGELOG.md#v1460-2023-05-08) - * **Feature**: Support large worker types G.4x and G.8x for Glue Spark -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.23.0](service/guardduty/CHANGELOG.md#v1230-2023-05-08) - * **Feature**: Add AccessDeniedException 403 Error message code to support 3 Tagging related APIs -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.29.0](service/iotsitewise/CHANGELOG.md#v1290-2023-05-08) - * **Feature**: Provide support for 20,000 max results for GetAssetPropertyValueHistory/BatchGetAssetPropertyValueHistory and 15 minute aggregate resolution for GetAssetPropertyAggregates/BatchGetAssetPropertyAggregates -* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.19.0](service/sts/CHANGELOG.md#v1190-2023-05-08) - * **Feature**: Documentation updates for AWS Security Token Service. - -# Release (2023-05-05) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.97.0](service/ec2/CHANGELOG.md#v1970-2023-05-05) - * **Feature**: This release adds support the inf2 and trn1n instances. inf2 instances are purpose built for deep learning inference while trn1n instances are powered by AWS Trainium accelerators and they build on the capabilities of Trainium-powered trn1 instances. -* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.13.0](service/inspector2/CHANGELOG.md#v1130-2023-05-05) - * **Feature**: Amazon Inspector now allows customers to search its vulnerability intelligence database if any of the Inspector scanning types are activated. 
-* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.23.0](service/mediatailor/CHANGELOG.md#v1230-2023-05-05) - * **Feature**: This release adds support for AFTER_LIVE_EDGE mode configuration for avail suppression, and adding a fill-policy setting that sets the avail suppression to PARTIAL_AVAIL or FULL_AVAIL_ONLY when AFTER_LIVE_EDGE is enabled. -* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.22.0](service/sqs/CHANGELOG.md#v1220-2023-05-05) - * **Feature**: Revert previous SQS protocol change. - -# Release (2023-05-04) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.26.0](service/cloudwatch/CHANGELOG.md#v1260-2023-05-04) - * **Feature**: Adds support for filtering by metric names in CloudWatch Metric Streams. -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.32.0](service/configservice/CHANGELOG.md#v1320-2023-05-04) - * **Feature**: Updated ResourceType enum with new resource types onboarded by AWS Config in April 2023. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.54.1](service/connect/CHANGELOG.md#v1541-2023-05-04) - * **Documentation**: Remove unused InvalidParameterException from CreateParticipant API -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.27.0](service/ecs/CHANGELOG.md#v1270-2023-05-04) - * **Feature**: Documentation update for new error type NamespaceNotFoundException for CreateCluster and UpdateCluster -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.28.0](service/networkfirewall/CHANGELOG.md#v1280-2023-05-04) - * **Feature**: This release adds support for the Suricata REJECT option in midstream exception configurations. 
-* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.17.0](service/opensearch/CHANGELOG.md#v1170-2023-05-04) - * **Feature**: DescribeDomainNodes: A new API that provides configuration information for nodes part of the domain -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.35.0](service/quicksight/CHANGELOG.md#v1350-2023-05-04) - * **Feature**: Add support for Topic, Dataset parameters and VPC -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.27.0](service/rekognition/CHANGELOG.md#v1270-2023-05-04) - * **Feature**: This release adds a new attribute FaceOccluded. Additionally, you can now select attributes individually (e.g. ["DEFAULT", "FACE_OCCLUDED", "AGE_RANGE"] instead of ["ALL"]), which can reduce response time. -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.33.1](service/s3/CHANGELOG.md#v1331-2023-05-04) - * **Documentation**: Documentation updates for Amazon S3 -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.77.0](service/sagemaker/CHANGELOG.md#v1770-2023-05-04) - * **Feature**: We added support for ml.inf2 and ml.trn1 family of instances on Amazon SageMaker for deploying machine learning (ML) models for Real-time and Asynchronous inference. You can use these instances to achieve high performance at a low cost for generative artificial intelligence (AI) models. -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.31.0](service/securityhub/CHANGELOG.md#v1310-2023-05-04) - * **Feature**: Add support for Finding History. -* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.21.0](service/sqs/CHANGELOG.md#v1210-2023-05-04) - * **Feature**: This release enables customers to call SQS using AWS JSON-1.0 protocol. - -# Release (2023-05-03) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.20.0](service/appsync/CHANGELOG.md#v1200-2023-05-03) - * **Feature**: Private API support for AWS AppSync. 
With Private APIs, you can now create GraphQL APIs that can only be accessed from your Amazon Virtual Private Cloud ("VPC"). -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.96.0](service/ec2/CHANGELOG.md#v1960-2023-05-03) - * **Feature**: Adds an SDK paginator for GetNetworkInsightsAccessScopeAnalysisFindings -* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.12.0](service/inspector2/CHANGELOG.md#v1120-2023-05-03) - * **Feature**: This feature provides deep inspection for Linux-based instances -* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.12.0](service/iottwinmaker/CHANGELOG.md#v1120-2023-05-03) - * **Feature**: This release adds a field for GetScene API to return error code and message from dependency services. -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.27.0](service/networkfirewall/CHANGELOG.md#v1270-2023-05-03) - * **Feature**: AWS Network Firewall now supports policy level HOME_NET variable overrides. -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.16.0](service/opensearch/CHANGELOG.md#v1160-2023-05-03) - * **Feature**: Amazon OpenSearch Service adds the option to deploy a domain across multiple Availability Zones, with each AZ containing a complete copy of data and with nodes in one AZ acting as a standby. This option provides 99.99% availability and consistent performance in the event of infrastructure failure. -* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.20.0](service/wellarchitected/CHANGELOG.md#v1200-2023-05-03) - * **Feature**: This release deepens integration with AWS Service Catalog AppRegistry to improve workload resource discovery. - -# Release (2023-05-02) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.29.0](service/appflow/CHANGELOG.md#v1290-2023-05-02) - * **Feature**: This release adds new API to cancel flow executions.
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.54.0](service/connect/CHANGELOG.md#v1540-2023-05-02) - * **Feature**: Amazon Connect Service Rules API update: Added OnContactEvaluationSubmit event source to support user configuring evaluation form rules. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.26.3](service/ecs/CHANGELOG.md#v1263-2023-05-02) - * **Documentation**: Documentation only update to address Amazon ECS tickets. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.40.0](service/kendra/CHANGELOG.md#v1400-2023-05-02) - * **Feature**: AWS Kendra now supports configuring document fields/attributes via the GetQuerySuggestions API. You can now base query suggestions on the contents of document fields. -* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.11.0](service/resiliencehub/CHANGELOG.md#v1110-2023-05-02) - * **Feature**: This release will improve resource level transparency in applications by discovering previously hidden resources. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.76.0](service/sagemaker/CHANGELOG.md#v1760-2023-05-02) - * **Feature**: Amazon Sagemaker Autopilot supports training models with sample weights and additional objective metrics. - -# Release (2023-05-01) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.23.0](service/computeoptimizer/CHANGELOG.md#v1230-2023-05-01) - * **Feature**: support for tag filtering within compute optimizer. ability to filter recommendation results by tag and tag key value pairs. ability to filter by inferred workload type added. -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.21.0](service/kms/CHANGELOG.md#v1210-2023-05-01) - * **Feature**: This release makes the NitroEnclave request parameter Recipient and the response field for CiphertextForRecipient available in AWS SDKs. It also adds the regex pattern for CloudHsmClusterId validation. 
- -# Release (2023-04-28) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.28.0](service/appflow/CHANGELOG.md#v1280-2023-04-28) - * **Feature**: Adds Jwt Support for Salesforce Credentials. -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.26.0](service/athena/CHANGELOG.md#v1260-2023-04-28) - * **Feature**: You can now use capacity reservations on Amazon Athena to run SQL queries on fully-managed compute capacity. -* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.12](service/directconnect/CHANGELOG.md#v11812-2023-04-28) - * **Documentation**: This release corrects the jumbo frames MTU from 9100 to 8500. -* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.20.0](service/efs/CHANGELOG.md#v1200-2023-04-28) - * **Feature**: This release adds PAUSED and PAUSING state as a returned value for DescribeReplicationConfigurations response. -* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.13.0](service/grafana/CHANGELOG.md#v1130-2023-04-28) - * **Feature**: This release adds support for the grafanaVersion parameter in CreateWorkspace. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.37.0](service/iot/CHANGELOG.md#v1370-2023-04-28) - * **Feature**: This release allows AWS IoT Core users to specify a TLS security policy when creating and updating AWS IoT Domain Configurations. -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.26.0](service/rekognition/CHANGELOG.md#v1260-2023-04-28) - * **Feature**: Added support for aggregating moderation labels by video segment timestamps for Stored Video Content Moderation APIs and added additional information about the job to all Stored Video Get API responses. -* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.2.0](service/simspaceweaver/CHANGELOG.md#v120-2023-04-28) - * **Feature**: Added a new CreateSnapshot API. For the StartSimulation API, SchemaS3Location is now optional, added a new SnapshotS3Location parameter. 
For the DescribeSimulation API, added SNAPSHOT_IN_PROGRESS simulation state, deprecated SchemaError, added new fields: StartError and SnapshotS3Location. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.31.0](service/wafv2/CHANGELOG.md#v1310-2023-04-28) - * **Feature**: You can now associate a web ACL with a Verified Access instance. -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.28.11](service/workspaces/CHANGELOG.md#v12811-2023-04-28) - * **Documentation**: Added Windows 11 to support Microsoft_Office_2019 - -# Release (2023-04-27) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.95.0](service/ec2/CHANGELOG.md#v1950-2023-04-27) - * **Feature**: This release adds support for AMD SEV-SNP on EC2 instances. -* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.18.0](service/emrcontainers/CHANGELOG.md#v1180-2023-04-27) - * **Feature**: This release adds GetManagedEndpointSessionCredentials, a new API that allows customers to generate an auth token to connect to a managed endpoint, enabling features such as self-hosted Jupyter notebooks for EMR on EKS. -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.22.0](service/guardduty/CHANGELOG.md#v1220-2023-04-27) - * **Feature**: Added API support to initiate on-demand malware scan on specific resources. -* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.18.0](service/iotdeviceadvisor/CHANGELOG.md#v1180-2023-04-27) - * **Feature**: AWS IoT Core Device Advisor now supports MQTT over WebSocket. With this update, customers can run all three test suites of AWS IoT Core Device Advisor - qualification, custom, and long duration tests - using Signature Version 4 for MQTT over WebSocket. 
-* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.20.0](service/kafka/CHANGELOG.md#v1200-2023-04-27) - * **Feature**: Amazon MSK has added new APIs that allow multi-VPC private connectivity and cluster policy support for Amazon MSK clusters that simplify connectivity and access between your Apache Kafka clients hosted in different VPCs and AWS accounts and your Amazon MSK clusters. -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.34.0](service/lambda/CHANGELOG.md#v1340-2023-04-27) - * **Feature**: Add Java 17 (java17) support to AWS Lambda -* `github.com/aws/aws-sdk-go-v2/service/osis`: [v1.0.1](service/osis/CHANGELOG.md#v101-2023-04-27) - * **Documentation**: Documentation updates for OpenSearch Ingestion -* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.15.10](service/qldb/CHANGELOG.md#v11510-2023-04-27) - * **Documentation**: Documentation updates for Amazon QLDB -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.75.0](service/sagemaker/CHANGELOG.md#v1750-2023-04-27) - * **Feature**: Added ml.p4d.24xlarge and ml.p4de.24xlarge as supported instances for SageMaker Studio - -# Release (2023-04-26) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/osis`: [v1.0.0](service/osis/CHANGELOG.md#v100-2023-04-26) - * **Release**: New AWS service client module - * **Feature**: Initial release for OpenSearch Ingestion - -# Release (2023-04-25) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.15.0](service/chimesdkmessaging/CHANGELOG.md#v1150-2023-04-25) - * **Feature**: Remove non actionable field from UpdateChannelReadMarker and DeleteChannelRequest. Add precise exceptions to DeleteChannel and DeleteStreamingConfigurations error cases. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.53.0](service/connect/CHANGELOG.md#v1530-2023-04-25) - * **Feature**: Amazon Connect, Contact Lens Evaluation API release including ability to manage forms and to submit contact evaluations.
-* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.24.0](service/datasync/CHANGELOG.md#v1240-2023-04-25) - * **Feature**: This release adds 13 new APIs to support AWS DataSync Discovery GA. -* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.17.0](service/directoryservice/CHANGELOG.md#v1170-2023-04-25) - * **Feature**: New field added in AWS Managed Microsoft AD DescribeSettings response and regex pattern update for UpdateSettings value. Added length validation to RemoteDomainName. -* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.19.0](service/pinpoint/CHANGELOG.md#v1190-2023-04-25) - * **Feature**: Adds support for journey runs and querying journey execution metrics based on journey runs. Adds execution metrics to campaign activities. Updates docs for Advanced Quiet Time. - -# Release (2023-04-24) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.18.0 - * **Feature**: add recursion detection middleware to all SDK requests to avoid recursion invocation in Lambda -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.27.0](service/appflow/CHANGELOG.md#v1270-2023-04-24) - * **Feature**: Increased the max length for RefreshToken and AuthCode from 2048 to 4096. -* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.2.5](service/codecatalyst/CHANGELOG.md#v125-2023-04-24) - * **Documentation**: Documentation updates for Amazon CodeCatalyst. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.94.0](service/ec2/CHANGELOG.md#v1940-2023-04-24) - * **Feature**: API changes to AWS Verified Access related to identity providers' information. 
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.36.0](service/mediaconvert/CHANGELOG.md#v1360-2023-04-24) - * **Feature**: This release introduces a noise reduction pre-filter, linear interpolation deinterlace mode, video pass-through, updated default job settings, and expanded LC-AAC Stereo audio bitrate ranges. -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.25.0](service/rekognition/CHANGELOG.md#v1250-2023-04-24) - * **Feature**: Added new status result to Liveness session status. -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.28.0](service/route53/CHANGELOG.md#v1280-2023-04-24) - * **Feature**: added paginator for listResourceRecordSets -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.33.0](service/s3/CHANGELOG.md#v1330-2023-04-24) - * **Feature**: added custom paginators for listMultipartUploads and ListObjectVersions - -# Release (2023-04-21) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.52.0](service/connect/CHANGELOG.md#v1520-2023-04-21) - * **Feature**: This release adds a new API CreateParticipant. For Amazon Connect Chat, you can use this new API to customize chat flow experiences. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.26.1](service/ecs/CHANGELOG.md#v1261-2023-04-21) - * **Documentation**: Documentation update to address various Amazon ECS tickets. -* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.23.0](service/fms/CHANGELOG.md#v1230-2023-04-21) - * **Feature**: AWS Firewall Manager adds support for multiple administrators. You can now delegate more than one administrator per organization. - -# Release (2023-04-20) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.23.0](service/chime/CHANGELOG.md#v1230-2023-04-20) - * **Feature**: Adds support for Hindi and Thai languages and additional Amazon Transcribe parameters to the StartMeetingTranscription API. 
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.4.0](service/chimesdkmediapipelines/CHANGELOG.md#v140-2023-04-20) - * **Feature**: This release adds support for specifying the recording file format in an S3 recording sink configuration. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.15.0](service/chimesdkmeetings/CHANGELOG.md#v1150-2023-04-20) - * **Feature**: Adds support for Hindi and Thai languages and additional Amazon Transcribe parameters to the StartMeetingTranscription API. -* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.18.0](service/gamelift/CHANGELOG.md#v1180-2023-04-20) - * **Feature**: Amazon GameLift supports creating Builds for Windows 2016 operating system. -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.21.0](service/guardduty/CHANGELOG.md#v1210-2023-04-20) - * **Feature**: This release adds support for the new Lambda Protection feature. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.36.0](service/iot/CHANGELOG.md#v1360-2023-04-20) - * **Feature**: Support additional OTA states in GetOTAUpdate API -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.74.0](service/sagemaker/CHANGELOG.md#v1740-2023-04-20) - * **Feature**: Amazon SageMaker Canvas adds ModelRegisterSettings support for CanvasAppSettings. -* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.19.0](service/snowball/CHANGELOG.md#v1190-2023-04-20) - * **Feature**: Adds support for Amazon S3 compatible storage. AWS Snow Family customers can now use Amazon S3 compatible storage on Snowball Edge devices. Also adds support for V3_5S. This is a refreshed AWS Snowball Edge Storage Optimized device type with 210TB SSD (customer usable). -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.30.0](service/wafv2/CHANGELOG.md#v1300-2023-04-20) - * **Feature**: You can now create encrypted API keys to use in a client application integration of the JavaScript CAPTCHA API . 
You can also retrieve a list of your API keys and the JavaScript application integration URL. - -# Release (2023-04-19) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.24.0](service/comprehend/CHANGELOG.md#v1240-2023-04-19) - * **Feature**: This release supports native document models for custom classification, in addition to plain-text models. You train native document models using documents (PDF, Word, images) in their native format. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.26.0](service/ecs/CHANGELOG.md#v1260-2023-04-19) - * **Feature**: This release supports the Account Setting "TagResourceAuthorization" that allows for enhanced Tagging security controls. -* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.18.0](service/ram/CHANGELOG.md#v1180-2023-04-19) - * **Feature**: This release adds support for customer managed permissions. Customer managed permissions enable customers to author and manage tailored permissions for resources shared using RAM. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.43.1](service/rds/CHANGELOG.md#v1431-2023-04-19) - * **Documentation**: Adds support for the ImageId parameter of CreateCustomDBEngineVersion to RDS Custom for Oracle -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.32.0](service/s3/CHANGELOG.md#v1320-2023-04-19) - * **Feature**: Provides support for "Snow" Storage class. 
-* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.19.4](service/secretsmanager/CHANGELOG.md#v1194-2023-04-19) - * **Documentation**: Documentation updates for Secrets Manager - -# Release (2023-04-17) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.26.0](service/appflow/CHANGELOG.md#v1260-2023-04-17) - * **Feature**: This release adds a Client Token parameter to the following AppFlow APIs: Create/Update Connector Profile, Create/Update Flow, Start Flow, Register Connector, Update Connector Registration. The Client Token parameter allows idempotent operations for these APIs. -* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.13.0](service/drs/CHANGELOG.md#v1130-2023-04-17) - * **Feature**: Changed existing APIs and added new APIs to support using an account-level launch configuration template with AWS Elastic Disaster Recovery. -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.19.5](service/dynamodb/CHANGELOG.md#v1195-2023-04-17) - * **Documentation**: Documentation updates for DynamoDB API -* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.7.0](service/emrserverless/CHANGELOG.md#v170-2023-04-17) - * **Feature**: The GetJobRun API has been updated to include the job's billed resource utilization. This utilization shows the aggregate vCPU, memory and storage that AWS has billed for the job run. The billed resources include a 1-minute minimum usage for workers, plus additional storage over 20 GB per worker. 
-* `github.com/aws/aws-sdk-go-v2/service/internetmonitor`: [v1.2.0](service/internetmonitor/CHANGELOG.md#v120-2023-04-17) - * **Feature**: This release includes a new configurable value, TrafficPercentageToMonitor, which allows users to adjust the amount of traffic monitored by percentage -* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.27.0](service/iotwireless/CHANGELOG.md#v1270-2023-04-17) - * **Feature**: Supports the new feature of LoRaWAN roaming, allows to configure MaxEirp for LoRaWAN gateway, and allows to configure PingSlotPeriod for LoRaWAN multicast group -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.33.0](service/lambda/CHANGELOG.md#v1330-2023-04-17) - * **Feature**: Add Python 3.10 (python3.10) support to AWS Lambda - -# Release (2023-04-14) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.25.1](service/ecs/CHANGELOG.md#v1251-2023-04-14) - * **Documentation**: This release supports ephemeral storage for AWS Fargate Windows containers. -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.32.0](service/lambda/CHANGELOG.md#v1320-2023-04-14) - * **Feature**: This release adds SnapStart related exceptions to InvokeWithResponseStream API. IAM access related documentation is also added for this API. -* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.9.8](service/migrationhubrefactorspaces/CHANGELOG.md#v198-2023-04-14) - * **Documentation**: Doc only update for Refactor Spaces environments without network bridge feature. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.43.0](service/rds/CHANGELOG.md#v1430-2023-04-14) - * **Feature**: This release adds support of modifying the engine mode of database clusters. 
- -# Release (2023-04-13) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.5.0](service/chimesdkvoice/CHANGELOG.md#v150-2023-04-13) - * **Feature**: This release adds tagging support for Voice Connectors and SIP Media Applications -* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.19.0](service/mediaconnect/CHANGELOG.md#v1190-2023-04-13) - * **Feature**: Gateway is a new feature of AWS Elemental MediaConnect. Gateway allows the deployment of on-premises resources for the purpose of transporting live video to and from the AWS Cloud. - -# Release (2023-04-12) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.18.0](service/groundstation/CHANGELOG.md#v1180-2023-04-12) - * **Feature**: AWS Ground Station Wideband DigIF GA Release -* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.15.5](service/managedblockchain/CHANGELOG.md#v1155-2023-04-12) - * **Documentation**: Removal of the Ropsten network. The Ethereum foundation ceased support of Ropsten on December 31st, 2022. - -# Release (2023-04-11) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.16.0](service/ecrpublic/CHANGELOG.md#v1160-2023-04-11) - * **Feature**: This release will allow using registry alias as registryId in BatchDeleteImage request. -* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.6.0](service/emrserverless/CHANGELOG.md#v160-2023-04-11) - * **Feature**: This release extends GetJobRun API to return job run timeout (executionTimeoutMinutes) specified during StartJobRun call (or default timeout of 720 minutes if none was specified). 
-* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.19.0](service/eventbridge/CHANGELOG.md#v1190-2023-04-11) - * **Feature**: EventBridge PutTarget support for multiple SQL arguments on RedshiftDataParameters -* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.15.0](service/iotdataplane/CHANGELOG.md#v1150-2023-04-11) - * **Feature**: This release adds support for MQTT5 user properties when calling the AWS IoT GetRetainedMessage API -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.29.0](service/wafv2/CHANGELOG.md#v1290-2023-04-11) - * **Feature**: For web ACLs that protect CloudFront protections, the default request body inspection size is now 16 KB, and you can use the new association configuration to increase the inspection size further, up to 64 KB. Sizes over 16 KB can incur additional costs. - -# Release (2023-04-10) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.51.0](service/connect/CHANGELOG.md#v1510-2023-04-10) - * **Feature**: This release adds the ability to configure an agent's routing profile to receive contacts from multiple channels at the same time via extending the UpdateRoutingProfileConcurrency, CreateRoutingProfile and DescribeRoutingProfile APIs. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.25.0](service/ecs/CHANGELOG.md#v1250-2023-04-10) - * **Feature**: This release adds support for enabling FIPS compliance on Amazon ECS Fargate tasks -* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.16.0](service/marketplacecatalog/CHANGELOG.md#v1160-2023-04-10) - * **Feature**: Added three new APIs to support resource sharing: GetResourcePolicy, PutResourcePolicy, and DeleteResourcePolicy. Added new OwnershipType field to ListEntities request to let users filter on entities that are shared with them. Increased max page size of ListEntities response from 20 to 50 results. 
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.35.0](service/mediaconvert/CHANGELOG.md#v1350-2023-04-10) - * **Feature**: AWS Elemental MediaConvert SDK now supports conversion of 608 paint-on captions to pop-on captions for SCC sources. -* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.3.0](service/omics/CHANGELOG.md#v130-2023-04-10) - * **Feature**: Remove unexpected API changes. -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.24.0](service/rekognition/CHANGELOG.md#v1240-2023-04-10) - * **Feature**: This release adds support for Face Liveness APIs in Amazon Rekognition. Updates UpdateStreamProcessor to return ResourceInUseException Exception. Minor updates to API documentation. - -# Release (2023-04-07) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.15.0](service/dlm/CHANGELOG.md#v1150-2023-04-07) - * **Announcement**: This release includes breaking changes for the timestamp trait on the data lifecycle management client. - * **Feature**: Updated timestamp format for GetLifecyclePolicy API - * **Bug Fix**: Correct timestamp type for data lifecycle manager. -* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.21.0](service/docdb/CHANGELOG.md#v1210-2023-04-07) - * **Feature**: This release adds a new parameter 'DBClusterParameterGroupName' to 'RestoreDBClusterFromSnapshot' API to associate the name of the DB cluster parameter group while performing restore. -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.28.8](service/fsx/CHANGELOG.md#v1288-2023-04-07) - * **Documentation**: Amazon FSx for Lustre now supports creating data repository associations on Persistent_1 and Scratch_2 file systems. -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.31.0](service/lambda/CHANGELOG.md#v1310-2023-04-07) - * **Feature**: This release adds a new Lambda InvokeWithResponseStream API to support streaming Lambda function responses. 
The release also adds a new InvokeMode parameter to Function Url APIs to control whether the response will be streamed or buffered. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.34.0](service/quicksight/CHANGELOG.md#v1340-2023-04-07) - * **Feature**: This release has two changes: adding the OR condition to tag-based RLS rules in CreateDataSet and UpdateDataSet; adding RefreshSchedule and Incremental RefreshProperties operations for users to programmatically configure SPICE dataset ingestions. -* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.19.3](service/redshiftdata/CHANGELOG.md#v1193-2023-04-07) - * **Documentation**: Update documentation of API descriptions as needed in support of temporary credentials with IAM identity. -* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.18.1](service/servicecatalog/CHANGELOG.md#v1181-2023-04-07) - * **Documentation**: Updates description for property - -# Release (2023-04-06) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.27.0](service/cloudformation/CHANGELOG.md#v1270-2023-04-06) - * **Feature**: Including UPDATE_COMPLETE as a failed status for DeleteStack waiter. -* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.22.0](service/greengrassv2/CHANGELOG.md#v1220-2023-04-06) - * **Feature**: Add support for SUCCEEDED value in coreDeviceExecutionStatus field. Documentation updates for Greengrass V2. -* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.21.0](service/proton/CHANGELOG.md#v1210-2023-04-06) - * **Feature**: This release adds support for the AWS Proton service sync feature. Service sync enables managing an AWS Proton service (creating and updating instances) and all of it's corresponding service instances from a Git repository. 
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.42.1](service/rds/CHANGELOG.md#v1421-2023-04-06) - * **Documentation**: Adds and updates the SDK examples - -# Release (2023-04-05) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.31.0](service/configservice/CHANGELOG.md#v1310-2023-04-05) - * **Feature**: This release adds resourceType enums for types released in March 2023. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.24.3](service/ecs/CHANGELOG.md#v1243-2023-04-05) - * **Documentation**: This is a document only updated to add information about Amazon Elastic Inference (EI). -* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.16.7](service/identitystore/CHANGELOG.md#v1167-2023-04-05) - * **Documentation**: Documentation updates for Identity Store CLI command reference. -* `github.com/aws/aws-sdk-go-v2/service/ivsrealtime`: [v1.1.0](service/ivsrealtime/CHANGELOG.md#v110-2023-04-05) - * **Feature**: Fix ParticipantToken ExpirationTime format -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.26.0](service/networkfirewall/CHANGELOG.md#v1260-2023-04-05) - * **Feature**: AWS Network Firewall now supports IPv6-only subnets. -* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.18.0](service/servicecatalog/CHANGELOG.md#v1180-2023-04-05) - * **Feature**: removed incorrect product type value -* `github.com/aws/aws-sdk-go-v2/service/vpclattice`: [v1.0.1](service/vpclattice/CHANGELOG.md#v101-2023-04-05) - * **Documentation**: This release removes the entities in the API doc model package for auth policies. - -# Release (2023-04-04) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.10.0](service/amplifyuibuilder/CHANGELOG.md#v1100-2023-04-04) - * **Feature**: Support StorageField and custom displays for data-bound options in form builder. Support non-string operands for predicates in collections. Support choosing client to get token from. 
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.28.1](service/autoscaling/CHANGELOG.md#v1281-2023-04-04) - * **Documentation**: Documentation updates for Amazon EC2 Auto Scaling -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.93.0](service/ec2/CHANGELOG.md#v1930-2023-04-04) - * **Feature**: C6in, M6in, M6idn, R6in and R6idn bare metal instances are powered by 3rd Generation Intel Xeon Scalable processors and offer up to 200 Gbps of network bandwidth. -* `github.com/aws/aws-sdk-go-v2/service/elasticinference`: [v1.13.0](service/elasticinference/CHANGELOG.md#v1130-2023-04-04) - * **Feature**: Updated public documentation for the Describe and Tagging APIs. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.73.0](service/sagemaker/CHANGELOG.md#v1730-2023-04-04) - * **Feature**: Amazon SageMaker Asynchronous Inference now allows customer's to receive failure model responses in S3 and receive success/failure model responses in SNS notifications. -* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.19.0](service/sagemakerruntime/CHANGELOG.md#v1190-2023-04-04) - * **Feature**: Amazon SageMaker Asynchronous Inference now provides customers a FailureLocation as a response parameter in InvokeEndpointAsync API to capture the model failure responses. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.28.0](service/wafv2/CHANGELOG.md#v1280-2023-04-04) - * **Feature**: This release rolls back association config feature for webACLs that protect CloudFront protections. 
- -# Release (2023-04-03) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.45.0](service/glue/CHANGELOG.md#v1450-2023-04-03) - * **Feature**: Add support for database-level federation -* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.21.0](service/lakeformation/CHANGELOG.md#v1210-2023-04-03) - * **Feature**: Add support for database-level federation -* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.18.0](service/licensemanager/CHANGELOG.md#v1180-2023-04-03) - * **Feature**: This release adds grant override options to the CreateGrantVersion API. These options can be used to specify grant replacement behavior during grant activation. -* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.15.0](service/mwaa/CHANGELOG.md#v1150-2023-04-03) - * **Feature**: This Amazon MWAA release adds the ability to customize the Apache Airflow environment by launching a shell script at startup. This shell script is hosted in your environment's Amazon S3 bucket. Amazon MWAA runs the script before installing requirements and initializing the Apache Airflow process. -* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.17.0](service/servicecatalog/CHANGELOG.md#v1170-2023-04-03) - * **Feature**: This release introduces Service Catalog support for Terraform open source. It enables 1. The notify* APIs to Service Catalog. These APIs are used by the terraform engine to notify the result of the provisioning engine execution. 2. Adds a new TERRAFORM_OPEN_SOURCE product type in CreateProduct API. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.27.0](service/wafv2/CHANGELOG.md#v1270-2023-04-03) - * **Feature**: For web ACLs that protect CloudFront protections, the default request body inspection size is now 16 KB, and you can use the new association configuration to increase the inspection size further, up to 64 KB. Sizes over 16 KB can incur additional costs. 
- -# Release (2023-03-31) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.92.1](service/ec2/CHANGELOG.md#v1921-2023-03-31) - * **Documentation**: Documentation updates for EC2 On Demand Capacity Reservations -* `github.com/aws/aws-sdk-go-v2/service/internetmonitor`: [v1.1.0](service/internetmonitor/CHANGELOG.md#v110-2023-03-31) - * **Feature**: This release adds a new feature for Amazon CloudWatch Internet Monitor that enables customers to deliver internet measurements to Amazon S3 buckets as well as CloudWatch Logs. -* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.10.1](service/resiliencehub/CHANGELOG.md#v1101-2023-03-31) - * **Documentation**: Adding EKS related documentation for appTemplateBody -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.31.1](service/s3/CHANGELOG.md#v1311-2023-03-31) - * **Documentation**: Documentation updates for Amazon S3 -* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.14.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1140-2023-03-31) - * **Feature**: In this release, you can now chose between soft delete and hard delete when calling the DeleteRecord API, so you have more flexibility when it comes to managing online store data. - -# Release (2023-03-30) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.25.0](service/athena/CHANGELOG.md#v1250-2023-03-30) - * **Feature**: Make DefaultExecutorDpuSize and CoordinatorDpuSize fields optional in StartSession -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.28.0](service/autoscaling/CHANGELOG.md#v1280-2023-03-30) - * **Feature**: Amazon EC2 Auto Scaling now supports Elastic Load Balancing traffic sources with the AttachTrafficSources, DetachTrafficSources, and DescribeTrafficSources APIs. 
This release also introduces a new activity status, "WaitingForConnectionDraining", for VPC Lattice to the DescribeScalingActivities API. -* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.23.0](service/batch/CHANGELOG.md#v1230-2023-03-30) - * **Feature**: This feature allows Batch on EKS to support configuration of Pod Labels through Metadata for Batch on EKS Jobs. -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.22.0](service/computeoptimizer/CHANGELOG.md#v1220-2023-03-30) - * **Feature**: This release adds support for HDD EBS volume types and io2 Block Express. We are also adding support for 61 new instance types and instances that have non consecutive runtime. -* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.12.0](service/drs/CHANGELOG.md#v1120-2023-03-30) - * **Feature**: Adding a field to the replication configuration APIs to support the auto replicate new disks feature. We also deprecated RetryDataReplication. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.92.0](service/ec2/CHANGELOG.md#v1920-2023-03-30) - * **Feature**: This release adds support for Tunnel Endpoint Lifecycle control, a new feature that provides Site-to-Site VPN customers with better visibility and control of their VPN tunnel maintenance updates. -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.24.0](service/emr/CHANGELOG.md#v1240-2023-03-30) - * **Feature**: Updated DescribeCluster and ListClusters API responses to include ErrorDetail that specifies error code, programmatically accessible error data,and an error message. ErrorDetail provides the underlying reason for cluster failure and recommends actions to simplify troubleshooting of EMR clusters. 
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.44.0](service/glue/CHANGELOG.md#v1440-2023-03-30) - * **Feature**: This release adds support for AWS Glue Data Quality, which helps you evaluate and monitor the quality of your data and includes the API for creating, deleting, or updating data quality rulesets, runs and evaluations. -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.20.0](service/guardduty/CHANGELOG.md#v1200-2023-03-30) - * **Feature**: Added EKS Runtime Monitoring feature support to existing detector, finding APIs and introducing new Coverage APIs -* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.23.0](service/imagebuilder/CHANGELOG.md#v1230-2023-03-30) - * **Feature**: Adds support for new image workflow details and image vulnerability detection. -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.21.0](service/ivs/CHANGELOG.md#v1210-2023-03-30) - * **Feature**: Amazon Interactive Video Service (IVS) now offers customers the ability to configure IVS channels to allow insecure RTMP ingest. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.39.0](service/kendra/CHANGELOG.md#v1390-2023-03-30) - * **Feature**: AWS Kendra now supports featured results for a query. -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.25.0](service/networkfirewall/CHANGELOG.md#v1250-2023-03-30) - * **Feature**: AWS Network Firewall added TLS inspection configurations to allow TLS traffic inspection. -* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.2.0](service/sagemakergeospatial/CHANGELOG.md#v120-2023-03-30) - * **Feature**: Amazon SageMaker geospatial capabilities now supports server-side encryption with customer managed KMS key and SageMaker notebooks with a SageMaker geospatial image in a Amazon SageMaker Domain with VPC only mode. 
-* `github.com/aws/aws-sdk-go-v2/service/vpclattice`: [v1.0.0](service/vpclattice/CHANGELOG.md#v100-2023-03-30) - * **Release**: New AWS service client module - * **Feature**: General Availability (GA) release of Amazon VPC Lattice -* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.19.0](service/wellarchitected/CHANGELOG.md#v1190-2023-03-30) - * **Feature**: AWS Well-Architected SDK now supports getting consolidated report metrics and generating a consolidated report PDF. - -# Release (2023-03-29) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.2.0](service/opensearchserverless/CHANGELOG.md#v120-2023-03-29) - * **Feature**: This release includes two new exception types "ServiceQuotaExceededException" and "OcuLimitExceededException". -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.42.0](service/rds/CHANGELOG.md#v1420-2023-03-29) - * **Feature**: Add support for creating a read replica DB instance from a Multi-AZ DB cluster. - -# Release (2023-03-28) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.15.0](service/ssmcontacts/CHANGELOG.md#v1150-2023-03-28) - * **Feature**: This release adds 12 new APIs as part of Oncall Schedule feature release, adds support for a new contact type: ONCALL_SCHEDULE. Check public documentation for AWS ssm-contacts for more information -* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.21.0](service/ssmincidents/CHANGELOG.md#v1210-2023-03-28) - * **Feature**: Increased maximum length of "TriggerDetails.rawData" to 10K characters and "IncidentSummary" to 8K characters. - -# Release (2023-03-27) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.24.0](service/athena/CHANGELOG.md#v1240-2023-03-27) - * **Feature**: Enforces a minimal level of encryption for the workgroup for query and calculation results that are written to Amazon S3. 
When enabled, workgroup users can set encryption only to the minimum level set by the administrator or higher when they submit queries. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.4.0](service/chimesdkvoice/CHANGELOG.md#v140-2023-03-27) - * **Feature**: Documentation updates for Amazon Chime SDK Voice. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.50.0](service/connect/CHANGELOG.md#v1500-2023-03-27) - * **Feature**: This release introduces support for RelatedContactId in the StartChatContact API. Interactive message and interactive message response have been added to the list of supported message content types for this API as well. -* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.15.7](service/connectparticipant/CHANGELOG.md#v1157-2023-03-27) - * **Documentation**: This release provides an update to the SendMessage API to handle interactive message response content-types. -* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.26.0](service/iotwireless/CHANGELOG.md#v1260-2023-03-27) - * **Feature**: Introducing new APIs that enable Sidewalk devices to communicate with AWS IoT Core through Sidewalk gateways. This will empower AWS customers to connect Sidewalk devices with other AWS IoT Services, creating possibilities for seamless integration and advanced device management. -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.31.0](service/medialive/CHANGELOG.md#v1310-2023-03-27) - * **Feature**: AWS Elemental MediaLive now supports ID3 tag insertion for audio only HLS output groups. AWS Elemental Link devices now support tagging. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.72.1](service/sagemaker/CHANGELOG.md#v1721-2023-03-27) - * **Documentation**: Fixed some improperly rendered links in SDK documentation. 
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.30.0](service/securityhub/CHANGELOG.md#v1300-2023-03-27) - * **Feature**: Added new resource detail objects to ASFF, including resources for AwsEksCluster, AWSS3Bucket, AwsEc2RouteTable and AwsEC2Instance. -* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.17.0](service/servicecatalogappregistry/CHANGELOG.md#v1170-2023-03-27) - * **Feature**: In this release, we started supporting ARN in applicationSpecifier and attributeGroupSpecifier. GetAttributeGroup, ListAttributeGroups and ListAttributeGroupsForApplication APIs will now have CreatedBy field in the response. -* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.13.0](service/voiceid/CHANGELOG.md#v1130-2023-03-27) - * **Feature**: Amazon Connect Voice ID now supports multiple fraudster watchlists. Every domain has a default watchlist where all existing fraudsters are placed by default. Custom watchlists may now be created, managed, and evaluated against for known fraudster detection. - -# Release (2023-03-24) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.25.7](service/cloudwatch/CHANGELOG.md#v1257-2023-03-24) - * **Documentation**: Doc-only update to correct alarm actions list -* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.23.0](service/comprehend/CHANGELOG.md#v1230-2023-03-24) - * **Feature**: This release adds a new field (FlywheelArn) to the EntitiesDetectionJobProperties object. The FlywheelArn field is returned in the DescribeEntitiesDetectionJob and ListEntitiesDetectionJobs responses when the EntitiesDetection job is started with a FlywheelArn instead of an EntityRecognizerArn . -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.41.0](service/rds/CHANGELOG.md#v1410-2023-03-24) - * **Feature**: Added error code CreateCustomDBEngineVersionFault for when the create custom engine version for Custom engines fails. 
- -# Release (2023-03-23) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.22.0](service/batch/CHANGELOG.md#v1220-2023-03-23) - * **Feature**: This feature allows Batch to support configuration of ephemeral storage size for jobs running on FARGATE -* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.11.0](service/chimesdkidentity/CHANGELOG.md#v1110-2023-03-23) - * **Feature**: AppInstanceBots can be used to add a bot powered by Amazon Lex to chat channels. ExpirationSettings provides automatic resource deletion for AppInstanceUsers. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.3.0](service/chimesdkmediapipelines/CHANGELOG.md#v130-2023-03-23) - * **Feature**: This release adds Amazon Chime SDK call analytics. Call analytics include voice analytics, which provides speaker search and voice tone analysis. These capabilities can be used with Amazon Transcribe and Transcribe Call Analytics to generate machine-learning-powered insights from real-time audio. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.14.0](service/chimesdkmessaging/CHANGELOG.md#v1140-2023-03-23) - * **Feature**: ExpirationSettings provides automatic resource deletion for Channels. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.3.0](service/chimesdkvoice/CHANGELOG.md#v130-2023-03-23) - * **Feature**: This release adds Amazon Chime SDK call analytics. Call analytics include voice analytics, which provides speaker search and voice tone analysis. These capabilities can be used with Amazon Transcribe and Transcribe Call Analytics to generate machine-learning-powered insights from real-time audio. -* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.18.0](service/codeartifact/CHANGELOG.md#v1180-2023-03-23) - * **Feature**: Repository CreationTime is added to the CreateRepository and ListRepositories API responses. 
-* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.19.0](service/guardduty/CHANGELOG.md#v1190-2023-03-23) - * **Feature**: Adds AutoEnableOrganizationMembers attribute to DescribeOrganizationConfiguration and UpdateOrganizationConfiguration APIs. -* `github.com/aws/aws-sdk-go-v2/service/ivsrealtime`: [v1.0.0](service/ivsrealtime/CHANGELOG.md#v100-2023-03-23) - * **Release**: New AWS service client module - * **Feature**: Initial release of the Amazon Interactive Video Service RealTime API. -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.34.0](service/mediaconvert/CHANGELOG.md#v1340-2023-03-23) - * **Feature**: AWS Elemental MediaConvert SDK now supports passthrough of ID3v2 tags for audio inputs to audio-only HLS outputs. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.72.0](service/sagemaker/CHANGELOG.md#v1720-2023-03-23) - * **Feature**: Amazon SageMaker Autopilot adds two new APIs - CreateAutoMLJobV2 and DescribeAutoMLJobV2. Amazon SageMaker Notebook Instances now supports the ml.geospatial.interactive instance type. -* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.21.0](service/servicediscovery/CHANGELOG.md#v1210-2023-03-23) - * **Feature**: Reverted the throttling exception RequestLimitExceeded for AWS Cloud Map APIs introduced in SDK version 1.12.424 2023-03-09 to previous exception specified in the ErrorCode. -* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.21.0](service/textract/CHANGELOG.md#v1210-2023-03-23) - * **Feature**: The AnalyzeDocument - Tables feature adds support for new elements in the API: table titles, footers, section titles, summary cells/tables, and table type. - -# Release (2023-03-22) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.8](service/iam/CHANGELOG.md#v1198-2023-03-22) - * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM). 
-* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.11.0](service/iottwinmaker/CHANGELOG.md#v1110-2023-03-22) - * **Feature**: This release adds support of adding metadata when creating a new scene or updating an existing scene. -* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.17.8](service/networkmanager/CHANGELOG.md#v1178-2023-03-22) - * **Documentation**: This release includes an update to create-transit-gateway-route-table-attachment, showing example usage for TransitGatewayRouteTableArn. -* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.10.0](service/resiliencehub/CHANGELOG.md#v1100-2023-03-22) - * **Feature**: This release provides customers with the ability to import resources from within an EKS cluster and assess the resiliency of EKS cluster workloads. -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.36.0](service/ssm/CHANGELOG.md#v1360-2023-03-22) - * **Feature**: This Patch Manager release supports creating, updating, and deleting Patch Baselines for AmazonLinux2023, AlmaLinux. - -# Release (2023-03-21) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.13.0](service/chimesdkmessaging/CHANGELOG.md#v1130-2023-03-21) - * **Feature**: Amazon Chime SDK messaging customers can now manage streaming configuration for messaging data for archival and analysis. -* `github.com/aws/aws-sdk-go-v2/service/cleanrooms`: [v1.1.0](service/cleanrooms/CHANGELOG.md#v110-2023-03-21) - * **Feature**: GA Release of AWS Clean Rooms, Added Tagging Functionality -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.91.0](service/ec2/CHANGELOG.md#v1910-2023-03-21) - * **Feature**: This release adds support for AWS Network Firewall, AWS PrivateLink, and Gateway Load Balancers to Amazon VPC Reachability Analyzer, and it makes the path destination optional as long as a destination address in the filter at source is provided. 
-* `github.com/aws/aws-sdk-go-v2/service/internal/s3shared`: [v1.14.0](service/internal/s3shared/CHANGELOG.md#v1140-2023-03-21) - * **Feature**: port v1 sdk 100-continue http header customization for s3 PutObject/UploadPart request and enable user config -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.28.0](service/iotsitewise/CHANGELOG.md#v1280-2023-03-21) - * **Feature**: Provide support for tagging of data streams and enabling tag based authorization for property alias -* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.18.0](service/mgn/CHANGELOG.md#v1180-2023-03-21) - * **Feature**: This release introduces the Import and export feature and expansion of the post-launch actions -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.31.0](service/s3/CHANGELOG.md#v1310-2023-03-21) - * **Feature**: port v1 sdk 100-continue http header customization for s3 PutObject/UploadPart request and enable user config - -# Release (2023-03-20) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.19.0](service/applicationautoscaling/CHANGELOG.md#v1190-2023-03-20) - * **Feature**: With this release customers can now tag their Application Auto Scaling registered targets with key-value pairs and manage IAM permissions for all the tagged resources centrally. -* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.20.0](service/neptune/CHANGELOG.md#v1200-2023-03-20) - * **Feature**: This release makes following few changes. db-cluster-identifier is now a required parameter of create-db-instance. describe-db-cluster will now return PendingModifiedValues and GlobalClusterIdentifier fields in the response. 
-* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.16.0](service/s3outposts/CHANGELOG.md#v1160-2023-03-20) - * **Feature**: S3 On Outposts added support for endpoint status, and a failed endpoint reason, if any -* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.14.0](service/workdocs/CHANGELOG.md#v1140-2023-03-20) - * **Feature**: This release adds a new API, SearchResources, which enable users to search through metadata and content of folders, documents, document versions and comments in a WorkDocs site. - -# Release (2023-03-17) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.6.0](service/billingconductor/CHANGELOG.md#v160-2023-03-17) - * **Feature**: This release adds a new filter to ListAccountAssociations API and a new filter to ListBillingGroups API. -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.30.0](service/configservice/CHANGELOG.md#v1300-2023-03-17) - * **Feature**: This release adds resourceType enums for types released from October 2022 through February 2023. -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.25.0](service/databasemigrationservice/CHANGELOG.md#v1250-2023-03-17) - * **Feature**: S3 setting to create AWS Glue Data Catalog. Oracle setting to control conversion of timestamp column. Support for Kafka SASL Plain authentication. Setting to map boolean from PostgreSQL to Redshift. SQL Server settings to force lob lookup on inline LOBs and to control access of database logs. - -# Release (2023-03-16) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/config`: [v1.18.18](config/CHANGELOG.md#v11818-2023-03-16) - * **Bug Fix**: Allow RoleARN to be set as functional option on STS WebIdentityRoleOptions. Fixes aws/aws-sdk-go-v2#2015. 
-* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.18.0](service/guardduty/CHANGELOG.md#v1180-2023-03-16) - * **Feature**: Updated 9 APIs for feature enablement to reflect expansion of GuardDuty to features. Added new APIs and updated existing APIs to support RDS Protection GA. -* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.2.7](service/resourceexplorer2/CHANGELOG.md#v127-2023-03-16) - * **Documentation**: Documentation updates for APIs. -* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.18.7](service/sagemakerruntime/CHANGELOG.md#v1187-2023-03-16) - * **Documentation**: Documentation updates for SageMaker Runtime - -# Release (2023-03-15) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.9.0](service/migrationhubstrategy/CHANGELOG.md#v190-2023-03-15) - * **Feature**: This release adds the binary analysis that analyzes IIS application DLLs on Windows and Java applications on Linux to provide anti-pattern report without configuring access to the source code. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.31.0](service/s3control/CHANGELOG.md#v1310-2023-03-15) - * **Feature**: Added support for S3 Object Lambda aliases. -* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.3.0](service/securitylake/CHANGELOG.md#v130-2023-03-15) - * **Feature**: Make Create/Get/ListSubscribers APIs return resource share ARN and name so they can be used to validate the RAM resource share to accept. GetDatalake can be used to track status of UpdateDatalake and DeleteDatalake requests. 
- -# Release (2023-03-14) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`: [v1.13.0](feature/ec2/imds/CHANGELOG.md#v1130-2023-03-14) - * **Feature**: Add flag to disable IMDSv1 fallback -* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.18.0](service/applicationautoscaling/CHANGELOG.md#v1180-2023-03-14) - * **Feature**: Application Auto Scaling customers can now use mathematical functions to customize the metric used with Target Tracking policies within the policy configuration itself, saving the cost and effort of publishing the customizations as a separate metric. -* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.19.0](service/dataexchange/CHANGELOG.md#v1190-2023-03-14) - * **Feature**: This release enables data providers to license direct access to S3 objects encrypted with Customer Managed Keys (CMK) in AWS KMS through AWS Data Exchange. Subscribers can use these keys to decrypt, then use the encrypted S3 objects shared with them, without creating or managing copies. -* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.7](service/directconnect/CHANGELOG.md#v1187-2023-03-14) - * **Documentation**: describe-direct-connect-gateway-associations includes a new status, updating, indicating that the association is currently in-process of updating. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.90.0](service/ec2/CHANGELOG.md#v1900-2023-03-14) - * **Feature**: This release adds a new DnsOptions key (PrivateDnsOnlyForInboundResolverEndpoint) to CreateVpcEndpoint and ModifyVpcEndpoint APIs. 
-* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.6](service/iam/CHANGELOG.md#v1196-2023-03-14) - * **Documentation**: Documentation only updates to correct customer-reported issues -* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.2.0](service/keyspaces/CHANGELOG.md#v120-2023-03-14) - * **Feature**: Adding support for client-side timestamps -* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.14.6](service/support/CHANGELOG.md#v1146-2023-03-14) - * **Announcement**: Model regenerated with support for null string values to properly implement `support` service operations `DescribeTrustedAdvisorCheckRefreshStatuses` and `DescribeTrustedAdvisorCheckSummaries` - -# Release (2023-03-13) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.15.0](service/appintegrations/CHANGELOG.md#v1150-2023-03-13) - * **Feature**: Adds FileConfiguration to Amazon AppIntegrations CreateDataIntegration supporting scheduled downloading of third party files into Amazon Connect from sources such as Microsoft SharePoint. -* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.20.2](service/lakeformation/CHANGELOG.md#v1202-2023-03-13) - * **Documentation**: This release updates the documentation regarding Get/Update DataCellsFilter -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.30.0](service/s3control/CHANGELOG.md#v1300-2023-03-13) - * **Feature**: Added support for cross-account Multi-Region Access Points. Added support for S3 Replication for S3 on Outposts. -* `github.com/aws/aws-sdk-go-v2/service/tnb`: [v1.1.0](service/tnb/CHANGELOG.md#v110-2023-03-13) - * **Feature**: This release adds tagging support to the following Network Instance APIs : Instantiate, Update, Terminate. 
-* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.13.0](service/wisdom/CHANGELOG.md#v1130-2023-03-13) - * **Feature**: This release extends Wisdom CreateKnowledgeBase API to support SharePoint connector type by removing the @required trait for objectField - -# Release (2023-03-10) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.4.0](service/ivschat/CHANGELOG.md#v140-2023-03-10) - * **Feature**: This release adds a new exception returned when calling AWS IVS chat UpdateLoggingConfiguration. Now UpdateLoggingConfiguration can return ConflictException when invalid updates are made in sequence to Logging Configurations. -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.19.0](service/secretsmanager/CHANGELOG.md#v1190-2023-03-10) - * **Feature**: The type definitions of SecretString and SecretBinary now have a minimum length of 1 in the model to match the exception thrown when you pass in empty values. - -# Release (2023-03-09) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.17.0](service/codeartifact/CHANGELOG.md#v1170-2023-03-09) - * **Feature**: This release introduces the generic package format, a mechanism for storing arbitrary binary assets. It also adds a new API, PublishPackageVersion, to allow for publishing generic packages. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.49.0](service/connect/CHANGELOG.md#v1490-2023-03-09) - * **Feature**: This release adds a new API, GetMetricDataV2, which returns metric data for Amazon Connect. 
-* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.11.0](service/evidently/CHANGELOG.md#v1110-2023-03-09) - * **Feature**: Updated entity override documentation -* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.17.5](service/networkmanager/CHANGELOG.md#v1175-2023-03-09) - * **Documentation**: This update provides example usage for TransitGatewayRouteTableArn. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.33.0](service/quicksight/CHANGELOG.md#v1330-2023-03-09) - * **Feature**: This release has two changes: add state persistence feature for embedded dashboard and console in GenerateEmbedUrlForRegisteredUser API; add properties for hidden collapsed row dimensions in PivotTableOptions. -* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.19.0](service/redshiftdata/CHANGELOG.md#v1190-2023-03-09) - * **Feature**: Added support for Redshift Serverless workgroup-arn wherever the WorkgroupName parameter is available. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.71.0](service/sagemaker/CHANGELOG.md#v1710-2023-03-09) - * **Feature**: Amazon SageMaker Inference now allows SSM access to customer's model container by setting the "EnableSSMAccess" parameter for a ProductionVariant in CreateEndpointConfig API. -* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.20.0](service/servicediscovery/CHANGELOG.md#v1200-2023-03-09) - * **Feature**: Updated all AWS Cloud Map APIs to provide consistent throttling exception (RequestLimitExceeded) -* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.17.0](service/sesv2/CHANGELOG.md#v1170-2023-03-09) - * **Feature**: This release introduces a new recommendation in Virtual Deliverability Manager Advisor, which detects missing or misconfigured Brand Indicator for Message Identification (BIMI) DNS records for customer sending identities. 
- -# Release (2023-03-08) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.23.0](service/athena/CHANGELOG.md#v1230-2023-03-08) - * **Feature**: A new field SubstatementType is added to GetQueryExecution API, so customers have an error free way to detect the query type and interpret the result. -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.19.0](service/dynamodb/CHANGELOG.md#v1190-2023-03-08) - * **Feature**: Adds deletion protection support to DynamoDB tables. Tables with deletion protection enabled cannot be deleted. Deletion protection is disabled by default, can be enabled via the CreateTable or UpdateTable APIs, and is visible in TableDescription. This setting is not replicated for Global Tables. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.89.0](service/ec2/CHANGELOG.md#v1890-2023-03-08) - * **Feature**: Introducing Amazon EC2 C7g, M7g and R7g instances, powered by the latest generation AWS Graviton3 processors and deliver up to 25% better performance over Graviton2-based instances. -* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.20.0](service/lakeformation/CHANGELOG.md#v1200-2023-03-08) - * **Feature**: This release adds two new API support "GetDataCellsFiler" and "UpdateDataCellsFilter", and also updates the corresponding documentation. -* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.21.0](service/mediapackage/CHANGELOG.md#v1210-2023-03-08) - * **Feature**: This release provides the date and time live resources were created. -* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.22.0](service/mediapackagevod/CHANGELOG.md#v1220-2023-03-08) - * **Feature**: This release provides the date and time VOD resources were created. 
-* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.17.0](service/route53resolver/CHANGELOG.md#v1170-2023-03-08) - * **Feature**: Add dual-stack and IPv6 support for Route 53 Resolver Endpoint, Add IPv6 target IP in Route 53 Resolver Forwarding Rule -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.70.0](service/sagemaker/CHANGELOG.md#v1700-2023-03-08) - * **Feature**: There needs to be a user identity to specify the SageMaker user who performs each action regarding the entity. However, there is not a unified concept of user identity across the SageMaker service that could be used today. - -# Release (2023-03-07) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.24.0](service/databasemigrationservice/CHANGELOG.md#v1240-2023-03-07) - * **Feature**: This release adds DMS Fleet Advisor Target Recommendation APIs and exposes functionality for DMS Fleet Advisor. It adds functionality to start Target Recommendation calculation. -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.22.1](service/location/CHANGELOG.md#v1221-2023-03-07) - * **Documentation**: Documentation update for the release of 3 additional map styles for use with Open Data Maps: Open Data Standard Dark, Open Data Visualization Light & Open Data Visualization Dark. - -# Release (2023-03-06) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.10.0](service/account/CHANGELOG.md#v1100-2023-03-06) - * **Feature**: AWS Account alternate contact email addresses can now have a length of 254 characters and contain the character "|". -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.20.6](service/ivs/CHANGELOG.md#v1206-2023-03-06) - * **Documentation**: Updated text description in DeleteChannel, Stream, and StreamSummary. 
- -# Release (2023-03-03) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.18.6](service/dynamodb/CHANGELOG.md#v1186-2023-03-03) - * **Documentation**: Documentation updates for DynamoDB. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.88.0](service/ec2/CHANGELOG.md#v1880-2023-03-03) - * **Feature**: This release adds support for a new boot mode for EC2 instances called 'UEFI Preferred'. -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.27.1](service/macie2/CHANGELOG.md#v1271-2023-03-03) - * **Documentation**: Documentation updates for Amazon Macie -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.33.0](service/mediaconvert/CHANGELOG.md#v1330-2023-03-03) - * **Feature**: The AWS Elemental MediaConvert SDK has improved handling for different input and output color space combinations. -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.30.0](service/medialive/CHANGELOG.md#v1300-2023-03-03) - * **Feature**: AWS Elemental MediaLive adds support for Nielsen watermark timezones. -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.26.0](service/transcribe/CHANGELOG.md#v1260-2023-03-03) - * **Feature**: Amazon Transcribe now supports role access for these API operations: CreateVocabulary, UpdateVocabulary, CreateVocabularyFilter, and UpdateVocabularyFilter. - -# Release (2023-03-02) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.35.0](service/iot/CHANGELOG.md#v1350-2023-03-02) - * **Feature**: A recurring maintenance window is an optional configuration used for rolling out the job document to all devices in the target group observing a predetermined start time, duration, and frequency that the maintenance window occurs. 
-* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.8.0](service/migrationhubstrategy/CHANGELOG.md#v180-2023-03-02) - * **Feature**: This release updates the File Import API to allow importing servers already discovered by customers with reduced pre-requisites. -* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.19.0](service/organizations/CHANGELOG.md#v1190-2023-03-02) - * **Feature**: This release introduces a new reason code, ACCOUNT_CREATION_NOT_COMPLETE, to ConstraintViolationException in CreateOrganization API. -* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.17.0](service/pi/CHANGELOG.md#v1170-2023-03-02) - * **Feature**: This release adds a new field PeriodAlignment to allow the customer specifying the returned timestamp of time periods to be either the start or end time. -* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.2.0](service/pipes/CHANGELOG.md#v120-2023-03-02) - * **Feature**: This release fixes some input parameter range and patterns. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.69.0](service/sagemaker/CHANGELOG.md#v1690-2023-03-02) - * **Feature**: Add a new field "EndpointMetrics" in SageMaker Inference Recommender "ListInferenceRecommendationsJobSteps" API response. 
- -# Release (2023-03-01) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.2.0](service/codecatalyst/CHANGELOG.md#v120-2023-03-01) - * **Feature**: Published Dev Environments StopDevEnvironmentSession API -* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.19.0](service/pricing/CHANGELOG.md#v1190-2023-03-01) - * **Feature**: This release adds 2 new APIs - ListPriceLists which returns a list of applicable price lists, and GetPriceListFileUrl which outputs a URL to retrieve your price lists from the generated file from ListPriceLists -* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.15.0](service/s3outposts/CHANGELOG.md#v1150-2023-03-01) - * **Feature**: S3 on Outposts introduces a new API ListOutpostsWithS3, with this API you can list all your Outposts with S3 capacity. - -# Release (2023-02-28) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.22.0](service/comprehend/CHANGELOG.md#v1220-2023-02-28) - * **Feature**: Amazon Comprehend now supports flywheels to help you train and manage new model versions for custom models. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.87.0](service/ec2/CHANGELOG.md#v1870-2023-02-28) - * **Feature**: This release allows IMDS support to be set to v2-only on an existing AMI, so that all future instances launched from that AMI will use IMDSv2 by default. -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.20.6](service/kms/CHANGELOG.md#v1206-2023-02-28) - * **Documentation**: AWS KMS is deprecating the RSAES_PKCS1_V1_5 wrapping algorithm option in the GetParametersForImport API that is used in the AWS KMS Import Key Material feature. AWS KMS will end support for this wrapping algorithm by October 1, 2023. 
-* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.26.0](service/lightsail/CHANGELOG.md#v1260-2023-02-28) - * **Feature**: This release adds Lightsail for Research feature support, such as GUI session access, cost estimates, stop instance on idle, and disk auto mount. -* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.15.0](service/managedblockchain/CHANGELOG.md#v1150-2023-02-28) - * **Feature**: This release adds support for tagging to the accessor resource in Amazon Managed Blockchain -* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.2.0](service/omics/CHANGELOG.md#v120-2023-02-28) - * **Feature**: Minor model changes to accommodate batch imports feature - -# Release (2023-02-27) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.23.0](service/devopsguru/CHANGELOG.md#v1230-2023-02-27) - * **Feature**: This release adds the description field on ListAnomaliesForInsight and DescribeAnomaly API responses for proactive anomalies. -* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.11.0](service/drs/CHANGELOG.md#v1110-2023-02-27) - * **Feature**: New fields were added to reflect availability zone data in source server and recovery instance description commands responses, as well as source server launch status. -* `github.com/aws/aws-sdk-go-v2/service/internetmonitor`: [v1.0.0](service/internetmonitor/CHANGELOG.md#v100-2023-02-27) - * **Release**: New AWS service client module - * **Feature**: CloudWatch Internet Monitor is a new service within CloudWatch that will help application developers and network engineers continuously monitor internet performance metrics such as availability and performance between their AWS-hosted applications and end-users of these applications -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.30.0](service/lambda/CHANGELOG.md#v1300-2023-02-27) - * **Feature**: This release adds the ability to create ESMs with Document DB change streams as event source. 
For more information see https://docs.aws.amazon.com/lambda/latest/dg/with-documentdb.html. -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.32.0](service/mediaconvert/CHANGELOG.md#v1320-2023-02-27) - * **Feature**: The AWS Elemental MediaConvert SDK has added support for HDR10 to SDR tone mapping, and animated GIF video input sources. -* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.16.0](service/timestreamwrite/CHANGELOG.md#v1160-2023-02-27) - * **Feature**: This release adds the ability to ingest batched historical data or migrate data in bulk from S3 into Timestream using CSV files. - -# Release (2023-02-24) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.48.0](service/connect/CHANGELOG.md#v1480-2023-02-24) - * **Feature**: StartTaskContact API now supports linked task creation with a new optional RelatedContactId parameter -* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.3.0](service/connectcases/CHANGELOG.md#v130-2023-02-24) - * **Feature**: This release adds the ability to delete domains through the DeleteDomain API. For more information see https://docs.aws.amazon.com/cases/latest/APIReference/Welcome.html -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.27.5](service/redshift/CHANGELOG.md#v1275-2023-02-24) - * **Documentation**: Documentation updates for Redshift API bringing it in line with IAM best practices. 
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.29.0](service/securityhub/CHANGELOG.md#v1290-2023-02-24) - * **Feature**: New Security Hub APIs and updates to existing APIs that help you consolidate control findings and enable and disable controls across all supported standards -* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.16.5](service/servicecatalog/CHANGELOG.md#v1165-2023-02-24) - * **Documentation**: Documentation updates for Service Catalog - -# Release (2023-02-23) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.25.0](service/appflow/CHANGELOG.md#v1250-2023-02-23) - * **Feature**: This release enables the customers to choose whether to use Private Link for Metadata and Authorization call when using a private Salesforce connections -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.24.0](service/ecs/CHANGELOG.md#v1240-2023-02-23) - * **Feature**: This release supports deleting Amazon ECS task definitions that are in the INACTIVE state. -* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.12.3](service/grafana/CHANGELOG.md#v1123-2023-02-23) - * **Documentation**: Doc-only update. Updated information on attached role policies for customer provided roles -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.17.6](service/guardduty/CHANGELOG.md#v1176-2023-02-23) - * **Documentation**: Updated API and data types descriptions for CreateFilter, UpdateFilter, and TriggerDetails. -* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.25.0](service/iotwireless/CHANGELOG.md#v1250-2023-02-23) - * **Feature**: In this release, we add additional capabilities for the FUOTA which allows user to configure the fragment size, the sending interval and the redundancy ratio of the FUOTA tasks -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.22.0](service/location/CHANGELOG.md#v1220-2023-02-23) - * **Feature**: This release adds support for using Maps APIs with an API Key in addition to AWS Cognito. 
This includes support for adding, listing, updating and deleting API Keys. -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.27.0](service/macie2/CHANGELOG.md#v1270-2023-02-23) - * **Feature**: This release adds support for a new finding type, Policy:IAMUser/S3BucketSharedWithCloudFront, and S3 bucket metadata that indicates if a bucket is shared with an Amazon CloudFront OAI or OAC. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.26.0](service/wafv2/CHANGELOG.md#v1260-2023-02-23) - * **Feature**: You can now associate an AWS WAF v2 web ACL with an AWS App Runner service. - -# Release (2023-02-22) - -## General Highlights -* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.2.0](service/chimesdkvoice/CHANGELOG.md#v120-2023-02-22) - * **Feature**: This release introduces support for Voice Connector media metrics in the Amazon Chime SDK Voice namespace -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.26.0](service/cloudfront/CHANGELOG.md#v1260-2023-02-22) - * **Feature**: CloudFront now supports block lists in origin request policies so that you can forward all headers, cookies, or query string from viewer requests to the origin *except* for those specified in the block list. -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.23.0](service/datasync/CHANGELOG.md#v1230-2023-02-22) - * **Feature**: AWS DataSync has relaxed the minimum length constraint of AccessKey for Object Storage locations to 1. -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.15.0](service/opensearch/CHANGELOG.md#v1150-2023-02-22) - * **Feature**: This release lets customers configure Off-peak window and software update related properties for a new/existing domain. 
It enhances the capabilities of StartServiceSoftwareUpdate API; adds 2 new APIs - ListScheduledActions & UpdateScheduledAction; and allows Auto-tune to make use of Off-peak window. -* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.10.0](service/rum/CHANGELOG.md#v1100-2023-02-22) - * **Feature**: CloudWatch RUM now supports CloudWatch Custom Metrics -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.35.5](service/ssm/CHANGELOG.md#v1355-2023-02-22) - * **Documentation**: Document only update for Feb 2023 - -# Release (2023-02-21) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.32.0](service/quicksight/CHANGELOG.md#v1320-2023-02-21) - * **Feature**: S3 data sources now accept a custom IAM role. -* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.9.0](service/resiliencehub/CHANGELOG.md#v190-2023-02-21) - * **Feature**: In this release we improved resilience hub application creation and maintenance by introducing new resource and app component crud APIs, improving visibility and maintenance of application input sources and added support for additional information attributes to be provided by customers. -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.28.4](service/securityhub/CHANGELOG.md#v1284-2023-02-21) - * **Documentation**: Documentation updates for AWS Security Hub -* `github.com/aws/aws-sdk-go-v2/service/tnb`: [v1.0.0](service/tnb/CHANGELOG.md#v100-2023-02-21) - * **Release**: New AWS service client module - * **Feature**: This is the initial SDK release for AWS Telco Network Builder (TNB). AWS Telco Network Builder is a network automation service that helps you deploy and manage telecom networks. 
- -# Release (2023-02-20) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.17.5 - * **Bug Fix**: fix int overflow bug on 32 bit architecture -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.24.0](service/auditmanager/CHANGELOG.md#v1240-2023-02-20) - * **Feature**: This release introduces a ServiceQuotaExceededException to the UpdateAssessmentFrameworkShare API operation. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.47.0](service/connect/CHANGELOG.md#v1470-2023-02-20) - * **Feature**: Reasons for failed diff has been approved by SDK Reviewer - -# Release (2023-02-17) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.17.0](service/apprunner/CHANGELOG.md#v1170-2023-02-17) - * **Feature**: This release supports removing MaxSize limit for AutoScalingConfiguration. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.43.0](service/glue/CHANGELOG.md#v1430-2023-02-17) - * **Feature**: Release of Delta Lake Data Lake Format for Glue Studio Service - -# Release (2023-02-16) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.23.0](service/emr/CHANGELOG.md#v1230-2023-02-16) - * **Feature**: This release provides customers the ability to define a timeout period for procuring capacity during a resize operation for Instance Fleet clusters. Customers can specify this timeout using the ResizeSpecifications parameter supported by RunJobFlow, ModifyInstanceFleet and AddInstanceFleet APIs. 
-* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.12.0](service/grafana/CHANGELOG.md#v1120-2023-02-16) - * **Feature**: With this release Amazon Managed Grafana now supports inbound Network Access Control that helps you to restrict user access to your Grafana workspaces -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.20.3](service/ivs/CHANGELOG.md#v1203-2023-02-16) - * **Documentation**: Doc-only update. Updated text description in DeleteChannel, Stream, and StreamSummary. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.25.1](service/wafv2/CHANGELOG.md#v1251-2023-02-16) - * **Documentation**: Added a notice for account takeover prevention (ATP). The interface incorrectly lets you to configure ATP response inspection in regional web ACLs in Region US East (N. Virginia), without returning an error. ATP response inspection is only available in web ACLs that protect CloudFront distributions. - -# Release (2023-02-15) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.19.3](service/accessanalyzer/CHANGELOG.md#v1193-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.9.1](service/account/CHANGELOG.md#v191-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.17.3](service/acm/CHANGELOG.md#v1173-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.21.2](service/acmpca/CHANGELOG.md#v1212-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/alexaforbusiness`: [v1.15.2](service/alexaforbusiness/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.16.2](service/amp/CHANGELOG.md#v1162-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.13.2](service/amplify/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.14.2](service/amplifybackend/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.9.2](service/amplifyuibuilder/CHANGELOG.md#v192-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.16.3](service/apigateway/CHANGELOG.md#v1163-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.11.2](service/apigatewaymanagementapi/CHANGELOG.md#v1112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.13.3](service/apigatewayv2/CHANGELOG.md#v1133-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.17.1](service/appconfig/CHANGELOG.md#v1171-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.6.1](service/appconfigdata/CHANGELOG.md#v161-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.24.2](service/appflow/CHANGELOG.md#v1242-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.14.2](service/appintegrations/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.17.3](service/applicationautoscaling/CHANGELOG.md#v1173-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.10.2](service/applicationcostprofiler/CHANGELOG.md#v1102-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.15.2](service/applicationdiscoveryservice/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.17.3](service/applicationinsights/CHANGELOG.md#v1173-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.17.2](service/appmesh/CHANGELOG.md#v1172-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.16.2](service/apprunner/CHANGELOG.md#v1162-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.20.2](service/appstream/CHANGELOG.md#v1202-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.19.2](service/appsync/CHANGELOG.md#v1192-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/arczonalshift`: [v1.1.3](service/arczonalshift/CHANGELOG.md#v113-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
- * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.22.2](service/athena/CHANGELOG.md#v1222-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.23.2](service/auditmanager/CHANGELOG.md#v1232-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.13.2](service/autoscalingplans/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.20.1](service/backup/CHANGELOG.md#v1201-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.9.2](service/backupgateway/CHANGELOG.md#v192-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/backupstorage`: [v1.1.2](service/backupstorage/CHANGELOG.md#v112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.21.3](service/batch/CHANGELOG.md#v1213-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.5.2](service/billingconductor/CHANGELOG.md#v152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.17.2](service/braket/CHANGELOG.md#v1172-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.14.2](service/budgets/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.22.2](service/chime/CHANGELOG.md#v1222-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.10.2](service/chimesdkidentity/CHANGELOG.md#v1102-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.2.2](service/chimesdkmediapipelines/CHANGELOG.md#v122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.14.3](service/chimesdkmeetings/CHANGELOG.md#v1143-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
- * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.12.2](service/chimesdkmessaging/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.1.2](service/chimesdkvoice/CHANGELOG.md#v112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/cleanrooms`: [v1.0.2](service/cleanrooms/CHANGELOG.md#v102-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.17.2](service/cloud9/CHANGELOG.md#v1172-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.11.3](service/cloudcontrol/CHANGELOG.md#v1113-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.13.2](service/clouddirectory/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.13.2](service/cloudhsm/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.14.2](service/cloudhsmv2/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/cloudsearchdomain`: [v1.12.2](service/cloudsearchdomain/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.24.0](service/cloudtrail/CHANGELOG.md#v1240-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Feature**: This release adds an InsufficientEncryptionPolicyException type to the StartImport endpoint - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/cloudtraildata`: [v1.0.2](service/cloudtraildata/CHANGELOG.md#v102-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.15.3](service/cloudwatchevents/CHANGELOG.md#v1153-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.20.3](service/cloudwatchlogs/CHANGELOG.md#v1203-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.16.2](service/codeartifact/CHANGELOG.md#v1162-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.20.3](service/codebuild/CHANGELOG.md#v1203-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.1.2](service/codecatalyst/CHANGELOG.md#v112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.14.2](service/codecommit/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.16.3](service/codedeploy/CHANGELOG.md#v1163-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
- * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.13.2](service/codeguruprofiler/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.17.2](service/codegurureviewer/CHANGELOG.md#v1172-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.14.2](service/codepipeline/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.13.2](service/codestar/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/codestarconnections`: [v1.14.2](service/codestarconnections/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.14.2](service/codestarnotifications/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.15.2](service/cognitoidentity/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.22.2](service/cognitoidentityprovider/CHANGELOG.md#v1222-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/cognitosync`: [v1.12.2](service/cognitosync/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.21.2](service/comprehend/CHANGELOG.md#v1212-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.15.2](service/comprehendmedical/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.21.1](service/computeoptimizer/CHANGELOG.md#v1211-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.29.3](service/configservice/CHANGELOG.md#v1293-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.46.1](service/connect/CHANGELOG.md#v1461-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.2.3](service/connectcampaigns/CHANGELOG.md#v123-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.2.3](service/connectcases/CHANGELOG.md#v123-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.13.2](service/connectcontactlens/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.15.2](service/connectparticipant/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/controltower`: [v1.1.2](service/controltower/CHANGELOG.md#v112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: [v1.15.2](service/costandusagereportservice/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.25.2](service/costexplorer/CHANGELOG.md#v1252-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.23.1](service/customerprofiles/CHANGELOG.md#v1231-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.23.3](service/databasemigrationservice/CHANGELOG.md#v1233-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.21.3](service/databrew/CHANGELOG.md#v1213-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.18.2](service/dataexchange/CHANGELOG.md#v1182-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/datapipeline`: [v1.14.2](service/datapipeline/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.22.1](service/datasync/CHANGELOG.md#v1221-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
- * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.12.2](service/dax/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.18.2](service/detective/CHANGELOG.md#v1182-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.15.2](service/devicefarm/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.22.2](service/devopsguru/CHANGELOG.md#v1222-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.3](service/directconnect/CHANGELOG.md#v1183-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.16.3](service/directoryservice/CHANGELOG.md#v1163-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.14.4](service/dlm/CHANGELOG.md#v1144-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/docdbelastic`: [v1.1.2](service/docdbelastic/CHANGELOG.md#v112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.10.2](service/drs/CHANGELOG.md#v1102-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.18.3](service/dynamodb/CHANGELOG.md#v1183-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.14.3](service/dynamodbstreams/CHANGELOG.md#v1143-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.16.4](service/ebs/CHANGELOG.md#v1164-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.15.2](service/ec2instanceconnect/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.18.3](service/ecr/CHANGELOG.md#v1183-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
- * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.15.2](service/ecrpublic/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.23.3](service/ecs/CHANGELOG.md#v1233-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.19.4](service/efs/CHANGELOG.md#v1194-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. - * **Documentation**: Documentation update for EFS to support IAM best practices. -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.27.3](service/eks/CHANGELOG.md#v1273-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/elasticinference`: [v1.12.2](service/elasticinference/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.18.3](service/elasticsearchservice/CHANGELOG.md#v1183-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/elastictranscoder`: [v1.14.2](service/elastictranscoder/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.22.3](service/emr/CHANGELOG.md#v1223-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.17.1](service/emrcontainers/CHANGELOG.md#v1171-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.5.2](service/emrserverless/CHANGELOG.md#v152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.18.3](service/eventbridge/CHANGELOG.md#v1183-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.10.2](service/evidently/CHANGELOG.md#v1102-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.9.2](service/finspace/CHANGELOG.md#v192-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.14.2](service/finspacedata/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.16.3](service/firehose/CHANGELOG.md#v1163-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.14.2](service/fis/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.22.3](service/fms/CHANGELOG.md#v1223-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.25.2](service/forecast/CHANGELOG.md#v1252-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.13.2](service/forecastquery/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.23.0](service/frauddetector/CHANGELOG.md#v1230-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Feature**: This release introduces Lists feature which allows customers to reference a set of values in Fraud Detector's rules. With Lists, customers can dynamically manage these attributes in real time. Lists can be created/deleted and its contents can be modified using the Fraud Detector API. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.28.3](service/fsx/CHANGELOG.md#v1283-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.17.2](service/gamelift/CHANGELOG.md#v1172-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.2.2](service/gamesparks/CHANGELOG.md#v122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.14.3](service/glacier/CHANGELOG.md#v1143-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.16.2](service/globalaccelerator/CHANGELOG.md#v1162-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.42.0](service/glue/CHANGELOG.md#v1420-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Feature**: Fix DirectJDBCSource not showing up in CLI code gen - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.11.2](service/grafana/CHANGELOG.md#v1112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.15.3](service/greengrass/CHANGELOG.md#v1153-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.21.3](service/greengrassv2/CHANGELOG.md#v1213-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.17.2](service/groundstation/CHANGELOG.md#v1172-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.17.3](service/guardduty/CHANGELOG.md#v1173-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.16.2](service/health/CHANGELOG.md#v1162-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.15.2](service/healthlake/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.13.2](service/honeycode/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.16.2](service/identitystore/CHANGELOG.md#v1162-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.22.2](service/imagebuilder/CHANGELOG.md#v1222-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
- * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/inspector`: [v1.13.2](service/inspector/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.11.3](service/inspector2/CHANGELOG.md#v1113-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.34.2](service/iot/CHANGELOG.md#v1342-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.11.2](service/iot1clickdevicesservice/CHANGELOG.md#v1112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/iot1clickprojects`: [v1.12.2](service/iot1clickprojects/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.14.2](service/iotanalytics/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.14.2](service/iotdataplane/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.17.2](service/iotdeviceadvisor/CHANGELOG.md#v1172-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.15.2](service/iotevents/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.13.2](service/ioteventsdata/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.13.2](service/iotfleethub/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.3.2](service/iotfleetwise/CHANGELOG.md#v132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.12.2](service/iotjobsdataplane/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/iotroborunner`: [v1.1.2](service/iotroborunner/CHANGELOG.md#v112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
- * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.15.2](service/iotsecuretunneling/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.27.2](service/iotsitewise/CHANGELOG.md#v1272-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.14.2](service/iotthingsgraph/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.10.2](service/iottwinmaker/CHANGELOG.md#v1102-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.24.2](service/iotwireless/CHANGELOG.md#v1242-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.20.2](service/ivs/CHANGELOG.md#v1202-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.3.2](service/ivschat/CHANGELOG.md#v132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.19.2](service/kafka/CHANGELOG.md#v1192-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.9.2](service/kafkaconnect/CHANGELOG.md#v192-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
- * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.38.3](service/kendra/CHANGELOG.md#v1383-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/kendraranking`: [v1.0.4](service/kendraranking/CHANGELOG.md#v104-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.1.2](service/keyspaces/CHANGELOG.md#v112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.17.4](service/kinesis/CHANGELOG.md#v1174-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.14.2](service/kinesisanalytics/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.16.2](service/kinesisanalyticsv2/CHANGELOG.md#v1162-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.15.3](service/kinesisvideo/CHANGELOG.md#v1153-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.14.3](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1143-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia`: [v1.11.3](service/kinesisvideomedia/CHANGELOG.md#v1113-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/kinesisvideosignaling`: [v1.11.3](service/kinesisvideosignaling/CHANGELOG.md#v1113-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.2.3](service/kinesisvideowebrtcstorage/CHANGELOG.md#v123-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.20.3](service/kms/CHANGELOG.md#v1203-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.19.2](service/lakeformation/CHANGELOG.md#v1192-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.29.2](service/lambda/CHANGELOG.md#v1292-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
- * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.17.2](service/lexmodelbuildingservice/CHANGELOG.md#v1172-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.28.1](service/lexmodelsv2/CHANGELOG.md#v1281-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/lexruntimeservice`: [v1.13.2](service/lexruntimeservice/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.17.1](service/lexruntimev2/CHANGELOG.md#v1171-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.17.2](service/licensemanager/CHANGELOG.md#v1172-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/licensemanagerlinuxsubscriptions`: [v1.1.2](service/licensemanagerlinuxsubscriptions/CHANGELOG.md#v112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.2.2](service/licensemanagerusersubscriptions/CHANGELOG.md#v122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.25.3](service/lightsail/CHANGELOG.md#v1253-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.21.2](service/location/CHANGELOG.md#v1212-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.17.2](service/lookoutequipment/CHANGELOG.md#v1172-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.19.2](service/lookoutmetrics/CHANGELOG.md#v1192-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.15.2](service/lookoutvision/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.4.2](service/m2/CHANGELOG.md#v142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/machinelearning`: [v1.15.2](service/machinelearning/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.15.2](service/macie/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.26.2](service/macie2/CHANGELOG.md#v1262-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.14.2](service/managedblockchain/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.15.2](service/marketplacecatalog/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
- * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/marketplacecommerceanalytics`: [v1.12.2](service/marketplacecommerceanalytics/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/marketplaceentitlementservice`: [v1.12.2](service/marketplaceentitlementservice/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.14.3](service/marketplacemetering/CHANGELOG.md#v1143-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.18.2](service/mediaconnect/CHANGELOG.md#v1182-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.31.1](service/mediaconvert/CHANGELOG.md#v1311-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.29.2](service/medialive/CHANGELOG.md#v1292-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.20.2](service/mediapackage/CHANGELOG.md#v1202-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.21.2](service/mediapackagevod/CHANGELOG.md#v1212-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/mediastore`: [v1.13.2](service/mediastore/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
- * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/mediastoredata`: [v1.13.2](service/mediastoredata/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.22.2](service/mediatailor/CHANGELOG.md#v1222-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.12.2](service/memorydb/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.17.2](service/mgn/CHANGELOG.md#v1172-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/migrationhub`: [v1.13.2](service/migrationhub/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/migrationhubconfig`: [v1.13.2](service/migrationhubconfig/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/migrationhuborchestrator`: [v1.1.2](service/migrationhuborchestrator/CHANGELOG.md#v112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.9.1](service/migrationhubrefactorspaces/CHANGELOG.md#v191-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.7.2](service/migrationhubstrategy/CHANGELOG.md#v172-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.12.2](service/mobile/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.14.2](service/mq/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/mturk`: [v1.14.2](service/mturk/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.14.2](service/mwaa/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.24.2](service/networkfirewall/CHANGELOG.md#v1242-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.17.2](service/networkmanager/CHANGELOG.md#v1172-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.16.2](service/nimble/CHANGELOG.md#v1162-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.1.3](service/oam/CHANGELOG.md#v113-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.1.2](service/omics/CHANGELOG.md#v112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.14.2](service/opensearch/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
- * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.1.3](service/opensearchserverless/CHANGELOG.md#v113-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/opsworks`: [v1.14.2](service/opsworks/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.15.2](service/opsworkscm/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.18.2](service/organizations/CHANGELOG.md#v1182-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.27.2](service/outposts/CHANGELOG.md#v1272-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.11.2](service/panorama/CHANGELOG.md#v1112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.23.2](service/personalize/CHANGELOG.md#v1232-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.13.2](service/personalizeevents/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.13.2](service/personalizeruntime/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.16.3](service/pi/CHANGELOG.md#v1163-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.18.2](service/pinpoint/CHANGELOG.md#v1182-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/pinpointemail`: [v1.12.2](service/pinpointemail/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.11.2](service/pinpointsmsvoice/CHANGELOG.md#v1112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2`: [v1.1.2](service/pinpointsmsvoicev2/CHANGELOG.md#v112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
- * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.1.2](service/pipes/CHANGELOG.md#v112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.25.1](service/polly/CHANGELOG.md#v1251-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.18.2](service/pricing/CHANGELOG.md#v1182-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/privatenetworks`: [v1.2.0](service/privatenetworks/CHANGELOG.md#v120-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Feature**: This release introduces a new StartNetworkResourceUpdate API, which enables return/replacement of hardware from a NetworkSite. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.20.1](service/proton/CHANGELOG.md#v1201-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.15.2](service/qldb/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.14.2](service/qldbsession/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.31.2](service/quicksight/CHANGELOG.md#v1312-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.17.3](service/ram/CHANGELOG.md#v1173-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.8.3](service/rbin/CHANGELOG.md#v183-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.40.3](service/rds/CHANGELOG.md#v1403-2023-02-15) - * **Documentation**: Database Activity Stream support for RDS for SQL Server. -* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.13.2](service/rdsdata/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.18.2](service/redshiftdata/CHANGELOG.md#v1182-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.4.3](service/redshiftserverless/CHANGELOG.md#v143-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.23.2](service/rekognition/CHANGELOG.md#v1232-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.8.2](service/resiliencehub/CHANGELOG.md#v182-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.2.3](service/resourceexplorer2/CHANGELOG.md#v123-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.14.3](service/resourcegroups/CHANGELOG.md#v1143-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.14.3](service/resourcegroupstaggingapi/CHANGELOG.md#v1143-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.18.2](service/robomaker/CHANGELOG.md#v1182-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/rolesanywhere`: [v1.1.2](service/rolesanywhere/CHANGELOG.md#v112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.14.2](service/route53domains/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.11.2](service/route53recoverycluster/CHANGELOG.md#v1112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.11.2](service/route53recoverycontrolconfig/CHANGELOG.md#v1112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.9.2](service/route53recoveryreadiness/CHANGELOG.md#v192-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.16.3](service/route53resolver/CHANGELOG.md#v1163-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.9.2](service/rum/CHANGELOG.md#v192-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.14.2](service/s3outposts/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.68.1](service/sagemaker/CHANGELOG.md#v1681-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.15.2](service/sagemakera2iruntime/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.13.2](service/sagemakeredge/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.13.2](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.1.2](service/sagemakergeospatial/CHANGELOG.md#v112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/sagemakermetrics`: [v1.0.5](service/sagemakermetrics/CHANGELOG.md#v105-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.18.3](service/sagemakerruntime/CHANGELOG.md#v1183-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.12.2](service/savingsplans/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/scheduler`: [v1.1.2](service/scheduler/CHANGELOG.md#v112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.15.2](service/schemas/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.18.4](service/secretsmanager/CHANGELOG.md#v1184-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.28.2](service/securityhub/CHANGELOG.md#v1282-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.2.2](service/securitylake/CHANGELOG.md#v122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository`: [v1.12.2](service/serverlessapplicationrepository/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.16.2](service/servicecatalog/CHANGELOG.md#v1162-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.16.3](service/servicecatalogappregistry/CHANGELOG.md#v1163-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.19.2](service/servicediscovery/CHANGELOG.md#v1192-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/servicequotas`: [v1.14.3](service/servicequotas/CHANGELOG.md#v1143-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.16.2](service/sesv2/CHANGELOG.md#v1162-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.17.3](service/sfn/CHANGELOG.md#v1173-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.18.2](service/shield/CHANGELOG.md#v1182-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.14.2](service/signer/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.1.2](service/simspaceweaver/CHANGELOG.md#v112-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.13.2](service/sms/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.18.1](service/snowball/CHANGELOG.md#v1181-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.9.2](service/snowdevicemanagement/CHANGELOG.md#v192-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.35.3](service/ssm/CHANGELOG.md#v1353-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
- * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.14.2](service/ssmcontacts/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.20.2](service/ssmincidents/CHANGELOG.md#v1202-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.2.2](service/ssmsap/CHANGELOG.md#v122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.12.2](service/sso/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.16.2](service/ssoadmin/CHANGELOG.md#v1162-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.14.2](service/ssooidc/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.18.3](service/storagegateway/CHANGELOG.md#v1183-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.14.2](service/support/CHANGELOG.md#v1142-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/supportapp`: [v1.2.2](service/supportapp/CHANGELOG.md#v122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.14.4](service/swf/CHANGELOG.md#v1144-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.17.3](service/synthetics/CHANGELOG.md#v1173-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.20.2](service/textract/CHANGELOG.md#v1202-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.15.2](service/timestreamquery/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.15.2](service/timestreamwrite/CHANGELOG.md#v1152-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
- * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.25.2](service/transcribe/CHANGELOG.md#v1252-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.9.2](service/transcribestreaming/CHANGELOG.md#v192-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.28.3](service/transfer/CHANGELOG.md#v1283-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.17.2](service/translate/CHANGELOG.md#v1172-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.12.2](service/voiceid/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.12.2](service/waf/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.13.3](service/wafregional/CHANGELOG.md#v1133-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.25.0](service/wafv2/CHANGELOG.md#v1250-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Feature**: For protected CloudFront distributions, you can now use the AWS WAF Fraud Control account takeover prevention (ATP) managed rule group to block new login attempts from clients that have recently submitted too many failed login attempts. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.18.2](service/wellarchitected/CHANGELOG.md#v1182-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. 
-* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.12.2](service/wisdom/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.13.3](service/workdocs/CHANGELOG.md#v1133-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.13.2](service/worklink/CHANGELOG.md#v1132-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.18.2](service/workmail/CHANGELOG.md#v1182-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/workmailmessageflow`: [v1.12.2](service/workmailmessageflow/CHANGELOG.md#v1122-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
- * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.28.3](service/workspaces/CHANGELOG.md#v1283-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.9.2](service/workspacesweb/CHANGELOG.md#v192-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. -* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.16.3](service/xray/CHANGELOG.md#v1163-2023-02-15) - * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. - * **Bug Fix**: Correct error type parsing for restJson services. - -# Release (2023-02-14) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.17.0](service/appconfig/CHANGELOG.md#v1170-2023-02-14) - * **Feature**: AWS AppConfig now offers the option to set a version label on hosted configuration versions. Version labels allow you to identify specific hosted configuration versions based on an alternate versioning scheme that you define. 
-* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.22.0](service/datasync/CHANGELOG.md#v1220-2023-02-14) - * **Feature**: With this launch, we are giving customers the ability to use older SMB protocol versions, enabling them to use DataSync to copy data to and from their legacy storage arrays. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.86.0](service/ec2/CHANGELOG.md#v1860-2023-02-14) - * **Feature**: With this release customers can turn host maintenance on or off when allocating or modifying a supported dedicated host. Host maintenance is turned on by default for supported hosts. - -# Release (2023-02-13) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.9.0](service/account/CHANGELOG.md#v190-2023-02-13) - * **Feature**: This release of the Account Management API enables customers to view and manage whether AWS Opt-In Regions are enabled or disabled for their Account. For more information, see https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html -* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.6.0](service/appconfigdata/CHANGELOG.md#v160-2023-02-13) - * **Feature**: AWS AppConfig now offers the option to set a version label on hosted configuration versions. If a labeled hosted configuration version is deployed, its version label is available in the GetLatestConfiguration response. -* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.18.0](service/snowball/CHANGELOG.md#v1180-2023-02-13) - * **Feature**: Adds support for EKS Anywhere on Snowball. AWS Snow Family customers can now install EKS Anywhere service on Snowball Edge Compute Optimized devices. - -# Release (2023-02-10) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.27.0](service/autoscaling/CHANGELOG.md#v1270-2023-02-10) - * **Feature**: You can now either terminate/replace, ignore, or wait for EC2 Auto Scaling instances on standby or protected from scale in. 
Also, you can also roll back changes from a failed instance refresh. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.46.0](service/connect/CHANGELOG.md#v1460-2023-02-10) - * **Feature**: This update provides the Wisdom session ARN for contacts enabled for Wisdom in the chat channel. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.85.0](service/ec2/CHANGELOG.md#v1850-2023-02-10) - * **Feature**: Adds support for waiters that automatically poll for an imported snapshot until it reaches the completed state. -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.25.0](service/polly/CHANGELOG.md#v1250-2023-02-10) - * **Feature**: Amazon Polly adds two new neural Japanese voices - Kazuha, Tomoko -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.68.0](service/sagemaker/CHANGELOG.md#v1680-2023-02-10) - * **Feature**: Amazon SageMaker Autopilot adds support for selecting algorithms in CreateAutoMLJob API. -* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.20.2](service/sns/CHANGELOG.md#v1202-2023-02-10) - * **Documentation**: This release adds support for SNS X-Ray active tracing as well as other updates. - -# Release (2023-02-09) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.14.2](service/chimesdkmeetings/CHANGELOG.md#v1142-2023-02-09) - * **Documentation**: Documentation updates for Chime Meetings SDK -* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.17.0](service/emrcontainers/CHANGELOG.md#v1170-2023-02-09) - * **Feature**: EMR on EKS allows configuring retry policies for job runs through the StartJobRun API. Using retry policies, a job cause a driver pod to be restarted automatically if it fails or is deleted. The job's status can be seen in the DescribeJobRun and ListJobRun APIs and monitored using CloudWatch events. -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.28.0](service/lexmodelsv2/CHANGELOG.md#v1280-2023-02-09) - * **Feature**: AWS Lex now supports Network of Bots. 
-* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.17.0](service/lexruntimev2/CHANGELOG.md#v1170-2023-02-09) - * **Feature**: AWS Lex now supports Network of Bots. -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.25.2](service/lightsail/CHANGELOG.md#v1252-2023-02-09) - * **Documentation**: Documentation updates for Lightsail -* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.9.0](service/migrationhubrefactorspaces/CHANGELOG.md#v190-2023-02-09) - * **Feature**: This release adds support for creating environments with a network fabric type of NONE -* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.13.2](service/workdocs/CHANGELOG.md#v1132-2023-02-09) - * **Documentation**: Doc only update for the WorkDocs APIs. -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.28.2](service/workspaces/CHANGELOG.md#v1282-2023-02-09) - * **Documentation**: Removed Windows Server 2016 BYOL and made changes based on IAM campaign. - -# Release (2023-02-08) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.20.0](service/backup/CHANGELOG.md#v1200-2023-02-08) - * **Feature**: This release added one attribute (resource name) in the output model of our 9 existing APIs in AWS backup so that customers will see the resource name at the output. No input required from Customers. -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.25.0](service/cloudfront/CHANGELOG.md#v1250-2023-02-08) - * **Feature**: CloudFront Origin Access Control extends support to AWS Elemental MediaStore origins. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.41.0](service/glue/CHANGELOG.md#v1410-2023-02-08) - * **Feature**: DirectJDBCSource + Glue 4.0 streaming options - -# Release (2023-02-07) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.28.2](service/transfer/CHANGELOG.md#v1282-2023-02-07) - * **Documentation**: Updated the documentation for the ImportCertificate API call, and added examples. 
- -# Release (2023-02-06) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.21.0](service/computeoptimizer/CHANGELOG.md#v1210-2023-02-06) - * **Feature**: AWS Compute optimizer can now infer if Kafka is running on an instance. -* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.23.0](service/customerprofiles/CHANGELOG.md#v1230-2023-02-06) - * **Feature**: This release deprecates the PartyType and Gender enum data types from the Profile model and replaces them with new PartyTypeString and GenderString attributes, which accept any string of length up to 255. -* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.22.0](service/frauddetector/CHANGELOG.md#v1220-2023-02-06) - * **Feature**: My AWS Service (Amazon Fraud Detector) - This release introduces Cold Start Model Training which optimizes training for small datasets and adds intelligent methods for treating unlabeled data. You can now train Online Fraud Insights or Transaction Fraud Insights models with minimal historical-data. -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.31.0](service/mediaconvert/CHANGELOG.md#v1310-2023-02-06) - * **Feature**: The AWS Elemental MediaConvert SDK has added improved scene change detection capabilities and a bandwidth reduction filter, along with video quality enhancements, to the AVC encoder. -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.27.0](service/outposts/CHANGELOG.md#v1270-2023-02-06) - * **Feature**: Adds OrderType to Order structure. Adds PreviousOrderId and PreviousLineItemId to LineItem structure. Adds new line item status REPLACED. Increases maximum length of pagination token. 
- -# Release (2023-02-03) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.26.2](service/autoscaling/CHANGELOG.md#v1262-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.26.2](service/cloudformation/CHANGELOG.md#v1262-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. -* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.14.1](service/cloudsearch/CHANGELOG.md#v1141-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.25.2](service/cloudwatch/CHANGELOG.md#v1252-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. -* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.20.2](service/docdb/CHANGELOG.md#v1202-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.84.1](service/ec2/CHANGELOG.md#v1841-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.26.2](service/elasticache/CHANGELOG.md#v1262-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. -* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.15.1](service/elasticbeanstalk/CHANGELOG.md#v1151-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. 
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.15.2](service/elasticloadbalancing/CHANGELOG.md#v1152-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.19.3](service/elasticloadbalancingv2/CHANGELOG.md#v1193-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.2](service/iam/CHANGELOG.md#v1192-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. -* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.19.2](service/neptune/CHANGELOG.md#v1192-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. -* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.20.0](service/proton/CHANGELOG.md#v1200-2023-02-03) - * **Feature**: Add new GetResourcesSummary API -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.40.2](service/rds/CHANGELOG.md#v1402-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.27.2](service/redshift/CHANGELOG.md#v1272-2023-02-03) - * **Documentation**: Corrects descriptions of the parameters for the API operations RestoreFromClusterSnapshot, RestoreTableFromClusterSnapshot, and CreateCluster. - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. -* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.15.1](service/ses/CHANGELOG.md#v1151-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. -* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.20.1](service/sns/CHANGELOG.md#v1201-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. 
-* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.20.2](service/sqs/CHANGELOG.md#v1202-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. -* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.18.3](service/sts/CHANGELOG.md#v1183-2023-02-03) - * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. - -# Release (2023-02-02) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.16.0](service/appconfig/CHANGELOG.md#v1160-2023-02-02) - * **Feature**: AWS AppConfig introduces KMS customer-managed key (CMK) encryption of configuration data, along with AWS Secrets Manager as a new configuration data source. S3 objects using SSE-KMS encryption and SSM Parameter Store SecureStrings are also now supported. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.84.0](service/ec2/CHANGELOG.md#v1840-2023-02-02) - * **Feature**: Documentation updates for EC2. -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.19.2](service/elasticloadbalancingv2/CHANGELOG.md#v1192-2023-02-02) - * **Documentation**: The GWLB Flex Health Check project updates the default values of healthy-threshold-count from 3 to 5 and unhealthy-threshold-count from 3 to 2 -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.31.0](service/quicksight/CHANGELOG.md#v1310-2023-02-02) - * **Feature**: QuickSight support for Radar Chart and Dashboard Publish Options - -# Release (2023-02-01) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.22.0](service/devopsguru/CHANGELOG.md#v1220-2023-02-01) - * **Feature**: This release adds filter support ListAnomalyForInsight API. 
-* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.25.0](service/forecast/CHANGELOG.md#v1250-2023-02-01) - * **Feature**: This release will enable customer select INCREMENTAL as ImportModel in Forecast's CreateDatasetImportJob API. Verified latest SDK containing required attribute, following https://w.amazon.com/bin/view/AWS-Seer/Launch/Trebuchet/ -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.1](service/iam/CHANGELOG.md#v1191-2023-02-01) - * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM). -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.22.0](service/mediatailor/CHANGELOG.md#v1220-2023-02-01) - * **Feature**: The AWS Elemental MediaTailor SDK for Channel Assembly has added support for program updates, and the ability to clip the end of VOD sources in programs. -* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.20.0](service/sns/CHANGELOG.md#v1200-2023-02-01) - * **Feature**: Additional attributes added for set-topic-attributes. - -# Release (2023-01-31) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.19.0](service/appsync/CHANGELOG.md#v1190-2023-01-31) - * **Feature**: This release introduces the feature to support EventBridge as AppSync data source. -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.23.0](service/cloudtrail/CHANGELOG.md#v1230-2023-01-31) - * **Feature**: Add new "Channel" APIs to enable users to manage channels used for CloudTrail Lake integrations, and "Resource Policy" APIs to enable users to manage the resource-based permissions policy attached to a channel. -* `github.com/aws/aws-sdk-go-v2/service/cloudtraildata`: [v1.0.0](service/cloudtraildata/CHANGELOG.md#v100-2023-01-31) - * **Release**: New AWS service client module - * **Feature**: Add CloudTrail Data Service to enable users to ingest activity events from non-AWS sources into CloudTrail Lake. 
-* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.16.0](service/codeartifact/CHANGELOG.md#v1160-2023-01-31) - * **Feature**: This release introduces a new DeletePackage API, which enables deletion of a package and all of its versions from a repository. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.83.0](service/ec2/CHANGELOG.md#v1830-2023-01-31) - * **Feature**: This launch allows customers to associate up to 8 IP addresses to their NAT Gateways to increase the limit on concurrent connections to a single destination by eight times from 55K to 440K. -* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.17.0](service/groundstation/CHANGELOG.md#v1170-2023-01-31) - * **Feature**: DigIF Expansion changes to the Customer APIs. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.34.0](service/iot/CHANGELOG.md#v1340-2023-01-31) - * **Feature**: Added support for IoT Rules Engine Cloudwatch Logs action batch mode. -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.14.0](service/opensearch/CHANGELOG.md#v1140-2023-01-31) - * **Feature**: Amazon OpenSearch Service adds the option for a VPC endpoint connection between two domains when the local domain uses OpenSearch version 1.3 or 2.3. You can now use remote reindex to copy indices from one VPC domain to another without a reverse proxy. -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.24.0](service/polly/CHANGELOG.md#v1240-2023-01-31) - * **Feature**: Amazon Polly adds two new neural American English voices - Ruth, Stephen -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.67.0](service/sagemaker/CHANGELOG.md#v1670-2023-01-31) - * **Feature**: Amazon SageMaker Automatic Model Tuning now supports more completion criteria for Hyperparameter Optimization. -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.28.0](service/securityhub/CHANGELOG.md#v1280-2023-01-31) - * **Feature**: New fields have been added to the AWS Security Finding Format. 
Compliance.SecurityControlId is a unique identifier for a security control across standards. Compliance.AssociatedStandards contains all enabled standards in which a security control is enabled. - -# Release (2023-01-30) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.26.0](service/cloudformation/CHANGELOG.md#v1260-2023-01-30) - * **Feature**: This feature provides a method of obtaining which regions a stackset has stack instances deployed in. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.82.0](service/ec2/CHANGELOG.md#v1820-2023-01-30) - * **Feature**: We add Prefix Lists as a new route destination option for LocalGatewayRoutes. This will allow customers to create routes to Prefix Lists. Prefix List routes will allow customers to group individual CIDR routes with the same target into a single route. - -# Release (2023-01-27) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.20.0](service/appstream/CHANGELOG.md#v1200-2023-01-27) - * **Feature**: Fixing the issue where Appstream waiters hang for fleet_started and fleet_stopped. -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.21.0](service/mediatailor/CHANGELOG.md#v1210-2023-01-27) - * **Feature**: This release introduces the As Run logging type, along with API and documentation updates. -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.26.0](service/outposts/CHANGELOG.md#v1260-2023-01-27) - * **Feature**: Adding support for payment term in GetOrder, CreateOrder responses. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.66.0](service/sagemaker/CHANGELOG.md#v1660-2023-01-27) - * **Feature**: This release supports running SageMaker Training jobs with container images that are in a private Docker registry. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.18.0](service/sagemakerruntime/CHANGELOG.md#v1180-2023-01-27) - * **Feature**: Amazon SageMaker Runtime which supports InvokeEndpointAsync asynchronously can now invoke endpoints with custom timeout values. Asynchronous invocations support longer processing times. - -# Release (2023-01-26) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.18.0](service/eventbridge/CHANGELOG.md#v1180-2023-01-26) - * **Feature**: Minor comments for Redshift Serverless workgroup target support. - -# Release (2023-01-25) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.81.0](service/ec2/CHANGELOG.md#v1810-2023-01-25) - * **Feature**: This release adds new functionality that allows customers to provision IPv6 CIDR blocks through Amazon VPC IP Address Manager (IPAM) as well as allowing customers to utilize IPAM Resource Discovery APIs. -* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.4.0](service/m2/CHANGELOG.md#v140-2023-01-25) - * **Feature**: Add returnCode, batchJobIdentifier in GetBatchJobExecution response, for user to view the batch job execution result & unique identifier from engine. Also removed unused headers from REST APIs -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.23.0](service/polly/CHANGELOG.md#v1230-2023-01-25) - * **Feature**: Add 5 new neural voices - Sergio (es-ES), Andres (es-MX), Remi (fr-FR), Adriano (it-IT) and Thiago (pt-BR). -* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.4.1](service/redshiftserverless/CHANGELOG.md#v141-2023-01-25) - * **Documentation**: Added query monitoring rules as possible parameters for create and update workgroup operations. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.65.0](service/sagemaker/CHANGELOG.md#v1650-2023-01-25) - * **Feature**: SageMaker Inference Recommender now decouples from Model Registry and could accept Model Name to invoke inference recommendations job; Inference Recommender now provides CPU/Memory Utilization metrics data in recommendation output. -* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.18.2](service/sts/CHANGELOG.md#v1182-2023-01-25) - * **Documentation**: Doc only change to update wording in a key topic - -# Release (2023-01-24) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.27.0](service/route53/CHANGELOG.md#v1270-2023-01-24) - * **Feature**: Amazon Route 53 now supports the Asia Pacific (Melbourne) Region (ap-southeast-4) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. -* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.2.0](service/ssmsap/CHANGELOG.md#v120-2023-01-24) - * **Feature**: This release provides updates to documentation and support for listing operations performed by AWS Systems Manager for SAP. - -# Release (2023-01-23) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.29.0](service/lambda/CHANGELOG.md#v1290-2023-01-23) - * **Feature**: Release Lambda RuntimeManagementConfig, enabling customers to better manage runtime updates to their Lambda functions. This release adds two new APIs, GetRuntimeManagementConfig and PutRuntimeManagementConfig, as well as support on existing Create/Get/Update function APIs. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.64.0](service/sagemaker/CHANGELOG.md#v1640-2023-01-23) - * **Feature**: Amazon SageMaker Inference now supports P4de instance types. 
- -# Release (2023-01-20) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.80.0](service/ec2/CHANGELOG.md#v1800-2023-01-20) - * **Feature**: C6in, M6in, M6idn, R6in and R6idn instances are powered by 3rd Generation Intel Xeon Scalable processors (code named Ice Lake) with an all-core turbo frequency of 3.5 GHz. -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.20.0](service/ivs/CHANGELOG.md#v1200-2023-01-20) - * **Feature**: API and Doc update. Update to arns field in BatchGetStreamKey. Also updates to operations and structures. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.30.0](service/quicksight/CHANGELOG.md#v1300-2023-01-20) - * **Feature**: This release adds support for data bars in QuickSight table and increases pivot table field well limit. - -# Release (2023-01-19) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.24.0](service/appflow/CHANGELOG.md#v1240-2023-01-19) - * **Feature**: Adding support for Salesforce Pardot connector in Amazon AppFlow. -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.20.0](service/cloudwatchlogs/CHANGELOG.md#v1200-2023-01-19) - * **Feature**: Bug fix - Removed the regex pattern validation from CoralModel to avoid potential security issue. -* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.15.0](service/codeartifact/CHANGELOG.md#v1150-2023-01-19) - * **Feature**: Documentation updates for CodeArtifact -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.45.0](service/connect/CHANGELOG.md#v1450-2023-01-19) - * **Feature**: Amazon Connect Chat introduces Persistent Chat, allowing customers to resume previous conversations with context and transcripts carried over from previous chats, eliminating the need to repeat themselves and allowing agents to provide personalized service with access to entire conversation history. 
-* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.15.0](service/connectparticipant/CHANGELOG.md#v1150-2023-01-19) - * **Feature**: This release updates Amazon Connect Participant's GetTranscript api to provide transcripts of past chats on a persistent chat session. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.79.0](service/ec2/CHANGELOG.md#v1790-2023-01-19) - * **Feature**: Adds SSM Parameter Resource Aliasing support to EC2 Launch Templates. Launch Templates can now store parameter aliases in place of AMI Resource IDs. CreateLaunchTemplateVersion and DescribeLaunchTemplateVersions now support a convenience flag, ResolveAlias, to return the resolved parameter value. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.40.0](service/glue/CHANGELOG.md#v1400-2023-01-19) - * **Feature**: Release Glue Studio Hudi Data Lake Format for SDK/CLI -* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.16.0](service/groundstation/CHANGELOG.md#v1160-2023-01-19) - * **Feature**: Add configurable prepass and postpass times for DataflowEndpointGroup. Add Waiter to allow customers to wait for a contact that was reserved through ReserveContact -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.29.0](service/medialive/CHANGELOG.md#v1290-2023-01-19) - * **Feature**: AWS Elemental MediaLive adds support for SCTE 35 preRollMilliSeconds. -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.13.0](service/opensearch/CHANGELOG.md#v1130-2023-01-19) - * **Feature**: This release adds the enhanced dry run option, that checks for validation errors that might occur when deploying configuration changes and provides a summary of these errors, if any. The feature will also indicate whether a blue/green deployment will be required to apply a change. 
-* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.11.0](service/panorama/CHANGELOG.md#v1110-2023-01-19) - * **Feature**: Added AllowMajorVersionUpdate option to OTAJobConfig to make appliance software major version updates opt-in. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.63.0](service/sagemaker/CHANGELOG.md#v1630-2023-01-19) - * **Feature**: HyperParameterTuningJobs now allow passing environment variables into the corresponding TrainingJobs - -# Release (2023-01-18) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.25.0](service/cloudwatch/CHANGELOG.md#v1250-2023-01-18) - * **Feature**: Enable cross-account streams in CloudWatch Metric Streams via Observability Access Manager. -* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.19.1](service/efs/CHANGELOG.md#v1191-2023-01-18) - * **Documentation**: Documentation updates for EFS access points limit increase -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.24.2](service/wafv2/CHANGELOG.md#v1242-2023-01-18) - * **Documentation**: Improved the visibility of the guidance for updating AWS WAF resources, such as web ACLs and rule groups. - -# Release (2023-01-17) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.5.0](service/billingconductor/CHANGELOG.md#v150-2023-01-17) - * **Feature**: This release adds support for SKU Scope for pricing plans. -* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.22.0](service/imagebuilder/CHANGELOG.md#v1220-2023-01-17) - * **Feature**: Add support for AWS Marketplace product IDs as input during CreateImageRecipe for the parent-image parameter. Add support for listing third-party components. -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.24.0](service/networkfirewall/CHANGELOG.md#v1240-2023-01-17) - * **Feature**: Network Firewall now allows creation of dual stack endpoints, enabling inspection of IPv6 traffic. 
- -# Release (2023-01-13) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.44.0](service/connect/CHANGELOG.md#v1440-2023-01-13) - * **Feature**: This release updates the responses of UpdateContactFlowContent, UpdateContactFlowMetadata, UpdateContactFlowName and DeleteContactFlow API with empty responses. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.78.0](service/ec2/CHANGELOG.md#v1780-2023-01-13) - * **Feature**: Documentation updates for EC2. -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.25.0](service/outposts/CHANGELOG.md#v1250-2023-01-13) - * **Feature**: This release adds POWER_30_KVA as an option for PowerDrawKva. PowerDrawKva is part of the RackPhysicalProperties structure in the CreateSite request. -* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.14.0](service/resourcegroups/CHANGELOG.md#v1140-2023-01-13) - * **Feature**: AWS Resource Groups customers can now turn on Group Lifecycle Events in their AWS account. When you turn this on, Resource Groups monitors your groups for changes to group state or membership. Those changes are sent to Amazon EventBridge as events that you can respond to using rules you create. - -# Release (2023-01-12) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cleanrooms`: [v1.0.0](service/cleanrooms/CHANGELOG.md#v100-2023-01-12) - * **Release**: New AWS service client module - * **Feature**: Initial release of AWS Clean Rooms -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.19.0](service/cloudwatchlogs/CHANGELOG.md#v1190-2023-01-12) - * **Feature**: Bug fix: logGroupName is now not a required field in GetLogEvents, FilterLogEvents, GetLogGroupFields, and DescribeLogStreams APIs as logGroupIdentifier can be provided instead -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.28.0](service/lambda/CHANGELOG.md#v1280-2023-01-12) - * **Feature**: Add support for MaximumConcurrency parameter for SQS event source. 
Customers can now limit the maximum concurrent invocations for their SQS Event Source Mapping. -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.30.0](service/mediaconvert/CHANGELOG.md#v1300-2023-01-12) - * **Feature**: The AWS Elemental MediaConvert SDK has added support for compact DASH manifest generation, audio normalization using TruePeak measurements, and the ability to clip the sample range in the color corrector. -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.18.1](service/secretsmanager/CHANGELOG.md#v1181-2023-01-12) - * **Documentation**: Update documentation for new ListSecrets and DescribeSecret parameters - -# Release (2023-01-11) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.38.0](service/kendra/CHANGELOG.md#v1380-2023-01-11) - * **Feature**: This release adds support for new document types - RTF, XML, XSLT, MS_EXCEL, CSV, JSON, MD - -# Release (2023-01-10) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.21.0](service/location/CHANGELOG.md#v1210-2023-01-10) - * **Feature**: This release adds support for two new route travel models, Bicycle and Motorcycle which can be used with Grab data source. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.40.0](service/rds/CHANGELOG.md#v1400-2023-01-10) - * **Feature**: This release adds support for configuring allocated storage on the CreateDBInstanceReadReplica, RestoreDBInstanceFromDBSnapshot, and RestoreDBInstanceToPointInTime APIs. - -# Release (2023-01-09) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.15.0](service/ecrpublic/CHANGELOG.md#v1150-2023-01-09) - * **Feature**: This release for Amazon ECR Public makes several changes to bring the SDK into sync with the API. 
-* `github.com/aws/aws-sdk-go-v2/service/kendraranking`: [v1.0.0](service/kendraranking/CHANGELOG.md#v100-2023-01-09) - * **Release**: New AWS service client module - * **Feature**: Introducing Amazon Kendra Intelligent Ranking, a new set of Kendra APIs that leverages Kendra semantic ranking capabilities to improve the quality of search results from other search services (i.e. OpenSearch, ElasticSearch, Solr). -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.23.0](service/networkfirewall/CHANGELOG.md#v1230-2023-01-09) - * **Feature**: Network Firewall now supports the Suricata rule action reject, in addition to the actions pass, drop, and alert. -* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.9.0](service/workspacesweb/CHANGELOG.md#v190-2023-01-09) - * **Feature**: This release adds support for a new portal authentication type: AWS IAM Identity Center (successor to AWS Single Sign-On). - -# Release (2023-01-06) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.21.0](service/acmpca/CHANGELOG.md#v1210-2023-01-06) - * **Feature**: Added revocation parameter validation: bucket names must match S3 bucket naming rules and CNAMEs conform to RFC2396 restrictions on the use of special characters in URIs. -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.23.0](service/auditmanager/CHANGELOG.md#v1230-2023-01-06) - * **Feature**: This release introduces a new data retention option in your Audit Manager settings. You can now use the DeregistrationPolicy parameter to specify if you want to delete your data when you deregister Audit Manager. - -# Release (2023-01-05) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.19.0](service/accessanalyzer/CHANGELOG.md#v1190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.8.0](service/account/CHANGELOG.md#v180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.17.0](service/acm/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.20.0](service/acmpca/CHANGELOG.md#v1200-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/alexaforbusiness`: [v1.15.0](service/alexaforbusiness/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.16.0](service/amp/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.13.0](service/amplify/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.14.0](service/amplifybackend/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - * **Feature**: Updated GetBackendAPIModels response to include ModelIntrospectionSchema json string -* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.9.0](service/amplifyuibuilder/CHANGELOG.md#v190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.16.0](service/apigateway/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.11.0](service/apigatewaymanagementapi/CHANGELOG.md#v1110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.13.0](service/apigatewayv2/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.15.0](service/appconfig/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.5.0](service/appconfigdata/CHANGELOG.md#v150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.23.0](service/appflow/CHANGELOG.md#v1230-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.14.0](service/appintegrations/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.17.0](service/applicationautoscaling/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.10.0](service/applicationcostprofiler/CHANGELOG.md#v1100-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.15.0](service/applicationdiscoveryservice/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.17.0](service/applicationinsights/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.17.0](service/appmesh/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.16.0](service/apprunner/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - * **Feature**: This release adds support of securely referencing secrets and configuration data that are stored in Secrets Manager and SSM Parameter Store by adding them as environment secrets in your App Runner service. -* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.19.0](service/appstream/CHANGELOG.md#v1190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.18.0](service/appsync/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/arczonalshift`: [v1.1.0](service/arczonalshift/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.22.0](service/athena/CHANGELOG.md#v1220-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.22.0](service/auditmanager/CHANGELOG.md#v1220-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.26.0](service/autoscaling/CHANGELOG.md#v1260-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.13.0](service/autoscalingplans/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.19.0](service/backup/CHANGELOG.md#v1190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.9.0](service/backupgateway/CHANGELOG.md#v190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/backupstorage`: [v1.1.0](service/backupstorage/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.21.0](service/batch/CHANGELOG.md#v1210-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.4.0](service/billingconductor/CHANGELOG.md#v140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.17.0](service/braket/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.14.0](service/budgets/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.22.0](service/chime/CHANGELOG.md#v1220-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.10.0](service/chimesdkidentity/CHANGELOG.md#v1100-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.2.0](service/chimesdkmediapipelines/CHANGELOG.md#v120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.14.0](service/chimesdkmeetings/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.12.0](service/chimesdkmessaging/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.1.0](service/chimesdkvoice/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.17.0](service/cloud9/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.11.0](service/cloudcontrol/CHANGELOG.md#v1110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.13.0](service/clouddirectory/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.25.0](service/cloudformation/CHANGELOG.md#v1250-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.24.0](service/cloudfront/CHANGELOG.md#v1240-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.13.0](service/cloudhsm/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.14.0](service/cloudhsmv2/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.14.0](service/cloudsearch/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/cloudsearchdomain`: [v1.12.0](service/cloudsearchdomain/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.22.0](service/cloudtrail/CHANGELOG.md#v1220-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.24.0](service/cloudwatch/CHANGELOG.md#v1240-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.15.0](service/cloudwatchevents/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.18.0](service/cloudwatchlogs/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.14.0](service/codeartifact/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.20.0](service/codebuild/CHANGELOG.md#v1200-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.1.0](service/codecatalyst/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.14.0](service/codecommit/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.16.0](service/codedeploy/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.13.0](service/codeguruprofiler/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.17.0](service/codegurureviewer/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.14.0](service/codepipeline/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.13.0](service/codestar/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/codestarconnections`: [v1.14.0](service/codestarconnections/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.14.0](service/codestarnotifications/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.15.0](service/cognitoidentity/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.22.0](service/cognitoidentityprovider/CHANGELOG.md#v1220-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/cognitosync`: [v1.12.0](service/cognitosync/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.21.0](service/comprehend/CHANGELOG.md#v1210-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.15.0](service/comprehendmedical/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.20.0](service/computeoptimizer/CHANGELOG.md#v1200-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.29.0](service/configservice/CHANGELOG.md#v1290-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.43.0](service/connect/CHANGELOG.md#v1430-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - * **Feature**: Documentation update for a new Initiation Method value in DescribeContact API -* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.2.0](service/connectcampaigns/CHANGELOG.md#v120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.2.0](service/connectcases/CHANGELOG.md#v120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.13.0](service/connectcontactlens/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.14.0](service/connectparticipant/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/controltower`: [v1.1.0](service/controltower/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: [v1.15.0](service/costandusagereportservice/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.25.0](service/costexplorer/CHANGELOG.md#v1250-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.22.0](service/customerprofiles/CHANGELOG.md#v1220-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.23.0](service/databasemigrationservice/CHANGELOG.md#v1230-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.21.0](service/databrew/CHANGELOG.md#v1210-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.18.0](service/dataexchange/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/datapipeline`: [v1.14.0](service/datapipeline/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.21.0](service/datasync/CHANGELOG.md#v1210-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.12.0](service/dax/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.18.0](service/detective/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.15.0](service/devicefarm/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.21.0](service/devopsguru/CHANGELOG.md#v1210-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.0](service/directconnect/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.16.0](service/directoryservice/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.14.0](service/dlm/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.20.0](service/docdb/CHANGELOG.md#v1200-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/docdbelastic`: [v1.1.0](service/docdbelastic/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.10.0](service/drs/CHANGELOG.md#v1100-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.18.0](service/dynamodb/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.14.0](service/dynamodbstreams/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.16.0](service/ebs/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.15.0](service/ec2instanceconnect/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.18.0](service/ecr/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.14.0](service/ecrpublic/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.23.0](service/ecs/CHANGELOG.md#v1230-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.19.0](service/efs/CHANGELOG.md#v1190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.27.0](service/eks/CHANGELOG.md#v1270-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.26.0](service/elasticache/CHANGELOG.md#v1260-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.15.0](service/elasticbeanstalk/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/elasticinference`: [v1.12.0](service/elasticinference/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.15.0](service/elasticloadbalancing/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.19.0](service/elasticloadbalancingv2/CHANGELOG.md#v1190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.18.0](service/elasticsearchservice/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/elastictranscoder`: [v1.14.0](service/elastictranscoder/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.22.0](service/emr/CHANGELOG.md#v1220-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.16.0](service/emrcontainers/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.5.0](service/emrserverless/CHANGELOG.md#v150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - * **Feature**: Adds support for customized images. You can now provide runtime images when creating or updating EMR Serverless Applications. 
-* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.17.0](service/eventbridge/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.10.0](service/evidently/CHANGELOG.md#v1100-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.9.0](service/finspace/CHANGELOG.md#v190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.14.0](service/finspacedata/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.16.0](service/firehose/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.14.0](service/fis/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.22.0](service/fms/CHANGELOG.md#v1220-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.24.0](service/forecast/CHANGELOG.md#v1240-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.13.0](service/forecastquery/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.21.0](service/frauddetector/CHANGELOG.md#v1210-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.28.0](service/fsx/CHANGELOG.md#v1280-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.17.0](service/gamelift/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.2.0](service/gamesparks/CHANGELOG.md#v120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.14.0](service/glacier/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.16.0](service/globalaccelerator/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.39.0](service/glue/CHANGELOG.md#v1390-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.11.0](service/grafana/CHANGELOG.md#v1110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.15.0](service/greengrass/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.21.0](service/greengrassv2/CHANGELOG.md#v1210-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.15.0](service/groundstation/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.17.0](service/guardduty/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.16.0](service/health/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.15.0](service/healthlake/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.13.0](service/honeycode/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.0](service/iam/CHANGELOG.md#v1190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.16.0](service/identitystore/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.21.0](service/imagebuilder/CHANGELOG.md#v1210-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/inspector`: [v1.13.0](service/inspector/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.11.0](service/inspector2/CHANGELOG.md#v1110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.33.0](service/iot/CHANGELOG.md#v1330-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.11.0](service/iot1clickdevicesservice/CHANGELOG.md#v1110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/iot1clickprojects`: [v1.12.0](service/iot1clickprojects/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.14.0](service/iotanalytics/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.14.0](service/iotdataplane/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.17.0](service/iotdeviceadvisor/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.15.0](service/iotevents/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.13.0](service/ioteventsdata/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.13.0](service/iotfleethub/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.3.0](service/iotfleetwise/CHANGELOG.md#v130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.12.0](service/iotjobsdataplane/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/iotroborunner`: [v1.1.0](service/iotroborunner/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.15.0](service/iotsecuretunneling/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.27.0](service/iotsitewise/CHANGELOG.md#v1270-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.14.0](service/iotthingsgraph/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.10.0](service/iottwinmaker/CHANGELOG.md#v1100-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.24.0](service/iotwireless/CHANGELOG.md#v1240-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.19.0](service/ivs/CHANGELOG.md#v1190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.3.0](service/ivschat/CHANGELOG.md#v130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.19.0](service/kafka/CHANGELOG.md#v1190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.9.0](service/kafkaconnect/CHANGELOG.md#v190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.37.0](service/kendra/CHANGELOG.md#v1370-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.1.0](service/keyspaces/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.17.0](service/kinesis/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.14.0](service/kinesisanalytics/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.16.0](service/kinesisanalyticsv2/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.15.0](service/kinesisvideo/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.14.0](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia`: [v1.11.0](service/kinesisvideomedia/CHANGELOG.md#v1110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideosignaling`: [v1.11.0](service/kinesisvideosignaling/CHANGELOG.md#v1110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.2.0](service/kinesisvideowebrtcstorage/CHANGELOG.md#v120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.20.0](service/kms/CHANGELOG.md#v1200-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.19.0](service/lakeformation/CHANGELOG.md#v1190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.27.0](service/lambda/CHANGELOG.md#v1270-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.17.0](service/lexmodelbuildingservice/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.27.0](service/lexmodelsv2/CHANGELOG.md#v1270-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/lexruntimeservice`: [v1.13.0](service/lexruntimeservice/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.16.0](service/lexruntimev2/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.17.0](service/licensemanager/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/licensemanagerlinuxsubscriptions`: [v1.1.0](service/licensemanagerlinuxsubscriptions/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.2.0](service/licensemanagerusersubscriptions/CHANGELOG.md#v120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.25.0](service/lightsail/CHANGELOG.md#v1250-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - * **Documentation**: Documentation updates for Amazon Lightsail. 
-* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.20.0](service/location/CHANGELOG.md#v1200-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.17.0](service/lookoutequipment/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.19.0](service/lookoutmetrics/CHANGELOG.md#v1190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.15.0](service/lookoutvision/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.3.0](service/m2/CHANGELOG.md#v130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/machinelearning`: [v1.15.0](service/machinelearning/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.15.0](service/macie/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.26.0](service/macie2/CHANGELOG.md#v1260-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.14.0](service/managedblockchain/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.15.0](service/marketplacecatalog/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/marketplacecommerceanalytics`: [v1.12.0](service/marketplacecommerceanalytics/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/marketplaceentitlementservice`: [v1.12.0](service/marketplaceentitlementservice/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.14.0](service/marketplacemetering/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.18.0](service/mediaconnect/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.29.0](service/mediaconvert/CHANGELOG.md#v1290-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.28.0](service/medialive/CHANGELOG.md#v1280-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.20.0](service/mediapackage/CHANGELOG.md#v1200-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.21.0](service/mediapackagevod/CHANGELOG.md#v1210-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/mediastore`: [v1.13.0](service/mediastore/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/mediastoredata`: [v1.13.0](service/mediastoredata/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.20.0](service/mediatailor/CHANGELOG.md#v1200-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.12.0](service/memorydb/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.17.0](service/mgn/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/migrationhub`: [v1.13.0](service/migrationhub/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/migrationhubconfig`: [v1.13.0](service/migrationhubconfig/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/migrationhuborchestrator`: [v1.1.0](service/migrationhuborchestrator/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.8.0](service/migrationhubrefactorspaces/CHANGELOG.md#v180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.7.0](service/migrationhubstrategy/CHANGELOG.md#v170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.12.0](service/mobile/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.14.0](service/mq/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/mturk`: [v1.14.0](service/mturk/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.14.0](service/mwaa/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - * **Documentation**: MWAA supports Apache Airflow version 2.4.3. -* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.19.0](service/neptune/CHANGELOG.md#v1190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.22.0](service/networkfirewall/CHANGELOG.md#v1220-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.17.0](service/networkmanager/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.16.0](service/nimble/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.1.0](service/oam/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.1.0](service/omics/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.12.0](service/opensearch/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.1.0](service/opensearchserverless/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/opsworks`: [v1.14.0](service/opsworks/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.15.0](service/opsworkscm/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.18.0](service/organizations/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.24.0](service/outposts/CHANGELOG.md#v1240-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.10.0](service/panorama/CHANGELOG.md#v1100-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.23.0](service/personalize/CHANGELOG.md#v1230-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.13.0](service/personalizeevents/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.13.0](service/personalizeruntime/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.16.0](service/pi/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.18.0](service/pinpoint/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/pinpointemail`: [v1.12.0](service/pinpointemail/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.11.0](service/pinpointsmsvoice/CHANGELOG.md#v1110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2`: [v1.1.0](service/pinpointsmsvoicev2/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.1.0](service/pipes/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.22.0](service/polly/CHANGELOG.md#v1220-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.18.0](service/pricing/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/privatenetworks`: [v1.1.0](service/privatenetworks/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.19.0](service/proton/CHANGELOG.md#v1190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.15.0](service/qldb/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.14.0](service/qldbsession/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.29.0](service/quicksight/CHANGELOG.md#v1290-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.17.0](service/ram/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.8.0](service/rbin/CHANGELOG.md#v180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.39.0](service/rds/CHANGELOG.md#v1390-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
- * **Feature**: This release adds support for specifying which certificate authority (CA) to use for a DB instance's server certificate during DB instance creation, as well as other CA enhancements. -* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.13.0](service/rdsdata/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.27.0](service/redshift/CHANGELOG.md#v1270-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.18.0](service/redshiftdata/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.4.0](service/redshiftserverless/CHANGELOG.md#v140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.23.0](service/rekognition/CHANGELOG.md#v1230-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.8.0](service/resiliencehub/CHANGELOG.md#v180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.2.0](service/resourceexplorer2/CHANGELOG.md#v120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.13.0](service/resourcegroups/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.14.0](service/resourcegroupstaggingapi/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.18.0](service/robomaker/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/rolesanywhere`: [v1.1.0](service/rolesanywhere/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.26.0](service/route53/CHANGELOG.md#v1260-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.14.0](service/route53domains/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.11.0](service/route53recoverycluster/CHANGELOG.md#v1110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.11.0](service/route53recoverycontrolconfig/CHANGELOG.md#v1110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.9.0](service/route53recoveryreadiness/CHANGELOG.md#v190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.16.0](service/route53resolver/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.9.0](service/rum/CHANGELOG.md#v190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.30.0](service/s3/CHANGELOG.md#v1300-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.29.0](service/s3control/CHANGELOG.md#v1290-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.14.0](service/s3outposts/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.62.0](service/sagemaker/CHANGELOG.md#v1620-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.15.0](service/sagemakera2iruntime/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.13.0](service/sagemakeredge/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.13.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.1.0](service/sagemakergeospatial/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.17.0](service/sagemakerruntime/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.12.0](service/savingsplans/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/scheduler`: [v1.1.0](service/scheduler/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.15.0](service/schemas/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.18.0](service/secretsmanager/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.27.0](service/securityhub/CHANGELOG.md#v1270-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.2.0](service/securitylake/CHANGELOG.md#v120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository`: [v1.12.0](service/serverlessapplicationrepository/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.16.0](service/servicecatalog/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.16.0](service/servicecatalogappregistry/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.19.0](service/servicediscovery/CHANGELOG.md#v1190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/servicequotas`: [v1.14.0](service/servicequotas/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.15.0](service/ses/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.16.0](service/sesv2/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.17.0](service/sfn/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.18.0](service/shield/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.14.0](service/signer/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.1.0](service/simspaceweaver/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.13.0](service/sms/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.17.0](service/snowball/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.9.0](service/snowdevicemanagement/CHANGELOG.md#v190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.19.0](service/sns/CHANGELOG.md#v1190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.20.0](service/sqs/CHANGELOG.md#v1200-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.35.0](service/ssm/CHANGELOG.md#v1350-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.14.0](service/ssmcontacts/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.20.0](service/ssmincidents/CHANGELOG.md#v1200-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.1.0](service/ssmsap/CHANGELOG.md#v110-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.12.0](service/sso/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.16.0](service/ssoadmin/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.14.0](service/ssooidc/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.18.0](service/storagegateway/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.18.0](service/sts/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.14.0](service/support/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/supportapp`: [v1.2.0](service/supportapp/CHANGELOG.md#v120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.14.0](service/swf/CHANGELOG.md#v1140-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.17.0](service/synthetics/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.20.0](service/textract/CHANGELOG.md#v1200-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.15.0](service/timestreamquery/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.15.0](service/timestreamwrite/CHANGELOG.md#v1150-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.25.0](service/transcribe/CHANGELOG.md#v1250-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.9.0](service/transcribestreaming/CHANGELOG.md#v190-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.28.0](service/transfer/CHANGELOG.md#v1280-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.17.0](service/translate/CHANGELOG.md#v1170-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.12.0](service/voiceid/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.12.0](service/waf/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.13.0](service/wafregional/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.24.0](service/wafv2/CHANGELOG.md#v1240-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
-* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.18.0](service/wellarchitected/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.12.0](service/wisdom/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.13.0](service/workdocs/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.13.0](service/worklink/CHANGELOG.md#v1130-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.18.0](service/workmail/CHANGELOG.md#v1180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/workmailmessageflow`: [v1.12.0](service/workmailmessageflow/CHANGELOG.md#v1120-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.28.0](service/workspaces/CHANGELOG.md#v1280-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.8.0](service/workspacesweb/CHANGELOG.md#v180-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). -* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.16.0](service/xray/CHANGELOG.md#v1160-2023-01-05) - * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
- -# Release (2023-01-04) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.16.0](service/applicationautoscaling/CHANGELOG.md#v1160-2023-01-04) - * **Feature**: Customers can now use the existing DescribeScalingActivities API to also see the detailed and machine-readable reasons for Application Auto Scaling not scaling their resources and, if needed, take the necessary corrective actions. -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.17.4](service/cloudwatchlogs/CHANGELOG.md#v1174-2023-01-04) - * **Documentation**: Update to remove sequenceToken as a required field in PutLogEvents calls. -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.34.0](service/ssm/CHANGELOG.md#v1340-2023-01-04) - * **Feature**: Adding support for QuickSetup Document Type in Systems Manager - -# Release (2023-01-03) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.1.0](service/securitylake/CHANGELOG.md#v110-2023-01-03) - * **Feature**: Allow CreateSubscriber API to take string input that allows setting more descriptive SubscriberDescription field. Make souceTypes field required in model level for UpdateSubscriberRequest as it is required for every API call on the backend. Allow ListSubscribers take any String as nextToken param. - -# Release (2022-12-30) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.23.0](service/cloudfront/CHANGELOG.md#v1230-2022-12-30) - * **Feature**: Extend response headers policy to support removing headers from viewer responses -* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.2.1](service/iotfleetwise/CHANGELOG.md#v121-2022-12-30) - * **Documentation**: Update documentation - correct the epoch constant value of default value for expiryTime field in CreateCampaign request. 
- -# Release (2022-12-29) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.15.28](service/apigateway/CHANGELOG.md#v11528-2022-12-29) - * **Documentation**: Documentation updates for Amazon API Gateway -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.21.0](service/emr/CHANGELOG.md#v1210-2022-12-29) - * **Feature**: Added GetClusterSessionCredentials API to allow Amazon SageMaker Studio to connect to EMR on EC2 clusters with runtime roles and AWS Lake Formation-based access control for Apache Spark, Apache Hive, and Presto queries. -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.17.0](service/secretsmanager/CHANGELOG.md#v1170-2022-12-29) - * **Feature**: Added owning service filter, include planned deletion flag, and next rotation date response parameter in ListSecrets. -* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.11.0](service/wisdom/CHANGELOG.md#v1110-2022-12-29) - * **Feature**: This release extends Wisdom CreateContent and StartContentUpload APIs to support PDF and MicrosoftWord docx document uploading. - -# Release (2022-12-28) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.25.0](service/elasticache/CHANGELOG.md#v1250-2022-12-28) - * **Feature**: This release allows you to modify the encryption in transit setting, for existing Redis clusters. You can now change the TLS configuration of your Redis clusters without the need to re-build or re-provision the clusters or impact application availability. -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.21.0](service/networkfirewall/CHANGELOG.md#v1210-2022-12-28) - * **Feature**: AWS Network Firewall now provides status messages for firewalls to help you troubleshoot when your endpoint fails. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.38.0](service/rds/CHANGELOG.md#v1380-2022-12-28) - * **Feature**: This release adds support for Custom Engine Version (CEV) on RDS Custom SQL Server. 
-* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.10.0](service/route53recoverycontrolconfig/CHANGELOG.md#v1100-2022-12-28) - * **Feature**: Added support for Python paginators in the route53-recovery-control-config List* APIs. - -# Release (2022-12-27) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.11.0](service/memorydb/CHANGELOG.md#v1110-2022-12-27) - * **Feature**: This release adds support for MemoryDB Reserved nodes which provides a significant discount compared to on-demand node pricing. Reserved nodes are not physical nodes, but rather a billing discount applied to the use of on-demand nodes in your account. -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.27.0](service/transfer/CHANGELOG.md#v1270-2022-12-27) - * **Feature**: Add additional operations to throw ThrottlingExceptions - -# Release (2022-12-23) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.42.0](service/connect/CHANGELOG.md#v1420-2022-12-23) - * **Feature**: Support for Routing Profile filter, SortCriteria, and grouping by Routing Profiles for GetCurrentMetricData API. Support for RoutingProfiles, UserHierarchyGroups, and Agents as filters, NextStatus and AgentStatusName for GetCurrentUserData. Adds ApproximateTotalCount to both APIs. -* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.13.0](service/connectparticipant/CHANGELOG.md#v1130-2022-12-23) - * **Feature**: Amazon Connect Chat introduces the Message Receipts feature. This feature allows agents and customers to receive message delivered and read receipts after they send a chat message. -* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.17.0](service/detective/CHANGELOG.md#v1170-2022-12-23) - * **Feature**: This release adds a missed AccessDeniedException type to several endpoints. 
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.27.0](service/fsx/CHANGELOG.md#v1270-2022-12-23) - * **Feature**: Fix a bug where a recent release might break certain existing SDKs. -* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.10.0](service/inspector2/CHANGELOG.md#v1100-2022-12-23) - * **Feature**: Amazon Inspector adds support for scanning NodeJS 18.x and Go 1.x AWS Lambda function runtimes. - -# Release (2022-12-22) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.19.0](service/computeoptimizer/CHANGELOG.md#v1190-2022-12-22) - * **Feature**: This release enables AWS Compute Optimizer to analyze and generate optimization recommendations for ecs services running on Fargate. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.41.0](service/connect/CHANGELOG.md#v1410-2022-12-22) - * **Feature**: Amazon Connect Chat introduces the Idle Participant/Autodisconnect feature, which allows users to set timeouts relating to the activity of chat participants, using the new UpdateParticipantRoleConfig API. -* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.16.0](service/iotdeviceadvisor/CHANGELOG.md#v1160-2022-12-22) - * **Feature**: This release adds the following new features: 1) Documentation updates for IoT Device Advisor APIs. 2) Updated required request parameters for IoT Device Advisor APIs. 3) Added new service feature: ability to provide the test endpoint when customer executing the StartSuiteRun API. -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.1.0](service/kinesisvideowebrtcstorage/CHANGELOG.md#v110-2022-12-22) - * **Feature**: Amazon Kinesis Video Streams offers capabilities to stream video and audio in real-time via WebRTC to the cloud for storage, playback, and analytical processing. Customers can use our enhanced WebRTC SDK and cloud APIs to enable real-time streaming, as well as media ingestion to the cloud. 
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.37.0](service/rds/CHANGELOG.md#v1370-2022-12-22) - * **Feature**: Add support for managing master user password in AWS Secrets Manager for the DBInstance and DBCluster. -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.16.11](service/secretsmanager/CHANGELOG.md#v11611-2022-12-22) - * **Documentation**: Documentation updates for Secrets Manager - -# Release (2022-12-21) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/licensemanagerlinuxsubscriptions`: [v1.0.0](service/licensemanagerlinuxsubscriptions/CHANGELOG.md#v100-2022-12-21) - * **Release**: New AWS service client module - * **Feature**: AWS License Manager now offers cross-region, cross-account tracking of commercial Linux subscriptions on AWS. This includes subscriptions purchased as part of EC2 subscription-included AMIs, on the AWS Marketplace, or brought to AWS via Red Hat Cloud Access Program. -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.25.0](service/macie2/CHANGELOG.md#v1250-2022-12-21) - * **Feature**: This release adds support for analyzing Amazon S3 objects that use the S3 Glacier Instant Retrieval (Glacier_IR) storage class. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.61.0](service/sagemaker/CHANGELOG.md#v1610-2022-12-21) - * **Feature**: This release enables adding RStudio Workbench support to an existing Amazon SageMaker Studio domain. It allows setting your RStudio on SageMaker environment configuration parameters and also updating the RStudioConnectUrl and RStudioPackageManagerUrl parameters for existing domains -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.33.4](service/ssm/CHANGELOG.md#v1334-2022-12-21) - * **Documentation**: Doc-only updates for December 2022. 
-* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.13.22](service/support/CHANGELOG.md#v11322-2022-12-21) - * **Documentation**: Documentation updates for the AWS Support API -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.26.0](service/transfer/CHANGELOG.md#v1260-2022-12-21) - * **Feature**: This release adds support for Decrypt as a workflow step type. - -# Release (2022-12-20) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.20.0](service/batch/CHANGELOG.md#v1200-2022-12-20) - * **Feature**: Adds isCancelled and isTerminated to DescribeJobs response. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.77.0](service/ec2/CHANGELOG.md#v1770-2022-12-20) - * **Feature**: Adds support for pagination in the EC2 DescribeImages API. -* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.16.0](service/lookoutequipment/CHANGELOG.md#v1160-2022-12-20) - * **Feature**: This release adds support for listing inference schedulers by status. -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.27.0](service/medialive/CHANGELOG.md#v1270-2022-12-20) - * **Feature**: This release adds support for two new features to AWS Elemental MediaLive. First, you can now burn-in timecodes to your MediaLive outputs. Second, we now now support the ability to decode Dolby E audio when it comes in on an input. -* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.15.0](service/nimble/CHANGELOG.md#v1150-2022-12-20) - * **Feature**: Amazon Nimble Studio now supports configuring session storage volumes and persistence, as well as backup and restore sessions through launch profiles. -* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.1.0](service/resourceexplorer2/CHANGELOG.md#v110-2022-12-20) - * **Feature**: Documentation updates for AWS Resource Explorer. 
-* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.13.0](service/route53domains/CHANGELOG.md#v1130-2022-12-20) - * **Feature**: Use Route 53 domain APIs to change owner, create/delete DS record, modify IPS tag, resend authorization. New: AssociateDelegationSignerToDomain, DisassociateDelegationSignerFromDomain, PushDomain, ResendOperationAuthorization. Updated: UpdateDomainContact, ListOperations, CheckDomainTransferability. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.60.0](service/sagemaker/CHANGELOG.md#v1600-2022-12-20) - * **Feature**: Amazon SageMaker Autopilot adds support for new objective metrics in CreateAutoMLJob API. -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.24.0](service/transcribe/CHANGELOG.md#v1240-2022-12-20) - * **Feature**: Enable our batch transcription jobs for Swedish and Vietnamese. - -# Release (2022-12-19) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.21.0](service/athena/CHANGELOG.md#v1210-2022-12-19) - * **Feature**: Add missed InvalidRequestException in GetCalculationExecutionCode,StopCalculationExecution APIs. Correct required parameters (Payload and Type) in UpdateNotebook API. Change Notebook size from 15 Mb to 10 Mb. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.22.0](service/ecs/CHANGELOG.md#v1220-2022-12-19) - * **Feature**: This release adds support for alarm-based rollbacks in ECS, a new feature that allows customers to add automated safeguards for Amazon ECS service rolling updates. -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.14.0](service/kinesisvideo/CHANGELOG.md#v1140-2022-12-19) - * **Feature**: Amazon Kinesis Video Streams offers capabilities to stream video and audio in real-time via WebRTC to the cloud for storage, playback, and analytical processing. 
Customers can use our enhanced WebRTC SDK and cloud APIs to enable real-time streaming, as well as media ingestion to the cloud. -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.0.0](service/kinesisvideowebrtcstorage/CHANGELOG.md#v100-2022-12-19) - * **Release**: New AWS service client module - * **Feature**: Amazon Kinesis Video Streams offers capabilities to stream video and audio in real-time via WebRTC to the cloud for storage, playback, and analytical processing. Customers can use our enhanced WebRTC SDK and cloud APIs to enable real-time streaming, as well as media ingestion to the cloud. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.36.0](service/rds/CHANGELOG.md#v1360-2022-12-19) - * **Feature**: Add support for --enable-customer-owned-ip to RDS create-db-instance-read-replica API for RDS on Outposts. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.59.0](service/sagemaker/CHANGELOG.md#v1590-2022-12-19) - * **Feature**: AWS Sagemaker - Sagemaker Images now supports Aliases as secondary identifiers for ImageVersions. SageMaker Images now supports additional metadata for ImageVersions for better images management. - -# Release (2022-12-16) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.22.0](service/appflow/CHANGELOG.md#v1220-2022-12-16) - * **Feature**: This release updates the ListConnectorEntities API action so that it returns paginated responses that customers can retrieve with next tokens. -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.22.2](service/cloudfront/CHANGELOG.md#v1222-2022-12-16) - * **Documentation**: Updated documentation for CloudFront -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.20.0](service/datasync/CHANGELOG.md#v1200-2022-12-16) - * **Feature**: AWS DataSync now supports the use of tags with task executions. With this new feature, you can apply tags each time you execute a task, giving you greater control and management over your task executions. 
-* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.18.3](service/efs/CHANGELOG.md#v1183-2022-12-16) - * **Documentation**: General documentation updates for EFS. -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.16.6](service/guardduty/CHANGELOG.md#v1166-2022-12-16) - * **Documentation**: This release provides the valid characters for the Description and Name field. -* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.2.0](service/iotfleetwise/CHANGELOG.md#v120-2022-12-16) - * **Feature**: Updated error handling for empty resource names in "UpdateSignalCatalog" and "GetModelManifest" operations. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.58.0](service/sagemaker/CHANGELOG.md#v1580-2022-12-16) - * **Feature**: AWS sagemaker - Features: This release adds support for random seed, it's an integer value used to initialize a pseudo-random number generator. Setting a random seed will allow the hyperparameter tuning search strategies to produce more consistent configurations for the same tuning job. - -# Release (2022-12-15) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.17.3 - * **Bug Fix**: Unify logic between shared config and in finding home directory -* `github.com/aws/aws-sdk-go-v2/config`: [v1.18.5](config/CHANGELOG.md#v1185-2022-12-15) - * **Bug Fix**: Unify logic between shared config and in finding home directory -* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.13.5](credentials/CHANGELOG.md#v1135-2022-12-15) - * **Bug Fix**: Unify logic between shared config and in finding home directory -* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.8.0](service/backupgateway/CHANGELOG.md#v180-2022-12-15) - * **Feature**: This release adds support for VMware vSphere tags, enabling customer to protect VMware virtual machines using tag-based policies for AWS tags mapped from vSphere tags. 
This release also adds support for customer-accessible gateway-hypervisor interaction log and upload bandwidth rate limit schedule. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.40.0](service/connect/CHANGELOG.md#v1400-2022-12-15) - * **Feature**: Added support for "English - New Zealand" and "English - South African" to be used with Amazon Connect Custom Vocabulary APIs. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.21.0](service/ecs/CHANGELOG.md#v1210-2022-12-15) - * **Feature**: This release adds support for container port ranges in ECS, a new capability that allows customers to provide container port ranges to simplify use cases where multiple ports are in use in a container. This release updates TaskDefinition mutation APIs and the Task description APIs. -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.26.0](service/eks/CHANGELOG.md#v1260-2022-12-15) - * **Feature**: Add support for Windows managed nodes groups. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.38.0](service/glue/CHANGELOG.md#v1380-2022-12-15) - * **Feature**: This release adds support for AWS Glue Crawler with native DeltaLake tables, allowing Crawlers to classify Delta Lake format tables and catalog them for query engines to query against. -* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.16.0](service/kinesis/CHANGELOG.md#v1160-2022-12-15) - * **Feature**: Added StreamARN parameter for Kinesis Data Streams APIs. Added a new opaque pagination token for ListStreams. SDKs will auto-generate Account Endpoint when accessing Kinesis Data Streams. -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.19.5](service/location/CHANGELOG.md#v1195-2022-12-15) - * **Documentation**: This release adds support for a new style, "VectorOpenDataStandardLight" which can be used with the new data source, "Open Data Maps (Preview)". 
-* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.2.0](service/m2/CHANGELOG.md#v120-2022-12-15) - * **Feature**: Adds an optional create-only `KmsKeyId` property to Environment and Application resources. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.57.0](service/sagemaker/CHANGELOG.md#v1570-2022-12-15) - * **Feature**: SageMaker Inference Recommender now allows customers to load tests their models on various instance types using private VPC. -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.26.0](service/securityhub/CHANGELOG.md#v1260-2022-12-15) - * **Feature**: Added new resource details objects to ASFF, including resources for AwsEc2LaunchTemplate, AwsSageMakerNotebookInstance, AwsWafv2WebAcl and AwsWafv2RuleGroup. -* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.16.0](service/translate/CHANGELOG.md#v1160-2022-12-15) - * **Feature**: Raised the input byte size limit of the Text field in the TranslateText API to 10000 bytes. - -# Release (2022-12-14) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.23.0](service/cloudwatch/CHANGELOG.md#v1230-2022-12-14) - * **Feature**: Adding support for Metrics Insights Alarms -* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.24.0](service/costexplorer/CHANGELOG.md#v1240-2022-12-14) - * **Feature**: This release supports percentage-based thresholds on Cost Anomaly Detection alert subscriptions. -* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.16.0](service/networkmanager/CHANGELOG.md#v1160-2022-12-14) - * **Feature**: Appliance Mode support for AWS Cloud WAN. -* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.17.0](service/redshiftdata/CHANGELOG.md#v1170-2022-12-14) - * **Feature**: This release adds a new --client-token field to ExecuteStatement and BatchExecuteStatement operations. Customers can now run queries with the additional client token parameter to ensures idempotency. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemakermetrics`: [v1.0.1](service/sagemakermetrics/CHANGELOG.md#v101-2022-12-14) - * **Documentation**: Update SageMaker Metrics documentation. - -# Release (2022-12-13) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.21.0](service/cloudtrail/CHANGELOG.md#v1210-2022-12-13) - * **Feature**: Merging mainline branch for service model into mainline release branch. There are no new APIs. -* `github.com/aws/aws-sdk-go-v2/service/marketplaceentitlementservice`: [v1.11.21](service/marketplaceentitlementservice/CHANGELOG.md#v11121-2022-12-13) - * **Bug Fix**: Fixing a shape type in the marketplaceentitlementservice client -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.35.0](service/rds/CHANGELOG.md#v1350-2022-12-13) - * **Feature**: This deployment adds ClientPasswordAuthType field to the Auth structure of the DBProxy. - -# Release (2022-12-12) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.21.0](service/customerprofiles/CHANGELOG.md#v1210-2022-12-12) - * **Feature**: This release allows custom strings in PartyType and Gender through 2 new attributes in the CreateProfile and UpdateProfile APIs: PartyTypeString and GenderString. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.76.0](service/ec2/CHANGELOG.md#v1760-2022-12-12) - * **Feature**: This release updates DescribeFpgaImages to show supported instance types of AFIs in its response. -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.13.0](service/kinesisvideo/CHANGELOG.md#v1130-2022-12-12) - * **Feature**: This release adds support for public preview of Kinesis Video Stream at Edge enabling customers to provide configuration for the Kinesis Video Stream EdgeAgent running on an on-premise IoT device. Customers can now locally record from cameras and stream videos to the cloud on configured schedule. 
-* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.14.13](service/lookoutvision/CHANGELOG.md#v11413-2022-12-12) - * **Documentation**: This documentation update adds kms:GenerateDataKey as a required permission to StartModelPackagingJob. -* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.7.0](service/migrationhubrefactorspaces/CHANGELOG.md#v170-2022-12-12) - * **Feature**: This release adds support for Lambda alias service endpoints. Lambda alias ARNs can now be passed into CreateService. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.34.0](service/rds/CHANGELOG.md#v1340-2022-12-12) - * **Feature**: Update the RDS API model to support copying option groups during the CopyDBSnapshot operation -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.22.0](service/rekognition/CHANGELOG.md#v1220-2022-12-12) - * **Feature**: Adds support for "aliases" and "categories", inclusion and exclusion filters for labels and label categories, and aggregating labels by video segment timestamps for Stored Video Label Detection APIs. -* `github.com/aws/aws-sdk-go-v2/service/sagemakermetrics`: [v1.0.0](service/sagemakermetrics/CHANGELOG.md#v100-2022-12-12) - * **Release**: New AWS service client module - * **Feature**: This release introduces support SageMaker Metrics APIs. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.23.3](service/wafv2/CHANGELOG.md#v1233-2022-12-12) - * **Documentation**: Documents the naming requirement for logging destinations that you use with web ACLs. - -# Release (2022-12-09) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.17.2](service/cloudwatchlogs/CHANGELOG.md#v1172-2022-12-09) - * **Documentation**: Doc-only update for CloudWatch Logs, for Tagging Permissions clarifications -* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.1.0](service/iotfleetwise/CHANGELOG.md#v110-2022-12-09) - * **Feature**: Deprecated assignedValue property for actuators and attributes. 
Added a message to invalid nodes and invalid decoder manifest exceptions. -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.26.0](service/medialive/CHANGELOG.md#v1260-2022-12-09) - * **Feature**: Link devices now support buffer size (latency) configuration. A higher latency value means a longer delay in transmitting from the device to MediaLive, but improved resiliency. A lower latency value means a shorter delay, but less resiliency. -* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.20.0](service/mediapackagevod/CHANGELOG.md#v1200-2022-12-09) - * **Feature**: This release provides the approximate number of assets in a packaging group. - -# Release (2022-12-08) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.25.0](service/autoscaling/CHANGELOG.md#v1250-2022-12-08) - * **Feature**: Adds support for metric math for target tracking scaling policies, saving you the cost and effort of publishing a custom metric to CloudWatch. Also adds support for VPC Lattice by adding the Attach/Detach/DescribeTrafficSources APIs and a new health check type to the CreateAutoScalingGroup API. -* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.9.0](service/iottwinmaker/CHANGELOG.md#v190-2022-12-08) - * **Feature**: This release adds the following new features: 1) New APIs for managing a continuous sync of assets and asset models from AWS IoT SiteWise. 2) Support user friendly names for component types (ComponentTypeName) and properties (DisplayName). -* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.6.0](service/migrationhubstrategy/CHANGELOG.md#v160-2022-12-08) - * **Feature**: This release adds known application filtering, server selection for assessments, support for potential recommendations, and indications for configuration and assessment status. 
For more information, see the AWS Migration Hub documentation at https://docs.aws.amazon.com/migrationhub/index.html - -# Release (2022-12-07) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.22.0](service/cloudfront/CHANGELOG.md#v1220-2022-12-07) - * **Feature**: Introducing UpdateDistributionWithStagingConfig that can be used to promote the staging configuration to the production. -* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.23.0](service/costexplorer/CHANGELOG.md#v1230-2022-12-07) - * **Feature**: This release adds the LinkedAccountName field to the GetAnomalies API response under RootCause -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.25.0](service/eks/CHANGELOG.md#v1250-2022-12-07) - * **Feature**: Adds support for EKS add-ons configurationValues fields and DescribeAddonConfiguration function -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.19.2](service/kms/CHANGELOG.md#v1192-2022-12-07) - * **Documentation**: Updated examples and exceptions for External Key Store (XKS). - -# Release (2022-12-06) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.3.0](service/billingconductor/CHANGELOG.md#v130-2022-12-06) - * **Feature**: This release adds the Tiering Pricing Rule feature. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.39.0](service/connect/CHANGELOG.md#v1390-2022-12-06) - * **Feature**: This release provides APIs that enable you to programmatically manage rules for Contact Lens conversational analytics and third party applications. For more information, see https://docs.aws.amazon.com/connect/latest/APIReference/rules-api.html -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.33.0](service/rds/CHANGELOG.md#v1330-2022-12-06) - * **Feature**: This release adds the BlueGreenDeploymentNotFoundFault to the AddTagsToResource, ListTagsForResource, and RemoveTagsFromResource operations. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.12.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1120-2022-12-06) - * **Feature**: For online + offline Feature Groups, added ability to target PutRecord and DeleteRecord actions to only online store, or only offline store. If target store parameter is not specified, actions will apply to both stores. - -# Release (2022-12-05) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.22.0](service/costexplorer/CHANGELOG.md#v1220-2022-12-05) - * **Feature**: This release introduces two new APIs that offer a 1-click experience to refresh Savings Plans recommendations. The two APIs are StartSavingsPlansPurchaseRecommendationGeneration and ListSavingsPlansPurchaseRecommendationGeneration. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.75.0](service/ec2/CHANGELOG.md#v1750-2022-12-05) - * **Feature**: Documentation updates for EC2. -* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.2.0](service/ivschat/CHANGELOG.md#v120-2022-12-05) - * **Feature**: Adds PendingVerification error type to messaging APIs to block the resource usage for accounts identified as being fraudulent. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.32.0](service/rds/CHANGELOG.md#v1320-2022-12-05) - * **Feature**: This release adds the InvalidDBInstanceStateFault to the RestoreDBClusterFromSnapshot operation. -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.23.0](service/transcribe/CHANGELOG.md#v1230-2022-12-05) - * **Feature**: Amazon Transcribe now supports creating custom language models in the following languages: Japanese (ja-JP) and German (de-DE). 
- -# Release (2022-12-02) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.17.0](service/appsync/CHANGELOG.md#v1170-2022-12-02) - * **Feature**: Fixes the URI for the evaluatecode endpoint to include the /v1 prefix (ie. "/v1/dataplane-evaluatecode"). -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.20.1](service/ecs/CHANGELOG.md#v1201-2022-12-02) - * **Documentation**: Documentation updates for Amazon ECS -* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.21.0](service/fms/CHANGELOG.md#v1210-2022-12-02) - * **Feature**: AWS Firewall Manager now supports Fortigate Cloud Native Firewall as a Service as a third-party policy type. -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.28.0](service/mediaconvert/CHANGELOG.md#v1280-2022-12-02) - * **Feature**: The AWS Elemental MediaConvert SDK has added support for configurable ID3 eMSG box attributes and the ability to signal them with InbandEventStream tags in DASH and CMAF outputs. -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.25.0](service/medialive/CHANGELOG.md#v1250-2022-12-02) - * **Feature**: Updates to Event Signaling and Management (ESAM) API and documentation. -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.21.0](service/polly/CHANGELOG.md#v1210-2022-12-02) - * **Feature**: Add language code for Finnish (fi-FI) -* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.18.0](service/proton/CHANGELOG.md#v1180-2022-12-02) - * **Feature**: CreateEnvironmentAccountConnection RoleArn input is now optional -* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.3.0](service/redshiftserverless/CHANGELOG.md#v130-2022-12-02) - * **Feature**: Add Table Level Restore operations for Amazon Redshift Serverless. Add multi-port support for Amazon Redshift Serverless endpoints. Add Tagging support to Snapshots and Recovery Points in Amazon Redshift Serverless. 
-* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.18.7](service/sns/CHANGELOG.md#v1187-2022-12-02) - * **Documentation**: This release adds the message payload-filtering feature to the SNS Subscribe, SetSubscriptionAttributes, and GetSubscriptionAttributes API actions - -# Release (2022-12-01) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.0.0](service/codecatalyst/CHANGELOG.md#v100-2022-12-01) - * **Release**: New AWS service client module - * **Feature**: This release adds operations that support customers using the AWS Toolkits and Amazon CodeCatalyst, a unified software development service that helps developers develop, deploy, and maintain applications in the cloud. For more information, see the documentation. -* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.20.0](service/comprehend/CHANGELOG.md#v1200-2022-12-01) - * **Feature**: Comprehend now supports semi-structured documents (such as PDF files or image files) as inputs for custom analysis using the synchronous APIs (ClassifyDocument and DetectEntities). -* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.16.0](service/gamelift/CHANGELOG.md#v1160-2022-12-01) - * **Feature**: GameLift introduces a new feature, GameLift Anywhere. GameLift Anywhere allows you to integrate your own compute resources with GameLift. You can also use GameLift Anywhere to iteratively test your game servers without uploading the build to GameLift for every iteration. 
-* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.0.0](service/pipes/CHANGELOG.md#v100-2022-12-01) - * **Release**: New AWS service client module - * **Feature**: AWS introduces new Amazon EventBridge Pipes which allow you to connect sources (SQS, Kinesis, DDB, Kafka, MQ) to Targets (14+ EventBridge Targets) without any code, with filtering, batching, input transformation, and an optional Enrichment stage (Lambda, StepFunctions, ApiGateway, ApiDestinations) -* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.16.0](service/sfn/CHANGELOG.md#v1160-2022-12-01) - * **Feature**: This release adds support for the AWS Step Functions Map state in Distributed mode. The changes include a new MapRun resource and several new and modified APIs. - -# Release (2022-11-30) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.18.0](service/accessanalyzer/CHANGELOG.md#v1180-2022-11-30) - * **Feature**: This release adds support for S3 cross account access points. IAM Access Analyzer will now produce public or cross account findings when it detects bucket delegation to external account access points. -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.20.0](service/athena/CHANGELOG.md#v1200-2022-11-30) - * **Feature**: This release includes support for using Apache Spark in Amazon Athena. -* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.17.0](service/dataexchange/CHANGELOG.md#v1170-2022-11-30) - * **Feature**: This release enables data providers to license direct access to data in their Amazon S3 buckets or AWS Lake Formation data lakes through AWS Data Exchange. Subscribers get read-only access to the data and can use it in downstream AWS services, like Amazon Athena, without creating or managing copies. -* `github.com/aws/aws-sdk-go-v2/service/docdbelastic`: [v1.0.0](service/docdbelastic/CHANGELOG.md#v100-2022-11-30) - * **Release**: New AWS service client module - * **Feature**: Launched Amazon DocumentDB Elastic Clusters. 
You can now use the SDK to create, list, update and delete Amazon DocumentDB Elastic Cluster resources -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.37.0](service/glue/CHANGELOG.md#v1370-2022-11-30) - * **Feature**: This release adds support for AWS Glue Data Quality, which helps you evaluate and monitor the quality of your data and includes the API for creating, deleting, or updating data quality rulesets, runs and evaluations. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.28.0](service/s3control/CHANGELOG.md#v1280-2022-11-30) - * **Feature**: Amazon S3 now supports cross-account access points. S3 bucket owners can now allow trusted AWS accounts to create access points associated with their bucket. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.56.0](service/sagemaker/CHANGELOG.md#v1560-2022-11-30) - * **Feature**: Added Models as part of the Search API. Added Model shadow deployments in realtime inference, and shadow testing in managed inference. Added support for shared spaces, geospatial APIs, Model Cards, AutoMLJobStep in pipelines, Git repositories on user profiles and domains, Model sharing in Jumpstart. -* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.0.0](service/sagemakergeospatial/CHANGELOG.md#v100-2022-11-30) - * **Release**: New AWS service client module - * **Feature**: This release provides Amazon SageMaker geospatial APIs to build, train, deploy and visualize geospatial models. - -# Release (2022-11-29.2) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.74.0](service/ec2/CHANGELOG.md#v1740-2022-11-292) - * **Feature**: This release adds support for AWS Verified Access and the Hpc6id Amazon EC2 compute optimized instance type, which features 3rd generation Intel Xeon Scalable processors. 
-* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.15.0](service/firehose/CHANGELOG.md#v1150-2022-11-292) - * **Feature**: Allow support for the Serverless offering for Amazon OpenSearch Service as a Kinesis Data Firehose delivery destination. -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.19.0](service/kms/CHANGELOG.md#v1190-2022-11-292) - * **Feature**: AWS KMS introduces the External Key Store (XKS), a new feature for customers who want to protect their data with encryption keys stored in an external key management system under their control. -* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.0.0](service/omics/CHANGELOG.md#v100-2022-11-292) - * **Release**: New AWS service client module - * **Feature**: Amazon Omics is a new, purpose-built service that can be used by healthcare and life science organizations to store, query, and analyze omics data. The insights from that data can be used to accelerate scientific discoveries and improve healthcare. -* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.0.0](service/opensearchserverless/CHANGELOG.md#v100-2022-11-292) - * **Release**: New AWS service client module - * **Feature**: Publish SDK for Amazon OpenSearch Serverless -* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.0.0](service/securitylake/CHANGELOG.md#v100-2022-11-292) - * **Release**: New AWS service client module - * **Feature**: Amazon Security Lake automatically centralizes security data from cloud, on-premises, and custom sources into a purpose-built data lake stored in your account. 
Security Lake makes it easier to analyze security data, so you can improve the protection of your workloads, applications, and data -* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.0.0](service/simspaceweaver/CHANGELOG.md#v100-2022-11-292) - * **Release**: New AWS service client module - * **Feature**: AWS SimSpace Weaver is a new service that helps customers build spatial simulations at new levels of scale - resulting in virtual worlds with millions of dynamic entities. See the AWS SimSpace Weaver developer guide for more details on how to get started. https://docs.aws.amazon.com/simspaceweaver - -# Release (2022-11-29) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/arczonalshift`: [v1.0.0](service/arczonalshift/CHANGELOG.md#v100-2022-11-29) - * **Release**: New AWS service client module - * **Feature**: Amazon Route 53 Application Recovery Controller Zonal Shift is a new service that makes it easy to shift traffic away from an Availability Zone in a Region. See the developer guide for more information: https://docs.aws.amazon.com/r53recovery/latest/dg/what-is-route53-recovery.html -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.18.0](service/computeoptimizer/CHANGELOG.md#v1180-2022-11-29) - * **Feature**: Adds support for a new recommendation preference that makes it possible for customers to optimize their EC2 recommendations by utilizing an external metrics ingestion service to provide metrics. -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.28.0](service/configservice/CHANGELOG.md#v1280-2022-11-29) - * **Feature**: With this release, you can use AWS Config to evaluate your resources for compliance with Config rules before they are created or updated. Using Config rules in proactive mode enables you to test and build compliant resource templates or check resource configurations at the time they are provisioned. 
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.73.0](service/ec2/CHANGELOG.md#v1730-2022-11-29) - * **Feature**: Introduces ENA Express, which uses AWS SRD and dynamic routing to increase throughput and minimize latency, adds support for trust relationships between Reachability Analyzer and AWS Organizations to enable cross-account analysis, and adds support for Infrastructure Performance metric subscriptions. -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.24.0](service/eks/CHANGELOG.md#v1240-2022-11-29) - * **Feature**: Adds support for additional EKS add-ons metadata and filtering fields -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.26.0](service/fsx/CHANGELOG.md#v1260-2022-11-29) - * **Feature**: This release adds support for 4GB/s / 160K PIOPS FSx for ONTAP file systems and 10GB/s / 350K PIOPS FSx for OpenZFS file systems (Single_AZ_2). For FSx for ONTAP, this also adds support for DP volumes, snapshot policy, copy tags to backups, and Multi-AZ route table updates. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.36.0](service/glue/CHANGELOG.md#v1360-2022-11-29) - * **Feature**: This release allows the creation of Custom Visual Transforms (Dynamic Transforms) to be created via AWS Glue CLI/SDK. -* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.9.0](service/inspector2/CHANGELOG.md#v190-2022-11-29) - * **Feature**: This release adds support for Inspector to scan AWS Lambda. -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.26.0](service/lambda/CHANGELOG.md#v1260-2022-11-29) - * **Feature**: Adds support for Lambda SnapStart, which helps improve the startup performance of functions. 
Customers can now manage SnapStart based functions via CreateFunction and UpdateFunctionConfiguration APIs -* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.1.0](service/licensemanagerusersubscriptions/CHANGELOG.md#v110-2022-11-29) - * **Feature**: AWS now offers fully-compliant, Amazon-provided licenses for Microsoft Office Professional Plus 2021 Amazon Machine Images (AMIs) on Amazon EC2. These AMIs are now available on the Amazon EC2 console and on AWS Marketplace to launch instances on-demand without any long-term licensing commitments. -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.24.0](service/macie2/CHANGELOG.md#v1240-2022-11-29) - * **Feature**: Added support for configuring Macie to continually sample objects from S3 buckets and inspect them for sensitive data. Results appear in statistics, findings, and other data that Macie provides. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.28.0](service/quicksight/CHANGELOG.md#v1280-2022-11-29) - * **Feature**: This release adds new Describe APIs and updates Create and Update APIs to support the data model for Dashboards, Analyses, and Templates. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.27.0](service/s3control/CHANGELOG.md#v1270-2022-11-29) - * **Feature**: Added two new APIs to support Amazon S3 Multi-Region Access Point failover controls: GetMultiRegionAccessPointRoutes and SubmitMultiRegionAccessPointRoutes. The failover control APIs are supported in the following Regions: us-east-1, us-west-2, eu-west-1, ap-southeast-2, and ap-northeast-1. 
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.25.0](service/securityhub/CHANGELOG.md#v1250-2022-11-29) - * **Feature**: Adding StandardsManagedBy field to DescribeStandards API response - -# Release (2022-11-28) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.18.0](service/backup/CHANGELOG.md#v1180-2022-11-28) - * **Feature**: AWS Backup introduces support for legal hold and application stack backups. AWS Backup Audit Manager introduces support for cross-Region, cross-account reports. -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.22.0](service/cloudwatch/CHANGELOG.md#v1220-2022-11-28) - * **Feature**: Adds cross-account support to the GetMetricData API. Adds cross-account support to the ListMetrics API through the usage of the IncludeLinkedAccounts flag and the new OwningAccounts field. -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.17.0](service/cloudwatchlogs/CHANGELOG.md#v1170-2022-11-28) - * **Feature**: Updates to support CloudWatch Logs data protection and CloudWatch cross-account observability -* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.9.0](service/drs/CHANGELOG.md#v190-2022-11-28) - * **Feature**: Non breaking changes to existing APIs, and additional APIs added to support in-AWS failing back using AWS Elastic Disaster Recovery. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.20.0](service/ecs/CHANGELOG.md#v1200-2022-11-28) - * **Feature**: This release adds support for ECS Service Connect, a new capability that simplifies writing and operating resilient distributed applications. This release updates the TaskDefinition, Cluster, Service mutation APIs with Service connect constructs and also adds a new ListServicesByNamespace API. -* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.18.0](service/efs/CHANGELOG.md#v1180-2022-11-28) - * **Feature**: This release adds elastic as a new ThroughputMode value for EFS file systems and adds AFTER_1_DAY as a value for TransitionToIARules. 
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.32.0](service/iot/CHANGELOG.md#v1320-2022-11-28) - * **Feature**: Job scheduling enables the scheduled rollout of a Job with start and end times and a customizable end behavior when end time is reached. This is available for continuous and snapshot jobs. Added support for MQTT5 properties to AWS IoT TopicRule Republish Action. -* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.13.0](service/iotdataplane/CHANGELOG.md#v1130-2022-11-28) - * **Feature**: This release adds support for MQTT5 properties to AWS IoT HTTP Publish API. -* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.23.0](service/iotwireless/CHANGELOG.md#v1230-2022-11-28) - * **Feature**: This release includes a new feature for customers to calculate the position of their devices by adding three new APIs: UpdateResourcePosition, GetResourcePosition, and GetPositionEstimate. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.36.0](service/kendra/CHANGELOG.md#v1360-2022-11-28) - * **Feature**: Amazon Kendra now supports preview of table information from HTML tables in the search results. The most relevant cells with their corresponding rows, columns are displayed as a preview in the search result. The most relevant table cell or cells are also highlighted in table preview. -* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.16.0](service/mgn/CHANGELOG.md#v1160-2022-11-28) - * **Feature**: This release adds support for Application and Wave management. We also now support custom post-launch actions. -* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.0.0](service/oam/CHANGELOG.md#v100-2022-11-28) - * **Release**: New AWS service client module - * **Feature**: Amazon CloudWatch Observability Access Manager is a new service that allows configuration of the CloudWatch cross-account observability feature. 
-* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.17.0](service/organizations/CHANGELOG.md#v1170-2022-11-28) - * **Feature**: This release introduces delegated administrator for AWS Organizations, a new feature to help you delegate the management of your Organizations policies, enabling you to govern your AWS organization in a decentralized way. You can now allow member accounts to manage Organizations policies. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.31.0](service/rds/CHANGELOG.md#v1310-2022-11-28) - * **Feature**: This release enables new Aurora and RDS feature called Blue/Green Deployments that makes updates to databases safer, simpler and faster. -* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.19.0](service/textract/CHANGELOG.md#v1190-2022-11-28) - * **Feature**: This release adds support for classifying and splitting lending documents by type, and extracting information by using the Analyze Lending APIs. This release also includes support for summarized information of the processed lending document package, in addition to per document results. -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.22.0](service/transcribe/CHANGELOG.md#v1220-2022-11-28) - * **Feature**: This release adds support for 'inputType' for post-call and real-time (streaming) Call Analytics within Amazon Transcribe. -* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.8.0](service/transcribestreaming/CHANGELOG.md#v180-2022-11-28) - * **Feature**: This release adds support for real-time (streaming) and post-call Call Analytics within Amazon Transcribe. - -# Release (2022-11-23) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.10.0](service/grafana/CHANGELOG.md#v1100-2022-11-23) - * **Feature**: This release includes support for configuring a Grafana workspace to connect to a datasource within a VPC as well as new APIs for configuring Grafana settings. 
-* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.7.0](service/rbin/CHANGELOG.md#v170-2022-11-23) - * **Feature**: This release adds support for Rule Lock for Recycle Bin, which allows you to lock retention rules so that they can no longer be modified or deleted. - -# Release (2022-11-22) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.21.0](service/appflow/CHANGELOG.md#v1210-2022-11-22) - * **Feature**: Adding support for Amazon AppFlow to transfer the data to Amazon Redshift databases through Amazon Redshift Data API service. This feature will support the Redshift destination connector on both public and private accessible Amazon Redshift Clusters and Amazon Redshift Serverless. -* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.15.0](service/kinesisanalyticsv2/CHANGELOG.md#v1150-2022-11-22) - * **Feature**: Support for Apache Flink 1.15 in Kinesis Data Analytics. - -# Release (2022-11-21) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.25.0](service/route53/CHANGELOG.md#v1250-2022-11-21) - * **Feature**: Amazon Route 53 now supports the Asia Pacific (Hyderabad) Region (ap-south-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. 
- -# Release (2022-11-18.2) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.0.1](service/ssmsap/CHANGELOG.md#v101-2022-11-182) - * **Bug Fix**: Removes old model file for ssm sap and uses the new model file to regenerate client - -# Release (2022-11-18) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.20.0](service/appflow/CHANGELOG.md#v1200-2022-11-18) - * **Feature**: AppFlow provides a new API called UpdateConnectorRegistration to update a custom connector that customers have previously registered. With this API, customers no longer need to unregister and then register a connector to make an update. -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.21.0](service/auditmanager/CHANGELOG.md#v1210-2022-11-18) - * **Feature**: This release introduces a new feature for Audit Manager: Evidence finder. You can now use evidence finder to quickly query your evidence, and add the matching evidence results to an assessment report. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.0.0](service/chimesdkvoice/CHANGELOG.md#v100-2022-11-18) - * **Release**: New AWS service client module - * **Feature**: Amazon Chime Voice Connector, Voice Connector Group and PSTN Audio Service APIs are now available in the Amazon Chime SDK Voice namespace. See https://docs.aws.amazon.com/chime-sdk/latest/dg/sdk-available-regions.html for more details. -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.21.0](service/cloudfront/CHANGELOG.md#v1210-2022-11-18) - * **Feature**: CloudFront API support for staging distributions and associated traffic management policies. 
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.38.0](service/connect/CHANGELOG.md#v1380-2022-11-18) - * **Feature**: Added AllowedAccessControlTags and TagRestrictedResource for Tag Based Access Control on Amazon Connect Webpage -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.17.6](service/dynamodb/CHANGELOG.md#v1176-2022-11-18) - * **Documentation**: Updated minor fixes for DynamoDB documentation. -* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.13.25](service/dynamodbstreams/CHANGELOG.md#v11325-2022-11-18) - * **Documentation**: Updated minor fixes for DynamoDB documentation. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.72.0](service/ec2/CHANGELOG.md#v1720-2022-11-18) - * **Feature**: This release adds support for copying an Amazon Machine Image's tags when copying an AMI. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.35.0](service/glue/CHANGELOG.md#v1350-2022-11-18) - * **Feature**: AWSGlue Crawler - Adding support for Table and Column level Comments with database level datatypes for JDBC based crawler. -* `github.com/aws/aws-sdk-go-v2/service/iotroborunner`: [v1.0.0](service/iotroborunner/CHANGELOG.md#v100-2022-11-18) - * **Release**: New AWS service client module - * **Feature**: AWS IoT RoboRunner is a new service that makes it easy to build applications that help multi-vendor robots work together seamlessly. See the IoT RoboRunner developer guide for more details on getting started. 
https://docs.aws.amazon.com/iotroborunner/latest/dev/iotroborunner-welcome.html -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.27.0](service/quicksight/CHANGELOG.md#v1270-2022-11-18) - * **Feature**: This release adds the following: 1) Asset management for centralized assets governance 2) QuickSight Q now supports public embedding 3) New Termination protection flag to mitigate accidental deletes 4) Athena data sources now accept a custom IAM role 5) QuickSight supports connectivity to Databricks -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.55.0](service/sagemaker/CHANGELOG.md#v1550-2022-11-18) - * **Feature**: Added DisableProfiler flag as a new field in ProfilerConfig -* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.15.0](service/servicecatalog/CHANGELOG.md#v1150-2022-11-18) - * **Feature**: This release 1. adds support for Principal Name Sharing with Service Catalog portfolio sharing. 2. Introduces repo sourced products which are created and managed with existing SC APIs. These products are synced to external repos and auto create new product versions based on changes in the repo. -* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.15.0](service/sfn/CHANGELOG.md#v1150-2022-11-18) - * **Feature**: This release adds support for using Step Functions service integrations to invoke any cross-account AWS resource, even if that service doesn't support resource-based policies or cross-account calls. See https://docs.aws.amazon.com/step-functions/latest/dg/concepts-access-cross-acct-resources.html -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.25.0](service/transfer/CHANGELOG.md#v1250-2022-11-18) - * **Feature**: Adds a NONE encryption algorithm type to AS2 connectors, providing support for skipping encryption of the AS2 message body when a HTTPS URL is also specified. 
- -# Release (2022-11-17) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.12.0](service/amplify/CHANGELOG.md#v1120-2022-11-17) - * **Feature**: Adds a new value (WEB_COMPUTE) to the Platform enum that allows customers to create Amplify Apps with Server-Side Rendering support. -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.19.0](service/appflow/CHANGELOG.md#v1190-2022-11-17) - * **Feature**: AppFlow simplifies the preparation and cataloging of SaaS data into the AWS Glue Data Catalog where your data can be discovered and accessed by AWS analytics and ML services. AppFlow now also supports data field partitioning and file size optimization to improve query performance and reduce cost. -* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.16.0](service/appsync/CHANGELOG.md#v1160-2022-11-17) - * **Feature**: This release introduces the APPSYNC_JS runtime, and adds support for JavaScript in AppSync functions and AppSync pipeline resolvers. -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.22.0](service/databasemigrationservice/CHANGELOG.md#v1220-2022-11-17) - * **Feature**: Adds support for Internet Protocol Version 6 (IPv6) on DMS Replication Instances -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.71.0](service/ec2/CHANGELOG.md#v1710-2022-11-17) - * **Feature**: This release adds a new optional parameter "privateIpAddress" for the CreateNatGateway API. PrivateIPAddress will allow customers to select a custom Private IPv4 address instead of having it be auto-assigned. -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.18.25](service/elasticloadbalancingv2/CHANGELOG.md#v11825-2022-11-17) - * **Documentation**: Provides new target group attributes to turn on/off cross zone load balancing and configure target group health for Network Load Balancers and Application Load Balancers. 
Provides improvements to health check configuration for Network Load Balancers. -* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.4.0](service/emrserverless/CHANGELOG.md#v140-2022-11-17) - * **Feature**: Adds support for AWS Graviton2 based applications. You can now select CPU architecture when creating new applications or updating existing ones. -* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.1.0](service/ivschat/CHANGELOG.md#v110-2022-11-17) - * **Feature**: Adds LoggingConfiguration APIs for IVS Chat - a feature that allows customers to store and record sent messages in a chat room to S3 buckets, CloudWatch logs, or Kinesis firehose. -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.25.0](service/lambda/CHANGELOG.md#v1250-2022-11-17) - * **Feature**: Add Node 18 (nodejs18.x) support to AWS Lambda. -* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.22.0](service/personalize/CHANGELOG.md#v1220-2022-11-17) - * **Feature**: This release provides support for creation and use of metric attributions in AWS Personalize -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.20.0](service/polly/CHANGELOG.md#v1200-2022-11-17) - * **Feature**: Add two new neural voices - Ola (pl-PL) and Hala (ar-AE). -* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.8.0](service/rum/CHANGELOG.md#v180-2022-11-17) - * **Feature**: CloudWatch RUM now supports custom events. To use custom events, create an app monitor or update an app monitor with CustomEvent Status as ENABLED. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.26.0](service/s3control/CHANGELOG.md#v1260-2022-11-17) - * **Feature**: Added 34 new S3 Storage Lens metrics to support additional customer use cases. -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.16.7](service/secretsmanager/CHANGELOG.md#v1167-2022-11-17) - * **Documentation**: Documentation updates for Secrets Manager. 
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.24.0](service/securityhub/CHANGELOG.md#v1240-2022-11-17) - * **Feature**: Added SourceLayerArn and SourceLayerHash field for security findings. Updated AwsLambdaFunction Resource detail -* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.15.0](service/servicecatalogappregistry/CHANGELOG.md#v1150-2022-11-17) - * **Feature**: This release adds support for tagged resource associations, which allows you to associate a group of resources with a defined resource tag key and value to the application. -* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.17.4](service/sts/CHANGELOG.md#v1174-2022-11-17) - * **Documentation**: Documentation updates for AWS Security Token Service. -* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.18.0](service/textract/CHANGELOG.md#v1180-2022-11-17) - * **Feature**: This release adds support for specifying and extracting information from documents using the Signatures feature within Analyze Document API -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.27.0](service/workspaces/CHANGELOG.md#v1270-2022-11-17) - * **Feature**: The release introduces CreateStandbyWorkspaces, an API that allows you to create standby WorkSpaces associated with a primary WorkSpace in another Region. DescribeWorkspaces now includes related WorkSpaces properties. DescribeWorkspaceBundles and CreateWorkspaceBundle now return more bundle details. - -# Release (2022-11-16) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.19.1](service/batch/CHANGELOG.md#v1191-2022-11-16) - * **Documentation**: Documentation updates related to Batch on EKS -* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.2.0](service/billingconductor/CHANGELOG.md#v120-2022-11-16) - * **Feature**: This release adds a new feature BillingEntity pricing rule. 
-* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.24.0](service/cloudformation/CHANGELOG.md#v1240-2022-11-16) - * **Feature**: Added UnsupportedTarget HandlerErrorCode for use with CFN Resource Hooks -* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.14.0](service/comprehendmedical/CHANGELOG.md#v1140-2022-11-16) - * **Feature**: This release supports new set of entities and traits. It also adds new category (BEHAVIORAL_ENVIRONMENTAL_SOCIAL). -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.37.0](service/connect/CHANGELOG.md#v1370-2022-11-16) - * **Feature**: This release adds a new MonitorContact API for initiating monitoring of ongoing Voice and Chat contacts. -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.23.0](service/eks/CHANGELOG.md#v1230-2022-11-16) - * **Feature**: Adds support for customer-provided placement groups for Kubernetes control plane instances when creating local EKS clusters on Outposts -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.24.0](service/elasticache/CHANGELOG.md#v1240-2022-11-16) - * **Feature**: for Redis now supports AWS Identity and Access Management authentication access to Redis clusters starting with redis-engine version 7.0 -* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.8.0](service/iottwinmaker/CHANGELOG.md#v180-2022-11-16) - * **Feature**: This release adds the following: 1) ExecuteQuery API allows users to query their AWS IoT TwinMaker Knowledge Graph 2) Pricing plan APIs allow users to configure and manage their pricing mode 3) Support for property groups and tabular property values in existing AWS IoT TwinMaker APIs. 
-* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.12.0](service/personalizeevents/CHANGELOG.md#v1120-2022-11-16) - * **Feature**: This release provides support for creation and use of metric attributions in AWS Personalize -* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.17.0](service/proton/CHANGELOG.md#v1170-2022-11-16) - * **Feature**: Add support for sorting and filtering in ListServiceInstances -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.30.0](service/rds/CHANGELOG.md#v1300-2022-11-16) - * **Feature**: This release adds support for container databases (CDBs) to Amazon RDS Custom for Oracle. A CDB contains one PDB at creation. You can add more PDBs using Oracle SQL. You can also customize your database installation by setting the Oracle base, Oracle home, and the OS user name and group. -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.33.0](service/ssm/CHANGELOG.md#v1330-2022-11-16) - * **Feature**: This release adds support for cross account access in CreateOpsItem, UpdateOpsItem and GetOpsItem. It introduces new APIs to setup resource policies for SSM resources: PutResourcePolicy, GetResourcePolicies and DeleteResourcePolicy. -* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.19.0](service/ssmincidents/CHANGELOG.md#v1190-2022-11-16) - * **Feature**: Add support for PagerDuty integrations on ResponsePlan, IncidentRecord, and RelatedItem APIs -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.24.0](service/transfer/CHANGELOG.md#v1240-2022-11-16) - * **Feature**: Allow additional operations to throw ThrottlingException -* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.15.0](service/xray/CHANGELOG.md#v1150-2022-11-16) - * **Feature**: This release adds new APIs - PutResourcePolicy, DeleteResourcePolicy, ListResourcePolicies for supporting resource based policies for AWS X-Ray. 
- -# Release (2022-11-15) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.36.0](service/connect/CHANGELOG.md#v1360-2022-11-15) - * **Feature**: This release updates the APIs: UpdateInstanceAttribute, DescribeInstanceAttribute, and ListInstanceAttributes. You can use it to programmatically enable/disable enhanced contact monitoring using attribute type ENHANCED_CONTACT_MONITORING on the specified Amazon Connect instance. -* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.20.0](service/greengrassv2/CHANGELOG.md#v1200-2022-11-15) - * **Feature**: Adds new parent target ARN parameter to CreateDeployment, GetDeployment, and ListDeployments APIs for the new subdeployments feature. -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.24.0](service/route53/CHANGELOG.md#v1240-2022-11-15) - * **Feature**: Amazon Route 53 now supports the Europe (Spain) Region (eu-south-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. -* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.0.0](service/ssmsap/CHANGELOG.md#v100-2022-11-15) - * **Release**: New AWS service client module - * **Feature**: AWS Systems Manager for SAP provides simplified operations and management of SAP applications such as SAP HANA. With this release, SAP customers and partners can automate and simplify their SAP system administration tasks such as backup/restore of SAP HANA. -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.26.0](service/workspaces/CHANGELOG.md#v1260-2022-11-15) - * **Feature**: This release introduces ModifyCertificateBasedAuthProperties, a new API that allows control of certificate-based auth properties associated with a WorkSpaces directory. The DescribeWorkspaceDirectories API will now additionally return certificate-based auth properties in its responses.
- -# Release (2022-11-14) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.20.0](service/customerprofiles/CHANGELOG.md#v1200-2022-11-14) - * **Feature**: This release enhances the SearchProfiles API by providing functionality to search for profiles using multiple keys and logical operators. -* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.18.0](service/lakeformation/CHANGELOG.md#v1180-2022-11-14) - * **Feature**: This release adds a new parameter "Parameters" in the DataLakeSettings. -* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.13.3](service/managedblockchain/CHANGELOG.md#v1133-2022-11-14) - * **Documentation**: Updating the API docs data type: NetworkEthereumAttributes, and the operations DeleteNode, and CreateNode to also include the supported Goerli network. -* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.16.0](service/proton/CHANGELOG.md#v1160-2022-11-14) - * **Feature**: Add support for CodeBuild Provisioning -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.29.0](service/rds/CHANGELOG.md#v1290-2022-11-14) - * **Feature**: This release adds support for restoring an RDS Multi-AZ DB cluster snapshot to a Single-AZ deployment or a Multi-AZ DB instance deployment. -* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.12.0](service/workdocs/CHANGELOG.md#v1120-2022-11-14) - * **Feature**: Added 2 new document related operations, DeleteDocumentVersion and RestoreDocumentVersions. -* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.14.0](service/xray/CHANGELOG.md#v1140-2022-11-14) - * **Feature**: This release enhances GetServiceGraph API to support new type of edge to represent links between SQS and Lambda in event-driven applications. 
- -# Release (2022-11-11) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/config`: [v1.18.0](config/CHANGELOG.md#v1180-2022-11-11) - * **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846 - * **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider -* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.13.0](credentials/CHANGELOG.md#v1130-2022-11-11) - * **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846 - * **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.34.1](service/glue/CHANGELOG.md#v1341-2022-11-11) - * **Documentation**: Added links related to enabling job bookmarks. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.31.0](service/iot/CHANGELOG.md#v1310-2022-11-11) - * **Feature**: This release adds a new API listRelatedResourcesForAuditFinding and new member type IssuerCertificates for IoT Device Defender Audit. -* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.16.0](service/licensemanager/CHANGELOG.md#v1160-2022-11-11) - * **Feature**: AWS License Manager now supports onboarded Management Accounts or Delegated Admins to view granted licenses aggregated from all accounts in the organization. -* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.14.0](service/marketplacecatalog/CHANGELOG.md#v1140-2022-11-11) - * **Feature**: Added three new APIs to support tagging and tag-based authorization: TagResource, UntagResource, and ListTagsForResource.
Added optional parameters to the StartChangeSet API to support tagging a resource while making a request to create it. -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.21.0](service/rekognition/CHANGELOG.md#v1210-2022-11-11) - * **Feature**: Adding support for ImageProperties feature to detect dominant colors and image brightness, sharpness, and contrast, inclusion and exclusion filters for labels and label categories, new fields to the API response, "aliases" and "categories" -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.23.8](service/securityhub/CHANGELOG.md#v1238-2022-11-11) - * **Documentation**: Documentation updates for Security Hub -* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.18.0](service/ssmincidents/CHANGELOG.md#v1180-2022-11-11) - * **Feature**: RelatedItems now have an ID field which can be used for referencing them elsewhere. Introducing event references in TimelineEvent API and increasing maximum length of "eventData" to 12K characters. - -# Release (2022-11-10) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.24.1](service/autoscaling/CHANGELOG.md#v1241-2022-11-10) - * **Documentation**: This release adds a new price capacity optimized allocation strategy for Spot Instances to help customers optimize provisioning of Spot Instances via EC2 Auto Scaling, EC2 Fleet, and Spot Fleet. It allocates Spot Instances based on both spare capacity availability and Spot Instance price. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.70.0](service/ec2/CHANGELOG.md#v1700-2022-11-10) - * **Feature**: This release adds a new price capacity optimized allocation strategy for Spot Instances to help customers optimize provisioning of Spot Instances via EC2 Auto Scaling, EC2 Fleet, and Spot Fleet. It allocates Spot Instances based on both spare capacity availability and Spot Instance price.
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.19.0](service/ecs/CHANGELOG.md#v1190-2022-11-10) - * **Feature**: This release adds support for task scale-in protection with updateTaskProtection and getTaskProtection APIs. UpdateTaskProtection API can be used to protect a service managed task from being terminated by scale-in events and getTaskProtection API to get the scale-in protection status of a task. -* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.17.0](service/elasticsearchservice/CHANGELOG.md#v1170-2022-11-10) - * **Feature**: Amazon OpenSearch Service now offers managed VPC endpoints to connect to your Amazon OpenSearch Service VPC-enabled domain in a Virtual Private Cloud (VPC). This feature allows you to privately access OpenSearch Service domain without using public IPs or requiring traffic to traverse the Internet. -* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.0.1](service/resourceexplorer2/CHANGELOG.md#v101-2022-11-10) - * **Documentation**: Text only updates to some Resource Explorer descriptions. -* `github.com/aws/aws-sdk-go-v2/service/scheduler`: [v1.0.0](service/scheduler/CHANGELOG.md#v100-2022-11-10) - * **Release**: New AWS service client module - * **Feature**: AWS introduces the new Amazon EventBridge Scheduler. EventBridge Scheduler is a serverless scheduler that allows you to create, run, and manage tasks from one central, managed service. - -# Release (2022-11-09) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.35.0](service/connect/CHANGELOG.md#v1350-2022-11-09) - * **Feature**: This release adds new fields SignInUrl, UserArn, and UserId to GetFederationToken response payload. -* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.1.0](service/connectcases/CHANGELOG.md#v110-2022-11-09) - * **Feature**: This release adds the ability to disable templates through the UpdateTemplate API. Disabling templates prevents customers from creating cases using the template. 
For more information see https://docs.aws.amazon.com/cases/latest/APIReference/Welcome.html -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.69.0](service/ec2/CHANGELOG.md#v1690-2022-11-09) - * **Feature**: Amazon EC2 Trn1 instances, powered by AWS Trainium chips, are purpose built for high-performance deep learning training. u-24tb1.112xlarge and u-18tb1.112xlarge High Memory instances are purpose-built to run large in-memory databases. -* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.14.0](service/groundstation/CHANGELOG.md#v1140-2022-11-09) - * **Feature**: This release adds the preview of customer-provided ephemeris support for AWS Ground Station, allowing space vehicle owners to provide their own position and trajectory information for a satellite. -* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.19.0](service/mediapackagevod/CHANGELOG.md#v1190-2022-11-09) - * **Feature**: This release adds "IncludeIframeOnlyStream" for Dash endpoints. -* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.7.0](service/transcribestreaming/CHANGELOG.md#v170-2022-11-09) - * **Feature**: This will release hi-IN and th-TH - -# Release (2022-11-08) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.16.0](service/acm/CHANGELOG.md#v1160-2022-11-08) - * **Feature**: Support added for requesting elliptic curve certificate key algorithm types P-256 (EC_prime256v1) and P-384 (EC_secp384r1). -* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.1.0](service/billingconductor/CHANGELOG.md#v110-2022-11-08) - * **Feature**: This release adds the Recurring Custom Line Item feature along with a new API ListCustomLineItemVersions. 
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.68.0](service/ec2/CHANGELOG.md#v1680-2022-11-08) - * **Feature**: This release enables sharing of EC2 Placement Groups across accounts and within AWS Organizations using Resource Access Manager -* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.20.0](service/fms/CHANGELOG.md#v1200-2022-11-08) - * **Feature**: AWS Firewall Manager now supports importing existing AWS Network Firewall firewalls into Firewall Manager policies. -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.24.0](service/lightsail/CHANGELOG.md#v1240-2022-11-08) - * **Feature**: This release adds support for Amazon Lightsail to automate the delegation of domains registered through Amazon Route 53 to Lightsail DNS management and to automate record creation for DNS validation of Lightsail SSL/TLS certificates. -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.11.0](service/opensearch/CHANGELOG.md#v1110-2022-11-08) - * **Feature**: Amazon OpenSearch Service now offers managed VPC endpoints to connect to your Amazon OpenSearch Service VPC-enabled domain in a Virtual Private Cloud (VPC). This feature allows you to privately access OpenSearch Service domain without using public IPs or requiring traffic to traverse the Internet. -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.19.0](service/polly/CHANGELOG.md#v1190-2022-11-08) - * **Feature**: Amazon Polly adds new voices: Elin (sv-SE), Ida (nb-NO), Laura (nl-NL) and Suvi (fi-FI). They are available as neural voices only. -* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.0.0](service/resourceexplorer2/CHANGELOG.md#v100-2022-11-08) - * **Release**: New AWS service client module - * **Feature**: This is the initial SDK release for AWS Resource Explorer. AWS Resource Explorer lets your users search for and discover your AWS resources across the AWS Regions in your account. 
-* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.23.0](service/route53/CHANGELOG.md#v1230-2022-11-08) - * **Feature**: Amazon Route 53 now supports the Europe (Zurich) Region (eu-central-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. - -# Release (2022-11-07) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.19.0](service/athena/CHANGELOG.md#v1190-2022-11-07) - * **Feature**: Adds support for using Query Result Reuse -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.24.0](service/autoscaling/CHANGELOG.md#v1240-2022-11-07) - * **Feature**: This release adds support for two new attributes for attribute-based instance type selection - NetworkBandwidthGbps and AllowedInstanceTypes. -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.20.0](service/cloudtrail/CHANGELOG.md#v1200-2022-11-07) - * **Feature**: This release includes support for configuring a delegated administrator to manage an AWS Organizations organization CloudTrail trails and event data stores, and AWS Key Management Service encryption of CloudTrail Lake event data stores. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.67.0](service/ec2/CHANGELOG.md#v1670-2022-11-07) - * **Feature**: This release adds support for two new attributes for attribute-based instance type selection - NetworkBandwidthGbps and AllowedInstanceTypes. -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.23.0](service/elasticache/CHANGELOG.md#v1230-2022-11-07) - * **Feature**: Added support for IPv6 and dual stack for Memcached and Redis clusters. Customers can now launch new Redis and Memcached clusters with IPv6 and dual stack networking support. -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.26.0](service/lexmodelsv2/CHANGELOG.md#v1260-2022-11-07) - * **Feature**: Amazon Lex now supports new APIs for viewing and editing Custom Vocabulary in bots. 
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.27.0](service/mediaconvert/CHANGELOG.md#v1270-2022-11-07) - * **Feature**: The AWS Elemental MediaConvert SDK has added support for setting the SDR reference white point for HDR conversions and conversion of HDR10 to DolbyVision without mastering metadata. -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.32.0](service/ssm/CHANGELOG.md#v1320-2022-11-07) - * **Feature**: This release includes support for applying a CloudWatch alarm to multi account multi region Systems Manager Automation -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.23.1](service/wafv2/CHANGELOG.md#v1231-2022-11-07) - * **Documentation**: The geo match statement now adds labels for country and region. You can match requests at the region level by combining a geo match statement with label match statements. -* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.17.0](service/wellarchitected/CHANGELOG.md#v1170-2022-11-07) - * **Feature**: This release adds support for integrations with AWS Trusted Advisor and AWS Service Catalog AppRegistry to improve workload discovery and speed up your workload reviews. -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.25.0](service/workspaces/CHANGELOG.md#v1250-2022-11-07) - * **Feature**: This release adds protocols attribute to workspaces properties data type. This enables customers to migrate workspaces from PC over IP (PCoIP) to WorkSpaces Streaming Protocol (WSP) using create and modify workspaces public APIs. 
- -# Release (2022-11-04) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.16.1](service/cloudwatchlogs/CHANGELOG.md#v1161-2022-11-04) - * **Documentation**: Doc-only update for bug fixes and support of export to buckets encrypted with SSE-KMS -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.66.0](service/ec2/CHANGELOG.md#v1660-2022-11-04) - * **Feature**: This release adds API support for the recipient of an AMI account share to remove shared AMI launch permissions. -* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.15.0](service/emrcontainers/CHANGELOG.md#v1150-2022-11-04) - * **Feature**: Adding support for Job templates. Job templates allow you to create and store templates to configure Spark applications parameters. This helps you ensure consistent settings across applications by reusing and enforcing configuration overrides in data pipelines. -* `github.com/aws/aws-sdk-go-v2/service/internal/eventstreamtesting`: [v1.0.37](service/internal/eventstreamtesting/CHANGELOG.md#v1037-2022-11-04) - * **Dependency Update**: update golang.org/x/net dependency to 0.1.0 - -# Release (2022-11-03) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.10.0](service/memorydb/CHANGELOG.md#v1100-2022-11-03) - * **Feature**: Adding support for r6gd instances for MemoryDB Redis with data tiering. In a cluster with data tiering enabled, when available memory capacity is exhausted, the least recently used data is automatically tiered to solid state drives for cost-effective capacity scaling with minimal performance impact. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.54.0](service/sagemaker/CHANGELOG.md#v1540-2022-11-03) - * **Feature**: Amazon SageMaker now supports running training jobs on ml.trn1 instance types. 
- -# Release (2022-11-02) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.26.0](service/iotsitewise/CHANGELOG.md#v1260-2022-11-02) - * **Feature**: This release adds the ListAssetModelProperties and ListAssetProperties APIs. You can list all properties that belong to a single asset model or asset using these two new APIs. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.25.0](service/s3control/CHANGELOG.md#v1250-2022-11-02) - * **Feature**: S3 on Outposts launches support for Lifecycle configuration for Outposts buckets. With S3 Lifecycle configuration, you can manage objects so they are stored cost effectively. You can manage objects using size-based rules and specify how many noncurrent versions a bucket will retain. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.53.0](service/sagemaker/CHANGELOG.md#v1530-2022-11-02) - * **Feature**: This release updates Framework model regex for ModelPackage to support new Framework version xgboost, sklearn. -* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.17.0](service/ssmincidents/CHANGELOG.md#v1170-2022-11-02) - * **Feature**: Adds support for tagging replication-set on creation. - -# Release (2022-11-01) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.28.0](service/rds/CHANGELOG.md#v1280-2022-11-01) - * **Feature**: Relational Database Service - This release adds support for configuring Storage Throughput on RDS database instances. -* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.17.0](service/textract/CHANGELOG.md#v1170-2022-11-01) - * **Feature**: Add ocr results in AnalyzeIDResponse as blocks - -# Release (2022-10-31) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.15.0](service/apprunner/CHANGELOG.md#v1150-2022-10-31) - * **Feature**: This release adds support for private App Runner services. Services may now be configured to be made private and only accessible from a VPC.
The changes include a new VpcIngressConnection resource and several new and modified APIs. -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.16.0](service/cloudwatchlogs/CHANGELOG.md#v1160-2022-10-31) - * **Feature**: SDK release to support tagging for destinations and log groups with TagResource. Also supports tag on create with PutDestination. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.34.0](service/connect/CHANGELOG.md#v1340-2022-10-31) - * **Feature**: Amazon Connect now supports a new API DismissUserContact to dismiss or remove terminated contacts in Agent CCP -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.65.0](service/ec2/CHANGELOG.md#v1650-2022-10-31) - * **Feature**: Elastic IP transfer is a new Amazon VPC feature that allows you to transfer your Elastic IP addresses from one AWS Account to another. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.30.0](service/iot/CHANGELOG.md#v1300-2022-10-31) - * **Feature**: This release adds the Amazon Location action to IoT Rules Engine. -* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.15.0](service/sesv2/CHANGELOG.md#v1150-2022-10-31) - * **Feature**: This release includes support for interacting with the Virtual Deliverability Manager, allowing you to opt in/out of the feature and to retrieve recommendations and metric data. -* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.16.0](service/textract/CHANGELOG.md#v1160-2022-10-31) - * **Feature**: This release introduces additional support for 30+ normalized fields such as vendor address and currency. It also includes OCR output in the response and accuracy improvements for the already supported fields in previous version - -# Release (2022-10-28) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.14.0](service/apprunner/CHANGELOG.md#v1140-2022-10-28) - * **Feature**: AWS App Runner adds .NET 6, Go 1, PHP 8.1 and Ruby 3.1 runtimes.
-* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.18.0](service/appstream/CHANGELOG.md#v1180-2022-10-28) - * **Feature**: This release includes CertificateBasedAuthProperties in CreateDirectoryConfig and UpdateDirectoryConfig. -* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.16.20](service/cloud9/CHANGELOG.md#v11620-2022-10-28) - * **Documentation**: Update to the documentation section of the Cloud9 API Reference guide. -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.23.0](service/cloudformation/CHANGELOG.md#v1230-2022-10-28) - * **Feature**: This release adds more fields to improves visibility of AWS CloudFormation StackSets information in following APIs: ListStackInstances, DescribeStackInstance, ListStackSetOperationResults, ListStackSetOperations, DescribeStackSetOperation. -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.19.0](service/mediatailor/CHANGELOG.md#v1190-2022-10-28) - * **Feature**: This release introduces support for SCTE-35 segmentation descriptor messages which can be sent within time signal messages. - -# Release (2022-10-27) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.64.0](service/ec2/CHANGELOG.md#v1640-2022-10-27) - * **Feature**: Feature supports the replacement of instance root volume using an updated AMI without requiring customers to stop their instance. -* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.19.0](service/fms/CHANGELOG.md#v1190-2022-10-27) - * **Feature**: Add support NetworkFirewall Managed Rule Group Override flag in GetViolationDetails API -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.34.0](service/glue/CHANGELOG.md#v1340-2022-10-27) - * **Feature**: Added support for custom datatypes when using custom csv classifier. 
-* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.26.13](service/redshift/CHANGELOG.md#v12613-2022-10-27) - * **Documentation**: This release clarifies use for the ElasticIp parameter of the CreateCluster and RestoreFromClusterSnapshot APIs. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.52.0](service/sagemaker/CHANGELOG.md#v1520-2022-10-27) - * **Feature**: This change allows customers to provide a custom entrypoint script for the docker container to be run while executing training jobs, and provide custom arguments to the entrypoint script. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.23.0](service/wafv2/CHANGELOG.md#v1230-2022-10-27) - * **Feature**: This release adds the following: Challenge rule action, to silently verify client browsers; rule group rule action override to any valid rule action, not just Count; token sharing between protected applications for challenge/CAPTCHA token; targeted rules option for Bot Control managed rule group. - -# Release (2022-10-26) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.18.23](service/iam/CHANGELOG.md#v11823-2022-10-26) - * **Documentation**: Doc only update that corrects instances of CLI not using an entity. -* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.18.0](service/kafka/CHANGELOG.md#v1180-2022-10-26) - * **Feature**: This release adds support for Tiered Storage. UpdateStorage allows you to control the Storage Mode for supported storage tiers. -* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.18.0](service/neptune/CHANGELOG.md#v1180-2022-10-26) - * **Feature**: Added a new cluster-level attribute to set the capacity range for Neptune Serverless instances. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.51.0](service/sagemaker/CHANGELOG.md#v1510-2022-10-26) - * **Feature**: Amazon SageMaker Automatic Model Tuning now supports specifying Grid Search strategy for tuning jobs, which evaluates all hyperparameter combinations exhaustively based on the categorical hyperparameters provided. - -# Release (2022-10-25) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.17.0](service/accessanalyzer/CHANGELOG.md#v1170-2022-10-25) - * **Feature**: This release adds support for six new resource types in IAM Access Analyzer to help you easily identify public and cross-account access to your AWS resources. Updated service API, documentation, and paginators. -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.19.3](service/location/CHANGELOG.md#v1193-2022-10-25) - * **Documentation**: Added new map styles with satellite imagery for map resources using HERE as a data provider. -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.18.0](service/mediatailor/CHANGELOG.md#v1180-2022-10-25) - * **Feature**: This release is a documentation update -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.27.0](service/rds/CHANGELOG.md#v1270-2022-10-25) - * **Feature**: Relational Database Service - This release adds support for exporting DB cluster data to Amazon S3. -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.24.0](service/workspaces/CHANGELOG.md#v1240-2022-10-25) - * **Feature**: This release adds new enums for supporting Workspaces Core features, including creating Manual running mode workspaces, importing regular Workspaces Core images and importing g4dn Workspaces Core images. 
- -# Release (2022-10-24) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`: [v1.12.19](feature/ec2/imds/CHANGELOG.md#v11219-2022-10-24) - * **Bug Fix**: Fixes an issue that prevented logging of the API request or responses when the respective log modes were enabled. -* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.19.0](service/acmpca/CHANGELOG.md#v1190-2022-10-24) - * **Feature**: AWS Private Certificate Authority (AWS Private CA) now offers usage modes which are combination of features to address specific use cases. -* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.19.0](service/batch/CHANGELOG.md#v1190-2022-10-24) - * **Feature**: This release adds support for AWS Batch on Amazon EKS. -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.19.0](service/datasync/CHANGELOG.md#v1190-2022-10-24) - * **Feature**: Added support for self-signed certificates when using object storage locations; added BytesCompressed to the TaskExecution response. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.50.0](service/sagemaker/CHANGELOG.md#v1500-2022-10-24) - * **Feature**: SageMaker Inference Recommender now supports a new API ListInferenceRecommendationJobSteps to return the details of all the benchmark we create for an inference recommendation job. - -# Release (2022-10-21) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.17.0 - * **Feature**: Adds `aws.IsCredentialsProvider` for inspecting `CredentialProvider` types when needing to determine if the underlying implementation type matches a target type. This resolves an issue where `CredentialsCache` could mask `AnonymousCredentials` providers, breaking downstream detection logic. 
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.21.0](service/cognitoidentityprovider/CHANGELOG.md#v1210-2022-10-21) - * **Feature**: This release adds a new "DeletionProtection" field to the UserPool in Cognito. Application admins can configure this value with either ACTIVE or INACTIVE value. Setting this field to ACTIVE will prevent a user pool from accidental deletion. -* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.16.16](service/eventbridge/CHANGELOG.md#v11616-2022-10-21) - * **Bug Fix**: The SDK client has been updated to utilize the `aws.IsCredentialsProvider` function for determining if `aws.AnonymousCredentials` has been configured for the `CredentialProvider`. -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.29.0](service/s3/CHANGELOG.md#v1290-2022-10-21) - * **Feature**: S3 on Outposts launches support for automatic bucket-style alias. You can use the automatic access point alias instead of an access point ARN for any object-level operation in an Outposts bucket. - * **Bug Fix**: The SDK client has been updated to utilize the `aws.IsCredentialsProvider` function for determining if `aws.AnonymousCredentials` has been configured for the `CredentialProvider`. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.49.0](service/sagemaker/CHANGELOG.md#v1490-2022-10-21) - * **Feature**: CreateInferenceRecommenderjob API now supports passing endpoint details directly, that will help customers to identify the max invocation and max latency they can achieve for their model and the associated endpoint along with getting recommendations on other instances. 
-* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.17.0](service/sts/CHANGELOG.md#v1170-2022-10-21) - * **Feature**: Add presign functionality for sts:AssumeRole operation - -# Release (2022-10-20) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.20.0](service/devopsguru/CHANGELOG.md#v1200-2022-10-20) - * **Feature**: This release adds information about the resources DevOps Guru is analyzing. -* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.15.0](service/globalaccelerator/CHANGELOG.md#v1150-2022-10-20) - * **Feature**: Global Accelerator now supports AddEndpoints and RemoveEndpoints operations for standard endpoint groups. -* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.7.0](service/resiliencehub/CHANGELOG.md#v170-2022-10-20) - * **Feature**: In this release, we are introducing support for regional optimization for AWS Resilience Hub applications. It also includes a few documentation updates to improve clarity. -* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.7.0](service/rum/CHANGELOG.md#v170-2022-10-20) - * **Feature**: CloudWatch RUM now supports Extended CloudWatch Metrics with Additional Dimensions - -# Release (2022-10-19) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.11.6](service/chimesdkmessaging/CHANGELOG.md#v1116-2022-10-19) - * **Documentation**: Documentation updates for Chime Messaging SDK -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.19.0](service/cloudtrail/CHANGELOG.md#v1190-2022-10-19) - * **Feature**: This release includes support for exporting CloudTrail Lake query results to an Amazon S3 bucket. 
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.27.0](service/configservice/CHANGELOG.md#v1270-2022-10-19) - * **Feature**: This release adds resourceType enums for AppConfig, AppSync, DataSync, EC2, EKS, Glue, GuardDuty, SageMaker, ServiceDiscovery, SES, Route53 types. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.33.0](service/connect/CHANGELOG.md#v1330-2022-10-19) - * **Feature**: This release adds API support for managing phone numbers that can be used across multiple AWS regions through telephony traffic distribution. -* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.13.0](service/managedblockchain/CHANGELOG.md#v1130-2022-10-19) - * **Feature**: Adding new Accessor APIs for Amazon Managed Blockchain -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.28.0](service/s3/CHANGELOG.md#v1280-2022-10-19) - * **Feature**: Updates internal logic for constructing API endpoints. We have added rule-based endpoints and internal model parameters. -* `github.com/aws/aws-sdk-go-v2/service/supportapp`: [v1.1.0](service/supportapp/CHANGELOG.md#v110-2022-10-19) - * **Feature**: This release adds the RegisterSlackWorkspaceForOrganization API. You can use the API to register a Slack workspace for an AWS account that is part of an organization. -* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.7.0](service/workspacesweb/CHANGELOG.md#v170-2022-10-19) - * **Feature**: WorkSpaces Web now supports user access logging for recording session start, stop, and URL navigation. 
- -# Release (2022-10-18) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.20.10](service/frauddetector/CHANGELOG.md#v12010-2022-10-18) - * **Documentation**: Documentation Updates for Amazon Fraud Detector -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.48.0](service/sagemaker/CHANGELOG.md#v1480-2022-10-18) - * **Feature**: This change allows customers to enable data capturing while running a batch transform job, and configure monitoring schedule to monitoring the captured data. -* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.18.0](service/servicediscovery/CHANGELOG.md#v1180-2022-10-18) - * **Feature**: Updated the ListNamespaces API to support the NAME and HTTP_NAME filters, and the BEGINS_WITH filter condition. -* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.14.0](service/sesv2/CHANGELOG.md#v1140-2022-10-18) - * **Feature**: This release allows subscribers to enable Dedicated IPs (managed) to send email via a fully managed dedicated IP experience. It also adds identities' VerificationStatus in the response of GetEmailIdentity and ListEmailIdentities APIs, and ImportJobs counts in the response of ListImportJobs API. - -# Release (2022-10-17) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.14.0](service/greengrass/CHANGELOG.md#v1140-2022-10-17) - * **Feature**: This change allows customers to specify FunctionRuntimeOverride in FunctionDefinitionVersion. This configuration can be used if the runtime on the device is different from the AWS Lambda runtime specified for that function. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.47.0](service/sagemaker/CHANGELOG.md#v1470-2022-10-17) - * **Feature**: This release adds support for C7g, C6g, C6gd, C6gn, M6g, M6gd, R6g, and R6gn Graviton instance types in Amazon SageMaker Inference. 
- -# Release (2022-10-14) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.26.0](service/mediaconvert/CHANGELOG.md#v1260-2022-10-14) - * **Feature**: MediaConvert now supports specifying the minimum percentage of the HRD buffer available at the end of each encoded video segment. - -# Release (2022-10-13) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.8.0](service/amplifyuibuilder/CHANGELOG.md#v180-2022-10-13) - * **Feature**: We are releasing the ability for fields to be configured as arrays. -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.18.0](service/appflow/CHANGELOG.md#v1180-2022-10-13) - * **Feature**: With this update, you can choose which Salesforce API is used by Amazon AppFlow to transfer data to or from your Salesforce account. You can choose the Salesforce REST API or Bulk API 2.0. You can also choose for Amazon AppFlow to pick the API automatically. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.32.0](service/connect/CHANGELOG.md#v1320-2022-10-13) - * **Feature**: This release adds support for a secondary email and a mobile number for Amazon Connect instance users. -* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.15.0](service/directoryservice/CHANGELOG.md#v1150-2022-10-13) - * **Feature**: This release adds support for describing and updating AWS Managed Microsoft AD set up. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.24](service/ecs/CHANGELOG.md#v11824-2022-10-13) - * **Documentation**: Documentation update to address tickets. -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.16.0](service/guardduty/CHANGELOG.md#v1160-2022-10-13) - * **Feature**: Add UnprocessedDataSources to CreateDetectorResponse which specifies the data sources that couldn't be enabled during the CreateDetector request. In addition, update documentations. 
-* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.18.20](service/iam/CHANGELOG.md#v11820-2022-10-13) - * **Documentation**: Documentation updates for the AWS Identity and Access Management API Reference. -* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.0.1](service/iotfleetwise/CHANGELOG.md#v101-2022-10-13) - * **Documentation**: Documentation update for AWS IoT FleetWise -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.24.0](service/medialive/CHANGELOG.md#v1240-2022-10-13) - * **Feature**: AWS Elemental MediaLive now supports forwarding SCTE-35 messages through the Event Signaling and Management (ESAM) API, and can read those SCTE-35 messages from an inactive source. -* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.18.0](service/mediapackagevod/CHANGELOG.md#v1180-2022-10-13) - * **Feature**: This release adds SPEKE v2 support for MediaPackage VOD. Speke v2 is an upgrade to the existing SPEKE API to support multiple encryption keys, based on an encryption contract selected by the customer. -* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.9.0](service/panorama/CHANGELOG.md#v190-2022-10-13) - * **Feature**: Pause and resume camera stream processing with SignalApplicationInstanceNodeInstances. Reboot an appliance with CreateJobForDevices. More application state information in DescribeApplicationInstance response. 
-* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.12.16](service/rdsdata/CHANGELOG.md#v11216-2022-10-13) - * **Documentation**: Doc update to reflect no support for schema parameter on BatchExecuteStatement API -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.31.0](service/ssm/CHANGELOG.md#v1310-2022-10-13) - * **Feature**: Support of AmazonLinux2022 by Patch Manager -* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.16.0](service/ssmincidents/CHANGELOG.md#v1160-2022-10-13) - * **Feature**: Update RelatedItem enum to support Tasks -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.23.0](service/transfer/CHANGELOG.md#v1230-2022-10-13) - * **Feature**: This release adds an option for customers to configure workflows that are triggered when files are only partially received from a client due to premature session disconnect. -* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.15.1](service/translate/CHANGELOG.md#v1151-2022-10-13) - * **Documentation**: This release enables customers to specify multiple target languages in asynchronous batch translation requests. -* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.10.0](service/wisdom/CHANGELOG.md#v1100-2022-10-13) - * **Feature**: This release updates the GetRecommendations API to include a trigger event list for classifying and grouping recommendations. - -# Release (2022-10-07) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.16.15](service/codegurureviewer/CHANGELOG.md#v11615-2022-10-07) - * **Documentation**: Documentation update to replace broken link. -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.18.20](service/elasticloadbalancingv2/CHANGELOG.md#v11820-2022-10-07) - * **Documentation**: Gateway Load Balancer adds a new feature (target_failover) for customers to rebalance existing flows to a healthy target after marked unhealthy or deregistered. 
This allows graceful patching/upgrades of target appliances during maintenance windows, and helps reduce unhealthy target failover time. -* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.19.0](service/greengrassv2/CHANGELOG.md#v1190-2022-10-07) - * **Feature**: This release adds error status details for deployments and components that failed on a device and adds features to improve visibility into component installation. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.26.0](service/quicksight/CHANGELOG.md#v1260-2022-10-07) - * **Feature**: Amazon QuickSight now supports SecretsManager Secret ARN in place of CredentialPair for DataSource creation and update. This release also has some minor documentation updates and removes CountryCode as a required parameter in GeoSpatialColumnGroup - -# Release (2022-10-06) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.6.15](service/resiliencehub/CHANGELOG.md#v1615-2022-10-06) - * **Documentation**: Documentation change for AWS Resilience Hub. Doc-only update to fix Documentation layout - -# Release (2022-10-05) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.33.0](service/glue/CHANGELOG.md#v1330-2022-10-05) - * **Feature**: This SDK release adds support to sync glue jobs with source control provider. Additionally, a new parameter called SourceControlDetails will be added to Job model. -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.20.0](service/networkfirewall/CHANGELOG.md#v1200-2022-10-05) - * **Feature**: StreamExceptionPolicy configures how AWS Network Firewall processes traffic when a network connection breaks midstream -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.23.0](service/outposts/CHANGELOG.md#v1230-2022-10-05) - * **Feature**: This release adds the Asset state information to the ListAssets response. The ListAssets request supports filtering on Asset state. 
- -# Release (2022-10-04) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.31.0](service/connect/CHANGELOG.md#v1310-2022-10-04) - * **Feature**: Updated the CreateIntegrationAssociation API to support the CASES_DOMAIN IntegrationType. -* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.0.0](service/connectcases/CHANGELOG.md#v100-2022-10-04) - * **Release**: New AWS service client module - * **Feature**: This release adds APIs for Amazon Connect Cases. Cases allows your agents to quickly track and manage customer issues that require multiple interactions, follow-up tasks, and teams in your contact center. For more information, see https://docs.aws.amazon.com/cases/latest/APIReference/Welcome.html -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.63.0](service/ec2/CHANGELOG.md#v1630-2022-10-04) - * **Feature**: Added EnableNetworkAddressUsageMetrics flag for ModifyVpcAttribute, DescribeVpcAttribute APIs. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.23](service/ecs/CHANGELOG.md#v11823-2022-10-04) - * **Documentation**: Documentation updates to address various Amazon ECS tickets. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.24.0](service/s3control/CHANGELOG.md#v1240-2022-10-04) - * **Feature**: S3 Object Lambda adds support to allow customers to intercept HeadObject and ListObjects requests and introduce their own compute. These requests were previously proxied to S3. -* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.17.0](service/workmail/CHANGELOG.md#v1170-2022-10-04) - * **Feature**: This release adds support for impersonation roles in Amazon WorkMail. - -# Release (2022-10-03) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.16.0](service/accessanalyzer/CHANGELOG.md#v1160-2022-10-03) - * **Feature**: AWS IAM Access Analyzer policy validation introduces new checks for role trust policies. 
As customers author a policy, IAM Access Analyzer policy validation evaluates the policy for any issues to make it easier for customers to author secure policies. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.62.0](service/ec2/CHANGELOG.md#v1620-2022-10-03) - * **Feature**: Adding an imdsSupport attribute to EC2 AMIs -* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.16.0](service/snowball/CHANGELOG.md#v1160-2022-10-03) - * **Feature**: Adds support for V3_5C. This is a refreshed AWS Snowball Edge Compute Optimized device type with 28TB SSD, 104 vCPU and 416GB memory (customer usable). - -# Release (2022-09-30) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.15.0](service/codedeploy/CHANGELOG.md#v1150-2022-09-30) - * **Feature**: This release allows you to override the alarm configurations when creating a deployment. -* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.19.0](service/devopsguru/CHANGELOG.md#v1190-2022-09-30) - * **Feature**: This release adds filter feature on AddNotificationChannel API, enable customer to configure the SNS notification messages by Severity or MessageTypes -* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.13.0](service/dlm/CHANGELOG.md#v1130-2022-09-30) - * **Feature**: This release adds support for archival of single-volume snapshots created by Amazon Data Lifecycle Manager policies -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.46.0](service/sagemaker/CHANGELOG.md#v1460-2022-09-30) - * **Feature**: A new parameter called ExplainerConfig is added to CreateEndpointConfig API to enable SageMaker Clarify online explainability feature. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.16.0](service/sagemakerruntime/CHANGELOG.md#v1160-2022-09-30) - * **Feature**: A new parameter called EnableExplanations is added to InvokeEndpoint API to enable on-demand SageMaker Clarify online explainability requests. -* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.13.6](service/ssooidc/CHANGELOG.md#v1136-2022-09-30) - * **Documentation**: Documentation updates for the IAM Identity Center OIDC CLI Reference. - -# Release (2022-09-29) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.15.0](service/acm/CHANGELOG.md#v1150-2022-09-29) - * **Feature**: This update returns additional certificate details such as certificate SANs and allows sorting in the ListCertificates API. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.61.0](service/ec2/CHANGELOG.md#v1610-2022-09-29) - * **Feature**: u-3tb1 instances are powered by Intel Xeon Platinum 8176M (Skylake) processors and are purpose-built to run large in-memory databases. -* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.3.0](service/emrserverless/CHANGELOG.md#v130-2022-09-29) - * **Feature**: This release adds API support to debug Amazon EMR Serverless jobs in real-time with live application UIs -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.25.0](service/fsx/CHANGELOG.md#v1250-2022-09-29) - * **Feature**: This release adds support for Amazon File Cache. -* `github.com/aws/aws-sdk-go-v2/service/migrationhuborchestrator`: [v1.0.0](service/migrationhuborchestrator/CHANGELOG.md#v100-2022-09-29) - * **Release**: New AWS service client module - * **Feature**: Introducing AWS MigrationHubOrchestrator. This is the first public release of AWS MigrationHubOrchestrator. -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.18.0](service/polly/CHANGELOG.md#v1180-2022-09-29) - * **Feature**: Added support for the new Cantonese voice - Hiujin. Hiujin is available as a Neural voice only. 
-* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.15.0](service/proton/CHANGELOG.md#v1150-2022-09-29) - * **Feature**: This release adds an option to delete pipeline provisioning repositories using the UpdateAccountSettings API -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.45.0](service/sagemaker/CHANGELOG.md#v1450-2022-09-29) - * **Feature**: SageMaker Training Managed Warm Pools let you retain provisioned infrastructure to reduce latency for repetitive training workloads. -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.16.2](service/secretsmanager/CHANGELOG.md#v1162-2022-09-29) - * **Documentation**: Documentation updates for Secrets Manager -* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.15.0](service/translate/CHANGELOG.md#v1150-2022-09-29) - * **Feature**: This release enables customers to access control rights on Translate resources like Parallel Data and Custom Terminology using Tag Based Authorization. -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.23.0](service/workspaces/CHANGELOG.md#v1230-2022-09-29) - * **Feature**: This release includes diagnostic log uploading feature. If it is enabled, the log files of WorkSpaces Windows client will be sent to Amazon WorkSpaces automatically for troubleshooting. You can use modifyClientProperty api to enable/disable this feature. - -# Release (2022-09-27) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.21.0](service/costexplorer/CHANGELOG.md#v1210-2022-09-27) - * **Feature**: This release is to support retroactive Cost Categories. The new field will enable you to retroactively apply new and existing cost category rules to previous months. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.35.0](service/kendra/CHANGELOG.md#v1350-2022-09-27) - * **Feature**: My AWS Service (placeholder) - Amazon Kendra now provides a data source connector for DropBox. 
For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-dropbox.html -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.19.0](service/location/CHANGELOG.md#v1190-2022-09-27) - * **Feature**: This release adds place IDs, which are unique identifiers of places, along with a new GetPlace operation, which can be used with place IDs to find a place again later. UnitNumber and UnitType are also added as new properties of places. - -# Release (2022-09-26) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.10.0](feature/dynamodb/attributevalue/CHANGELOG.md#v1100-2022-09-26) - * **Feature**: Adds a String method to UnixTime, so that when structs with this field get logged it prints a human readable time. -* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.10.0](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v1100-2022-09-26) - * **Feature**: Adds a String method to UnixTime, so that when structs with this field get logged it prints a human readable time. -* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: [v1.14.0](service/costandusagereportservice/CHANGELOG.md#v1140-2022-09-26) - * **Feature**: This release adds two new support regions(me-central-1/eu-south-2) for OSG. -* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.0.0](service/iotfleetwise/CHANGELOG.md#v100-2022-09-26) - * **Release**: New AWS service client module - * **Feature**: General availability (GA) for AWS IoT Fleetwise. It adds AWS IoT Fleetwise to AWS SDK. For more information, see https://docs.aws.amazon.com/iot-fleetwise/latest/APIReference/Welcome.html. 
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.30.0](service/ssm/CHANGELOG.md#v1300-2022-09-26) - * **Feature**: This release includes support for applying a CloudWatch alarm to Systems Manager capabilities like Automation, Run Command, State Manager, and Maintenance Windows. - -# Release (2022-09-23) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.13.0](service/apprunner/CHANGELOG.md#v1130-2022-09-23) - * **Feature**: AWS App Runner adds a Node.js 16 runtime. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.60.0](service/ec2/CHANGELOG.md#v1600-2022-09-23) - * **Feature**: Letting external AWS customers provide ImageId as a Launch Template override in FleetLaunchTemplateOverridesRequest -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.25.0](service/lexmodelsv2/CHANGELOG.md#v1250-2022-09-23) - * **Feature**: This release introduces additional optional parameters promptAttemptsSpecification to PromptSpecification, which enables the users to configure interrupt setting and Audio, DTMF and Text input configuration for the initial and retry prompt played by the Bot -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.23.0](service/lightsail/CHANGELOG.md#v1230-2022-09-23) - * **Feature**: This release adds Instance Metadata Service (IMDS) support for Lightsail instances. -* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.14.0](service/nimble/CHANGELOG.md#v1140-2022-09-23) - * **Feature**: Amazon Nimble Studio adds support for on-demand Amazon Elastic Compute Cloud (EC2) G3 and G5 instances, allowing customers to utilize additional GPU instance types for their creative projects. 
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.29.0](service/ssm/CHANGELOG.md#v1290-2022-09-23) - * **Feature**: This release adds new SSM document types ConformancePackTemplate and CloudFormation -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.22.9](service/wafv2/CHANGELOG.md#v1229-2022-09-23) - * **Documentation**: Add the default specification for ResourceType in ListResourcesForWebACL. - -# Release (2022-09-22) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.7.0](service/backupgateway/CHANGELOG.md#v170-2022-09-22) - * **Feature**: Changes include: new GetVirtualMachineApi to fetch a single user's VM, improving ListVirtualMachines to fetch filtered VMs as well as all VMs, and improving GetGatewayApi to now also return the gateway's MaintenanceStartTime. -* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.14.0](service/devicefarm/CHANGELOG.md#v1140-2022-09-22) - * **Feature**: This release adds the support for VPC-ENI based connectivity for private devices on AWS Device Farm. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.59.0](service/ec2/CHANGELOG.md#v1590-2022-09-22) - * **Feature**: Documentation updates for Amazon EC2. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.32.0](service/glue/CHANGELOG.md#v1320-2022-09-22) - * **Feature**: Added support for S3 Event Notifications for Catalog Target Crawlers. -* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.15.5](service/identitystore/CHANGELOG.md#v1155-2022-09-22) - * **Documentation**: Documentation updates for the Identity Store CLI Reference. - -# Release (2022-09-21) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.19.0](service/comprehend/CHANGELOG.md#v1190-2022-09-21) - * **Feature**: Amazon Comprehend now supports synchronous mode for targeted sentiment API operations. 
-* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.22.2](service/route53/CHANGELOG.md#v1222-2022-09-21) - * **Bug Fix**: Updated GetChange to sanitize /change/ prefix of the changeId returned from the service. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.23.0](service/s3control/CHANGELOG.md#v1230-2022-09-21) - * **Feature**: S3 on Outposts launches support for object versioning for Outposts buckets. With S3 Versioning, you can preserve, retrieve, and restore every version of every object stored in your buckets. You can recover from both unintended user actions and application failures. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.44.0](service/sagemaker/CHANGELOG.md#v1440-2022-09-21) - * **Feature**: SageMaker now allows customization on Canvas Application settings, including enabling/disabling time-series forecasting and specifying an Amazon Forecast execution role at both the Domain and UserProfile levels. - -# Release (2022-09-20) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.16.16 - * **Documentation**: added clafirfication on the Credential object to show usage of loadDefaultConfig to load credentials -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.58.0](service/ec2/CHANGELOG.md#v1580-2022-09-20) - * **Feature**: This release adds support for blocked paths to Amazon VPC Reachability Analyzer. - -# Release (2022-09-19) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.18.0](service/cloudtrail/CHANGELOG.md#v1180-2022-09-19) - * **Feature**: This release includes support for importing existing trails into CloudTrail Lake. 
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.57.0](service/ec2/CHANGELOG.md#v1570-2022-09-19) - * **Feature**: This release adds CapacityAllocations field to DescribeCapacityReservations -* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.17.0](service/mediaconnect/CHANGELOG.md#v1170-2022-09-19) - * **Feature**: This change allows the customer to use the SRT Caller protocol as part of their flows -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.26.0](service/rds/CHANGELOG.md#v1260-2022-09-19) - * **Feature**: This release adds support for Amazon RDS Proxy with SQL Server compatibility. - -# Release (2022-09-16) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.13.0](service/codestarnotifications/CHANGELOG.md#v1130-2022-09-16) - * **Feature**: This release adds tag based access control for the UntagResource API. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.21](service/ecs/CHANGELOG.md#v11821-2022-09-16) - * **Documentation**: This release supports new task definition sizes. - -# Release (2022-09-15) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.17.0](service/dynamodb/CHANGELOG.md#v1170-2022-09-15) - * **Feature**: Increased DynamoDB transaction limit from 25 to 100. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.56.0](service/ec2/CHANGELOG.md#v1560-2022-09-15) - * **Feature**: This feature allows customers to create tags for vpc-endpoint-connections and vpc-endpoint-service-permissions. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.43.0](service/sagemaker/CHANGELOG.md#v1430-2022-09-15) - * **Feature**: Amazon SageMaker Automatic Model Tuning now supports specifying Hyperband strategy for tuning jobs, which uses a multi-fidelity based tuning strategy to stop underperforming hyperparameter configurations early. 
- -# Release (2022-09-14) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/feature/rds/auth`: [v1.2.0](feature/rds/auth/CHANGELOG.md#v120-2022-09-14) - * **Feature**: Updated `BuildAuthToken` to validate the provided endpoint contains a port. -* `github.com/aws/aws-sdk-go-v2/internal/v4a`: [v1.0.13](internal/v4a/CHANGELOG.md#v1013-2022-09-14) - * **Bug Fix**: Fixes an issues where an error from an underlying SigV4 credential provider would not be surfaced from the SigV4a credential provider. Contribution by [sakthipriyan-aqfer](https://github.com/sakthipriyan-aqfer). -* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.18.0](service/acmpca/CHANGELOG.md#v1180-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.7.0](service/amplifyuibuilder/CHANGELOG.md#v170-2022-09-14) - * **Feature**: Amplify Studio UIBuilder is introducing forms functionality. Forms can be configured from Data Store models, JSON, or from scratch. These forms can then be generated in your project and used like any other React components. -* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.14.0](service/appconfig/CHANGELOG.md#v1140-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. 
-* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.17.0](service/appflow/CHANGELOG.md#v1170-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.16.0](service/appmesh/CHANGELOG.md#v1160-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.17.0](service/cloudtrail/CHANGELOG.md#v1170-2022-09-14) - * **Feature**: This release adds CloudTrail getChannel and listChannels APIs to allow customer to view the ServiceLinkedChannel configurations. -* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.12.0](service/codestar/CHANGELOG.md#v1120-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.12.0](service/codestarnotifications/CHANGELOG.md#v1120-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. 
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.14.0](service/cognitoidentity/CHANGELOG.md#v1140-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.20.0](service/cognitoidentityprovider/CHANGELOG.md#v1200-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.26.0](service/configservice/CHANGELOG.md#v1260-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.30.0](service/connect/CHANGELOG.md#v1300-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. 
-* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.12.0](service/connectparticipant/CHANGELOG.md#v1120-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.20.0](service/costexplorer/CHANGELOG.md#v1200-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.19.0](service/customerprofiles/CHANGELOG.md#v1190-2022-09-14) - * **Feature**: Added isUnstructured in response for Customer Profiles Integration APIs - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.16.0](service/dataexchange/CHANGELOG.md#v1160-2022-09-14) - * **Feature**: Documentation updates for AWS Data Exchange. -* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.8.0](service/drs/CHANGELOG.md#v180-2022-09-14) - * **Feature**: Fixed the data type of lagDuration that is returned in Describe Source Server API -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.55.0](service/ec2/CHANGELOG.md#v1550-2022-09-14) - * **Feature**: Documentation updates for Amazon EC2. 
- * **Feature**: This release adds support to send VPC Flow Logs to kinesis-data-firehose as new destination type - * **Feature**: This update introduces API operations to manage and create local gateway route tables, CoIP pools, and VIF group associations. - * **Feature**: Two new features for local gateway route tables: support for static routes targeting Elastic Network Interfaces and direct VPC routing. -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.22.0](service/eks/CHANGELOG.md#v1220-2022-09-14) - * **Feature**: Adding support for local Amazon EKS clusters on Outposts - * **Feature**: Adds support for EKS Addons ResolveConflicts "preserve" flag. Also adds new update failed status for EKS Addons. -* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.14.0](service/emrcontainers/CHANGELOG.md#v1140-2022-09-14) - * **Feature**: EMR on EKS now allows running Spark SQL using the newly introduced Spark SQL Job Driver in the Start Job Run API -* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.2.0](service/emrserverless/CHANGELOG.md#v120-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.9.0](service/evidently/CHANGELOG.md#v190-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. - * **Feature**: This release adds support for the client-side evaluation - powered by AWS AppConfig feature. 
-* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.13.0](service/finspacedata/CHANGELOG.md#v1130-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.13.0](service/fis/CHANGELOG.md#v1130-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.24.12](service/fsx/CHANGELOG.md#v12412-2022-09-14) - * **Documentation**: Documentation update for Amazon FSx. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.31.0](service/glue/CHANGELOG.md#v1310-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.18.0](service/greengrassv2/CHANGELOG.md#v1180-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. 
-* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.15.3](service/identitystore/CHANGELOG.md#v1153-2022-09-14) - * **Documentation**: Documentation updates for the Identity Store CLI Reference. -* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.20.0](service/imagebuilder/CHANGELOG.md#v1200-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.8.0](service/inspector2/CHANGELOG.md#v180-2022-09-14) - * **Feature**: This release adds new fields like fixAvailable, fixedInVersion and remediation to the finding model. The requirement to have vulnerablePackages in the finding model has also been removed. The documentation has been updated to reflect these changes. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.29.0](service/iot/CHANGELOG.md#v1290-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.13.0](service/iotanalytics/CHANGELOG.md#v1130-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. 
-* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.14.0](service/iotsecuretunneling/CHANGELOG.md#v1140-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.25.0](service/iotsitewise/CHANGELOG.md#v1250-2022-09-14) - * **Feature**: Allow specifying units in Asset Properties -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.34.0](service/kendra/CHANGELOG.md#v1340-2022-09-14) - * **Feature**: This release enables our customer to choose the option of Sharepoint 2019 for the on-premise Sharepoint connector. -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.24.0](service/lexmodelsv2/CHANGELOG.md#v1240-2022-09-14) - * **Feature**: This release is for supporting Composite Slot Type feature in AWS Lex V2. Composite Slot Type will help developer to logically group coherent slots and maintain their inter-relationships in runtime conversation. -* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.15.0](service/lexruntimev2/CHANGELOG.md#v1150-2022-09-14) - * **Feature**: This release is for supporting Composite Slot Type feature in AWS Lex V2. Composite Slot Type will help developer to logically group coherent slots and maintain their inter-relationships in runtime conversation. -* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.18.0](service/lookoutmetrics/CHANGELOG.md#v1180-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. 
- * **Feature**: Release dimension value filtering feature to allow customers to define dimension filters for including only a subset of their dataset to be used by LookoutMetrics. -* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.1.0](service/m2/CHANGELOG.md#v110-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.23.0](service/medialive/CHANGELOG.md#v1230-2022-09-14) - * **Feature**: This change exposes API settings which allow Dolby Atmos and Dolby Vision to be used when running a channel using Elemental Media Live -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.19.0](service/networkfirewall/CHANGELOG.md#v1190-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.15.0](service/pi/CHANGELOG.md#v1150-2022-09-14) - * **Feature**: Increases the maximum values of two RDS Performance Insights APIs. The maximum value of the Limit parameter of DimensionGroup is 25. The MaxResult maximum is now 25 for the following APIs: DescribeDimensionKeys, GetResourceMetrics, ListAvailableResourceDimensions, and ListAvailableResourceMetrics. -* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.17.0](service/pricing/CHANGELOG.md#v1170-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. 
The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.25.0](service/quicksight/CHANGELOG.md#v1250-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.26.9](service/redshift/CHANGELOG.md#v1269-2022-09-14) - * **Documentation**: This release updates documentation for AQUA features and other description updates. -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.22.0](service/route53/CHANGELOG.md#v1220-2022-09-14) - * **Feature**: Amazon Route 53 now supports the Middle East (UAE) Region (me-central-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. -* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.10.0](service/route53recoverycluster/CHANGELOG.md#v1100-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.22.0](service/s3control/CHANGELOG.md#v1220-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.42.0](service/sagemaker/CHANGELOG.md#v1420-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. - * **Feature**: SageMaker Hosting now allows customization on ML instance storage volume size, model data download timeout and inference container startup ping health check timeout for each ProductionVariant in CreateEndpointConfig API. - * **Feature**: This release adds HyperParameterTuningJob type in Search API. - * **Feature**: This release adds Mode to AutoMLJobConfig. -* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.14.0](service/sagemakera2iruntime/CHANGELOG.md#v1140-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.16.0](service/secretsmanager/CHANGELOG.md#v1160-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.14.0](service/servicecatalogappregistry/CHANGELOG.md#v1140-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. 
The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.14.0](service/sfn/CHANGELOG.md#v1140-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. -* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.18.0](service/sns/CHANGELOG.md#v1180-2022-09-14) - * **Feature**: Amazon SNS introduces the Data Protection Policy APIs, which enable customers to attach a data protection policy to an SNS topic. This allows topic owners to enable the new message data protection feature to audit and block sensitive data that is exchanged through their topics. -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.28.0](service/ssm/CHANGELOG.md#v1280-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. - * **Feature**: This release adds support for Systems Manager State Manager Association tagging. -* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.14.0](service/timestreamwrite/CHANGELOG.md#v1140-2022-09-14) - * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API. 
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.22.0](service/transfer/CHANGELOG.md#v1220-2022-09-14) - * **Feature**: This release introduces the ability to have multiple server host keys for any of your Transfer Family servers that use the SFTP protocol. - -# Release (2022-09-02.2) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.15.2](service/identitystore/CHANGELOG.md#v1152-2022-09-022) - * **Bug Fix**: Reverts a change to the identitystore module so that MaxResults members of ListGroupMemberShips, ListGroupMembershipsForMembers, ListGroups, and ListUsers are correctly generated as pointer types instead of value types - -# Release (2022-09-02) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.19.0](service/cognitoidentityprovider/CHANGELOG.md#v1190-2022-09-02) - * **Feature**: This release adds a new "AuthSessionValidity" field to the UserPoolClient in Cognito. Application admins can configure this value for their users' authentication duration, which is currently fixed at 3 minutes, up to 15 minutes. Setting this field will also apply to the SMS MFA authentication flow. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.29.0](service/connect/CHANGELOG.md#v1290-2022-09-02) - * **Feature**: This release adds search APIs for Routing Profiles and Queues, which can be used to search for those resources within a Connect Instance. 
-* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.19.0](service/mediapackage/CHANGELOG.md#v1190-2022-09-02) - * **Feature**: Added support for AES_CTR encryption to CMAF origin endpoints -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.41.0](service/sagemaker/CHANGELOG.md#v1410-2022-09-02) - * **Feature**: This release enables administrators to attribute user activity and API calls from Studio notebooks, Data Wrangler and Canvas to specific users even when users share the same execution IAM role. ExecutionRoleIdentityConfig at Sagemaker domain level enables this feature. - -# Release (2022-09-01) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.16.11](service/codegurureviewer/CHANGELOG.md#v11611-2022-09-01) - * **Documentation**: Documentation updates to fix formatting issues in CLI and SDK documentation. -* `github.com/aws/aws-sdk-go-v2/service/controltower`: [v1.0.0](service/controltower/CHANGELOG.md#v100-2022-09-01) - * **Release**: New AWS service client module - * **Feature**: This release contains the first SDK for AWS Control Tower. It introduces a new set of APIs: EnableControl, DisableControl, GetControlOperation, and ListEnabledControls. -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.21.10](service/route53/CHANGELOG.md#v12110-2022-09-01) - * **Documentation**: Documentation updates for Amazon Route 53. 
- -# Release (2022-08-31) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.20.2](service/cloudfront/CHANGELOG.md#v1202-2022-08-31) - * **Documentation**: Update API documentation for CloudFront origin access control (OAC) -* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.15.0](service/identitystore/CHANGELOG.md#v1150-2022-08-31) - * **Feature**: Expand IdentityStore API to support Create, Read, Update, Delete and Get operations for User, Group and GroupMembership resources. -* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.13.0](service/iotthingsgraph/CHANGELOG.md#v1130-2022-08-31) - * **Feature**: This release deprecates all APIs of the ThingsGraph service -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.18.0](service/ivs/CHANGELOG.md#v1180-2022-08-31) - * **Feature**: IVS Merge Fragmented Streams. This release adds support for recordingReconnectWindow field in IVS recordingConfigurations. 
For more information see https://docs.aws.amazon.com/ivs/latest/APIReference/Welcome.html -* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.12.12](service/rdsdata/CHANGELOG.md#v11212-2022-08-31) - * **Documentation**: Documentation updates for RDS Data API -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.40.0](service/sagemaker/CHANGELOG.md#v1400-2022-08-31) - * **Feature**: SageMaker Inference Recommender now accepts Inference Recommender fields: Domain, Task, Framework, SamplePayloadUrl, SupportedContentTypes, SupportedInstanceTypes, directly in our CreateInferenceRecommendationsJob API through ContainerConfig - -# Release (2022-08-30) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.17.0](service/greengrassv2/CHANGELOG.md#v1170-2022-08-30) - * **Feature**: Adds topologyFilter to ListInstalledComponentsRequest which allows filtration of components by ROOT or ALL (including root and dependency components). Adds lastStatusChangeTimestamp to ListInstalledComponents response to show the last time a component changed state on a device. -* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.14.15](service/identitystore/CHANGELOG.md#v11415-2022-08-30) - * **Documentation**: Documentation updates for the Identity Store CLI Reference. -* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.15.0](service/lookoutequipment/CHANGELOG.md#v1150-2022-08-30) - * **Feature**: This release adds new apis for providing labels. -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.23.0](service/macie2/CHANGELOG.md#v1230-2022-08-30) - * **Feature**: This release of the Amazon Macie API adds support for using allow lists to define specific text and text patterns to ignore when inspecting data sources for sensitive data. 
-* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.11.19](service/sso/CHANGELOG.md#v11119-2022-08-30) - * **Documentation**: Documentation updates for the AWS IAM Identity Center Portal CLI Reference. -* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.15.7](service/ssoadmin/CHANGELOG.md#v1157-2022-08-30) - * **Documentation**: Documentation updates for the AWS IAM Identity Center CLI Reference. - -# Release (2022-08-29) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.24.9](service/fsx/CHANGELOG.md#v1249-2022-08-29) - * **Documentation**: Documentation updates for Amazon FSx for NetApp ONTAP. -* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.11.0](service/voiceid/CHANGELOG.md#v1110-2022-08-29) - * **Feature**: Amazon Connect Voice ID now detects voice spoofing. When a prospective fraudster tries to spoof caller audio using audio playback or synthesized speech, Voice ID will return a risk score and outcome to indicate the how likely it is that the voice is spoofed. - -# Release (2022-08-26) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.18.0](service/mediapackage/CHANGELOG.md#v1180-2022-08-26) - * **Feature**: This release adds Ads AdTriggers and AdsOnDeliveryRestrictions to describe calls for CMAF endpoints on MediaPackage. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.25.1](service/rds/CHANGELOG.md#v1251-2022-08-26) - * **Documentation**: Removes support for RDS Custom from DBInstanceClass in ModifyDBInstance - -# Release (2022-08-25) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.18.13](service/elasticloadbalancingv2/CHANGELOG.md#v11813-2022-08-25) - * **Documentation**: Documentation updates for ELBv2. 
Gateway Load Balancer now supports Configurable Flow Stickiness, enabling you to configure the hashing used to maintain stickiness of flows to a specific target appliance. -* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.15.0](service/gamelift/CHANGELOG.md#v1150-2022-08-25) - * **Feature**: This release adds support for eight EC2 local zones as fleet locations; Atlanta, Chicago, Dallas, Denver, Houston, Kansas City (us-east-1-mci-1a), Los Angeles, and Phoenix. It also adds support for C5d, C6a, C6i, and R5d EC2 instance families. -* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.22.0](service/iotwireless/CHANGELOG.md#v1220-2022-08-25) - * **Feature**: This release includes a new feature for the customers to enable the LoRa gateways to send out beacons for Class B devices and an option to select one or more gateways for Class C devices when sending the LoRaWAN downlink messages. -* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.13](service/ivschat/CHANGELOG.md#v1013-2022-08-25) - * **Documentation**: Documentation change for IVS Chat API Reference. Doc-only update to add a paragraph on ARNs to the Welcome section. -* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.8.0](service/panorama/CHANGELOG.md#v180-2022-08-25) - * **Feature**: Support sorting and filtering in ListDevices API, and add more fields to device listings and single device detail -* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.13.0](service/ssooidc/CHANGELOG.md#v1130-2022-08-25) - * **Feature**: Updated required request parameters on IAM Identity Center's OIDC CreateToken action. - -# Release (2022-08-24) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.20.0](service/cloudfront/CHANGELOG.md#v1200-2022-08-24) - * **Feature**: Adds support for CloudFront origin access control (OAC), making it possible to restrict public access to S3 bucket origins in all AWS Regions, those with SSE-KMS, and more. 
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.25.0](service/configservice/CHANGELOG.md#v1250-2022-08-24) - * **Feature**: AWS Config now supports ConformancePackTemplate documents in SSM Docs for the deployment and update of conformance packs. -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.18.14](service/iam/CHANGELOG.md#v11814-2022-08-24) - * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM). -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.17.1](service/ivs/CHANGELOG.md#v1171-2022-08-24) - * **Documentation**: Documentation Change for IVS API Reference - Doc-only update to type field description for CreateChannel and UpdateChannel actions and for Channel data type. Also added Amazon Resource Names (ARNs) paragraph to Welcome section. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.24.0](service/quicksight/CHANGELOG.md#v1240-2022-08-24) - * **Feature**: Added a new optional property DashboardVisual under ExperienceConfiguration parameter of GenerateEmbedUrlForAnonymousUser and GenerateEmbedUrlForRegisteredUser API operations. This supports embedding of specific visuals in QuickSight dashboards. -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.21.5](service/transfer/CHANGELOG.md#v1215-2022-08-24) - * **Documentation**: Documentation updates for AWS Transfer Family - -# Release (2022-08-23) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.25.0](service/rds/CHANGELOG.md#v1250-2022-08-23) - * **Feature**: RDS for Oracle supports Oracle Data Guard switchover and read replica backups. 
-* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.15.5](service/ssoadmin/CHANGELOG.md#v1155-2022-08-23) - * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) - -# Release (2022-08-22) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.19.5](service/docdb/CHANGELOG.md#v1195-2022-08-22) - * **Documentation**: Update document for volume clone -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.54.0](service/ec2/CHANGELOG.md#v1540-2022-08-22) - * **Feature**: R6a instances are powered by 3rd generation AMD EPYC (Milan) processors delivering all-core turbo frequency of 3.6 GHz. C6id, M6id, and R6id instances are powered by 3rd generation Intel Xeon Scalable processor (Ice Lake) delivering all-core turbo frequency of 3.5 GHz. -* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.23.0](service/forecast/CHANGELOG.md#v1230-2022-08-22) - * **Feature**: releasing What-If Analysis APIs and update ARN regex pattern to be more strict in accordance with security recommendation -* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.12.0](service/forecastquery/CHANGELOG.md#v1120-2022-08-22) - * **Feature**: releasing What-If Analysis APIs -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.24.0](service/iotsitewise/CHANGELOG.md#v1240-2022-08-22) - * **Feature**: Enable non-unique asset names under different hierarchies -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.23.0](service/lexmodelsv2/CHANGELOG.md#v1230-2022-08-22) - * **Feature**: This release introduces a new feature to stop a running BotRecommendation Job for Automated Chatbot Designer. -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.23.0](service/securityhub/CHANGELOG.md#v1230-2022-08-22) - * **Feature**: Added new resource details objects to ASFF, including resources for AwsBackupBackupVault, AwsBackupBackupPlan and AwsBackupRecoveryPoint. 
Added FixAvailable, FixedInVersion and Remediation to Vulnerability. -* `github.com/aws/aws-sdk-go-v2/service/supportapp`: [v1.0.0](service/supportapp/CHANGELOG.md#v100-2022-08-22) - * **Release**: New AWS service client module - * **Feature**: This is the initial SDK release for the AWS Support App in Slack. - -# Release (2022-08-19) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.28.0](service/connect/CHANGELOG.md#v1280-2022-08-19) - * **Feature**: This release adds SearchSecurityProfiles API which can be used to search for Security Profile resources within a Connect Instance. -* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.12](service/ivschat/CHANGELOG.md#v1012-2022-08-19) - * **Documentation**: Documentation Change for IVS Chat API Reference - Doc-only update to change text/description for tags field. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.33.0](service/kendra/CHANGELOG.md#v1330-2022-08-19) - * **Feature**: This release adds support for a new authentication type - Personal Access Token (PAT) for confluence server. -* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.17.0](service/lookoutmetrics/CHANGELOG.md#v1170-2022-08-19) - * **Feature**: This release is to make GetDataQualityMetrics API publicly available. - -# Release (2022-08-18) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.1.0](service/chimesdkmediapipelines/CHANGELOG.md#v110-2022-08-18) - * **Feature**: The Amazon Chime SDK now supports live streaming of real-time video from the Amazon Chime SDK sessions to streaming platforms such as Amazon IVS and Amazon Elemental MediaLive. We have also added support for concatenation to create a single media capture file. 
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.21.0](service/cloudwatch/CHANGELOG.md#v1210-2022-08-18) - * **Feature**: Add support for managed Contributor Insights Rules -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.18.4](service/cognitoidentityprovider/CHANGELOG.md#v1184-2022-08-18) - * **Documentation**: This change is being made simply to fix the public documentation based on the models. We have included the PasswordChange and ResendCode events, along with the Pass, Fail and InProgress status. We have removed the Success and Failure status which are never returned by our APIs. -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.16.0](service/dynamodb/CHANGELOG.md#v1160-2022-08-18) - * **Feature**: This release adds support for importing data from S3 into a new DynamoDB table -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.53.0](service/ec2/CHANGELOG.md#v1530-2022-08-18) - * **Feature**: This release adds support for VPN log options, a new feature allowing S2S VPN connections to send IKE activity logs to CloudWatch Logs -* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.15.0](service/networkmanager/CHANGELOG.md#v1150-2022-08-18) - * **Feature**: Add TransitGatewayPeeringAttachmentId property to TransitGatewayPeering Model - -# Release (2022-08-17) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.15.0](service/appmesh/CHANGELOG.md#v1150-2022-08-17) - * **Feature**: AWS App Mesh release to support Multiple Listener and Access Log Format feature -* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.1.0](service/connectcampaigns/CHANGELOG.md#v110-2022-08-17) - * **Feature**: Updated exceptions for Amazon Connect Outbound Campaign APIs.
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.32.0](service/kendra/CHANGELOG.md#v1320-2022-08-17) - * **Feature**: This release adds Zendesk connector (which allows you to specify Zendesk SAAS platform as data source), Proxy Support for Sharepoint and Confluence Server (which allows you to specify the proxy configuration if proxy is required to connect to your Sharepoint/Confluence Server as data source). -* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.17.0](service/lakeformation/CHANGELOG.md#v1170-2022-08-17) - * **Feature**: This release adds a new API support "AssumeDecoratedRoleWithSAML" and also release updates the corresponding documentation. -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.24.0](service/lambda/CHANGELOG.md#v1240-2022-08-17) - * **Feature**: Added support for customization of Consumer Group ID for MSK and Kafka Event Source Mappings. -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.22.0](service/lexmodelsv2/CHANGELOG.md#v1220-2022-08-17) - * **Feature**: This release introduces support for enhanced conversation design with the ability to define custom conversation flows with conditional branching and new bot responses. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.24.0](service/rds/CHANGELOG.md#v1240-2022-08-17) - * **Feature**: Adds support for Internet Protocol Version 6 (IPv6) for RDS Aurora database clusters. -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.18](service/secretsmanager/CHANGELOG.md#v11518-2022-08-17) - * **Documentation**: Documentation updates for Secrets Manager. - -# Release (2022-08-16) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.20.0](service/rekognition/CHANGELOG.md#v1200-2022-08-16) - * **Feature**: This release adds APIs which support copying an Amazon Rekognition Custom Labels model and managing project policies across AWS account. 
-* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.14.12](service/servicecatalog/CHANGELOG.md#v11412-2022-08-16) - * **Documentation**: Documentation updates for Service Catalog - -# Release (2022-08-15) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.19.0](service/cloudfront/CHANGELOG.md#v1190-2022-08-15) - * **Feature**: Adds Http 3 support to distributions -* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.14.13](service/identitystore/CHANGELOG.md#v11413-2022-08-15) - * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) -* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.11.17](service/sso/CHANGELOG.md#v11117-2022-08-15) - * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) -* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.9.0](service/wisdom/CHANGELOG.md#v190-2022-08-15) - * **Feature**: This release introduces a new API PutFeedback that allows submitting feedback to Wisdom on content relevance. - -# Release (2022-08-14) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/config`: [v1.17.0](config/CHANGELOG.md#v1170-2022-08-14) - * **Feature**: Add alternative mechanism for determining the user's `$HOME` or `%USERPROFILE%` location when the environment variables are not present. -* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.15.0](service/amp/CHANGELOG.md#v1150-2022-08-14) - * **Feature**: This release adds log APIs that allow customers to manage logging for their Amazon Managed Service for Prometheus workspaces.
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.11.0](service/chimesdkmessaging/CHANGELOG.md#v1110-2022-08-14) - * **Feature**: The Amazon Chime SDK now supports channels with up to one million participants with elastic channels. -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.17.0](service/ivs/CHANGELOG.md#v1170-2022-08-14) - * **Feature**: Updates various list api MaxResults ranges -* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.12.0](service/personalizeruntime/CHANGELOG.md#v1120-2022-08-14) - * **Feature**: This release provides support for promotions in AWS Personalize runtime. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.23.6](service/rds/CHANGELOG.md#v1236-2022-08-14) - * **Documentation**: Adds support for RDS Custom to DBInstanceClass in ModifyDBInstance - -# Release (2022-08-11) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/backupstorage`: [v1.0.0](service/backupstorage/CHANGELOG.md#v100-2022-08-11) - * **Release**: New AWS service client module - * **Feature**: This is the first public release of AWS Backup Storage. We are exposing some previously-internal APIs for use by external services. These APIs are not meant to be used directly by customers. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.30.0](service/glue/CHANGELOG.md#v1300-2022-08-11) - * **Feature**: Add support for Python 3.9 AWS Glue Python Shell jobs -* `github.com/aws/aws-sdk-go-v2/service/privatenetworks`: [v1.0.0](service/privatenetworks/CHANGELOG.md#v100-2022-08-11) - * **Release**: New AWS service client module - * **Feature**: This is the initial SDK release for AWS Private 5G. AWS Private 5G is a managed service that makes it easy to deploy, operate, and scale your own private mobile network at your on-premises location. 
- -# Release (2022-08-10) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/config`: [v1.16.0](config/CHANGELOG.md#v1160-2022-08-10) - * **Feature**: Adds support for the following settings in the `~/.aws/credentials` file: `sso_account_id`, `sso_region`, `sso_role_name`, `sso_start_url`, and `ca_bundle`. -* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.12.0](service/dlm/CHANGELOG.md#v1120-2022-08-10) - * **Feature**: This release adds support for excluding specific data (non-boot) volumes from multi-volume snapshot sets created by snapshot lifecycle policies -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.52.0](service/ec2/CHANGELOG.md#v1520-2022-08-10) - * **Feature**: This release adds support for excluding specific data (non-root) volumes from multi-volume snapshot sets created from instances. - -# Release (2022-08-09) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.20.0](service/cloudwatch/CHANGELOG.md#v1200-2022-08-09) - * **Feature**: Various quota increases related to dimensions and custom metrics -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.18.0](service/location/CHANGELOG.md#v1180-2022-08-09) - * **Feature**: Amazon Location Service now allows circular geofences in BatchPutGeofence, PutGeofence, and GetGeofence APIs. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.39.0](service/sagemaker/CHANGELOG.md#v1390-2022-08-09) - * **Feature**: Amazon SageMaker Automatic Model Tuning now supports specifying multiple alternate EC2 instance types to make tuning jobs more robust when the preferred instance type is not available due to insufficient capacity. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.13.0](service/sagemakera2iruntime/CHANGELOG.md#v1130-2022-08-09) - * **Feature**: Fix bug with parsing ISO-8601 CreationTime in Java SDK in DescribeHumanLoop - -# Release (2022-08-08) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.16.9 - * **Bug Fix**: aws/signer/v4: Fixes a panic in SDK's handling of endpoint URLs with ports by correcting how URL path is parsed from opaque URLs. Fixes [#1294](https://github.com/aws/aws-sdk-go-v2/issues/1294). -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.29.0](service/glue/CHANGELOG.md#v1290-2022-08-08) - * **Feature**: Add an option to run non-urgent or non-time sensitive Glue Jobs on spare capacity -* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.14.10](service/identitystore/CHANGELOG.md#v11410-2022-08-08) - * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) -* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.21.0](service/iotwireless/CHANGELOG.md#v1210-2022-08-08) - * **Feature**: AWS IoT Wireless release support for sidewalk data reliability. -* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.17.0](service/pinpoint/CHANGELOG.md#v1170-2022-08-08) - * **Feature**: Adds support for Advance Quiet Time in Journeys. Adds RefreshOnSegmentUpdate and WaitForQuietTime to JourneyResponse. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.23.2](service/quicksight/CHANGELOG.md#v1232-2022-08-08) - * **Documentation**: A series of documentation updates to the QuickSight API reference. 
-* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.11.14](service/sso/CHANGELOG.md#v11114-2022-08-08) - * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) -* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.15.2](service/ssoadmin/CHANGELOG.md#v1152-2022-08-08) - * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) -* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.12.12](service/ssooidc/CHANGELOG.md#v11212-2022-08-08) - * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) - -# Release (2022-08-04) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.13.0](service/chimesdkmeetings/CHANGELOG.md#v1130-2022-08-04) - * **Feature**: Adds support for Tags on Amazon Chime SDK WebRTC sessions -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.24.0](service/configservice/CHANGELOG.md#v1240-2022-08-04) - * **Feature**: Add resourceType enums for Athena, GlobalAccelerator, Detective and EC2 types -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.21.3](service/databasemigrationservice/CHANGELOG.md#v1213-2022-08-04) - * **Documentation**: Documentation updates for Database Migration Service (DMS). -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.28.0](service/iot/CHANGELOG.md#v1280-2022-08-04) - * **Feature**: This release adds support for attaching a provisioning template to a CACert for the JITP function; customers no longer have to hardcode a roleArn and templateBody when registering a CACert to enable JITP.
- -# Release (2022-08-03) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.18.0](service/cognitoidentityprovider/CHANGELOG.md#v1180-2022-08-03) - * **Feature**: Add a new exception type, ForbiddenException, that is returned when request is not allowed -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.22.0](service/wafv2/CHANGELOG.md#v1220-2022-08-03) - * **Feature**: You can now associate an AWS WAF web ACL with an Amazon Cognito user pool. - -# Release (2022-08-02) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.0.0](service/licensemanagerusersubscriptions/CHANGELOG.md#v100-2022-08-02) - * **Release**: New AWS service client module - * **Feature**: This release supports user based subscription for Microsoft Visual Studio Professional and Enterprise on EC2. -* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.21.0](service/personalize/CHANGELOG.md#v1210-2022-08-02) - * **Feature**: This release adds support for incremental bulk ingestion for the Personalize CreateDatasetImportJob API. - -# Release (2022-08-01) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.23.1](service/configservice/CHANGELOG.md#v1231-2022-08-01) - * **Documentation**: Documentation update for PutConfigRule and PutOrganizationConfigRule -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.22.0](service/workspaces/CHANGELOG.md#v1220-2022-08-01) - * **Feature**: This release introduces ModifySamlProperties, a new API that allows control of SAML properties associated with a WorkSpaces directory. The DescribeWorkspaceDirectories API will now additionally return SAML properties in its responses. 
- -# Release (2022-07-29) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.51.0](service/ec2/CHANGELOG.md#v1510-2022-07-29) - * **Feature**: Documentation updates for Amazon EC2. -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.24.4](service/fsx/CHANGELOG.md#v1244-2022-07-29) - * **Documentation**: Documentation updates for Amazon FSx -* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.17.0](service/shield/CHANGELOG.md#v1170-2022-07-29) - * **Feature**: AWS Shield Advanced now supports filtering for ListProtections and ListProtectionGroups. - -# Release (2022-07-28) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.50.1](service/ec2/CHANGELOG.md#v1501-2022-07-28) - * **Documentation**: Documentation updates for VM Import/Export. -* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.16.0](service/elasticsearchservice/CHANGELOG.md#v1160-2022-07-28) - * **Feature**: This release adds support for gp3 EBS (Elastic Block Store) storage. -* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.14.0](service/lookoutvision/CHANGELOG.md#v1140-2022-07-28) - * **Feature**: This release introduces support for image segmentation models and updates CPU accelerator options for models hosted on edge devices. -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.10.0](service/opensearch/CHANGELOG.md#v1100-2022-07-28) - * **Feature**: This release adds support for gp3 EBS (Elastic Block Store) storage. - -# Release (2022-07-27) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.20.0](service/auditmanager/CHANGELOG.md#v1200-2022-07-27) - * **Feature**: This release adds an exceeded quota exception to several APIs. We added a ServiceQuotaExceededException for the following operations: CreateAssessment, CreateControl, CreateAssessmentFramework, and UpdateAssessmentStatus. 
-* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.21.0](service/chime/CHANGELOG.md#v1210-2022-07-27) - * **Feature**: Chime VoiceConnector will now support ValidateE911Address which will allow customers to prevalidate their addresses included in their SIP invites for emergency calling -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.23.0](service/configservice/CHANGELOG.md#v1230-2022-07-27) - * **Feature**: This release adds ListConformancePackComplianceScores API to support the new compliance score feature, which provides a percentage of the number of compliant rule-resource combinations in a conformance pack compared to the number of total possible rule-resource combinations in the conformance pack. -* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.14.0](service/globalaccelerator/CHANGELOG.md#v1140-2022-07-27) - * **Feature**: Global Accelerator now supports dual-stack accelerators, enabling support for IPv4 and IPv6 traffic. -* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.13.0](service/marketplacecatalog/CHANGELOG.md#v1130-2022-07-27) - * **Feature**: The SDK for the StartChangeSet API will now automatically set and use an idempotency token in the ClientRequestToken request parameter if the customer does not provide it. -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.17.0](service/polly/CHANGELOG.md#v1170-2022-07-27) - * **Feature**: Amazon Polly adds new English and Hindi voice - Kajal. Kajal is available as Neural voice only. -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.27.5](service/ssm/CHANGELOG.md#v1275-2022-07-27) - * **Documentation**: Adding doc updates for OpsCenter support in Service Setting actions. -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.21.0](service/workspaces/CHANGELOG.md#v1210-2022-07-27) - * **Feature**: Added CreateWorkspaceImage API to create a new WorkSpace image from an existing WorkSpace. 
- -# Release (2022-07-26) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.15.0](service/appsync/CHANGELOG.md#v1150-2022-07-26) - * **Feature**: Adds support for a new API to evaluate mapping templates with mock data, allowing you to remotely unit test your AppSync resolvers and functions. -* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.16.0](service/detective/CHANGELOG.md#v1160-2022-07-26) - * **Feature**: Added the ability to get data source package information for the behavior graph. Graph administrators can now start (or stop) optional datasources on the behavior graph. -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.15.0](service/guardduty/CHANGELOG.md#v1150-2022-07-26) - * **Feature**: Amazon GuardDuty introduces a new Malware Protection feature that triggers malware scan on selected EC2 instance resources, after the service detects a potentially malicious activity. -* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.13.0](service/lookoutvision/CHANGELOG.md#v1130-2022-07-26) - * **Feature**: This release introduces support for the automatic scaling of inference units used by Amazon Lookout for Vision models. -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.22.0](service/macie2/CHANGELOG.md#v1220-2022-07-26) - * **Feature**: This release adds support for retrieving (revealing) sample occurrences of sensitive data that Amazon Macie detects and reports in findings. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.23.1](service/rds/CHANGELOG.md#v1231-2022-07-26) - * **Documentation**: Adds support for using RDS Proxies with RDS for MariaDB databases. -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.19.0](service/rekognition/CHANGELOG.md#v1190-2022-07-26) - * **Feature**: This release introduces support for the automatic scaling of inference units used by Amazon Rekognition Custom Labels models. 
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.22.3](service/securityhub/CHANGELOG.md#v1223-2022-07-26) - * **Documentation**: Documentation updates for AWS Security Hub -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.21.0](service/transfer/CHANGELOG.md#v1210-2022-07-26) - * **Feature**: AWS Transfer Family now supports Applicability Statement 2 (AS2), a network protocol used for the secure and reliable transfer of critical Business-to-Business (B2B) data over the public internet using HTTP/HTTPS as the transport mechanism. - -# Release (2022-07-25) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.23.6](service/autoscaling/CHANGELOG.md#v1236-2022-07-25) - * **Documentation**: Documentation update for Amazon EC2 Auto Scaling. - -# Release (2022-07-22) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.7.0](service/account/CHANGELOG.md#v170-2022-07-22) - * **Feature**: This release enables customers to manage the primary contact information for their AWS accounts. For more information, see https://docs.aws.amazon.com/accounts/latest/reference/API_Operations.html -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.50.0](service/ec2/CHANGELOG.md#v1500-2022-07-22) - * **Feature**: Added support for EC2 M1 Mac instances. For more information, please visit aws.amazon.com/mac. -* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.15.0](service/iotdeviceadvisor/CHANGELOG.md#v1150-2022-07-22) - * **Feature**: Added new service feature (Early access only) - Long Duration Test, where customers can test the IoT device to observe how it behaves when the device is in operation for longer period. -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.22.0](service/medialive/CHANGELOG.md#v1220-2022-07-22) - * **Feature**: Link devices now support remote rebooting. 
Link devices now support maintenance windows. Maintenance windows allow a Link device to install software updates without stopping the MediaLive channel. The channel will experience a brief loss of input from the device while updates are installed. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.23.0](service/rds/CHANGELOG.md#v1230-2022-07-22) - * **Feature**: This release adds the "ModifyActivityStream" API with support for audit policy state locking and unlocking. -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.21.0](service/transcribe/CHANGELOG.md#v1210-2022-07-22) - * **Feature**: Remove unsupported language codes for StartTranscriptionJob and update VocabularyFileUri for UpdateMedicalVocabulary - -# Release (2022-07-21) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.18.0](service/athena/CHANGELOG.md#v1180-2022-07-21) - * **Feature**: This feature allows customers to retrieve runtime statistics for completed queries -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.19.0](service/cloudwatch/CHANGELOG.md#v1190-2022-07-21) - * **Feature**: Adding support for the suppression of Composite Alarm actions -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.21.1](service/databasemigrationservice/CHANGELOG.md#v1211-2022-07-21) - * **Documentation**: Documentation updates for Database Migration Service (DMS). -* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.19.0](service/docdb/CHANGELOG.md#v1190-2022-07-21) - * **Feature**: Enable copy-on-write restore type -* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.14.0](service/ec2instanceconnect/CHANGELOG.md#v1140-2022-07-21) - * **Feature**: This release includes a new exception type "EC2InstanceUnavailableException" for SendSSHPublicKey and SendSerialConsoleSSHPublicKey APIs. 
-* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.20.0](service/frauddetector/CHANGELOG.md#v1200-2022-07-21) - * **Feature**: The release introduces Account Takeover Insights (ATI) model. The ATI model detects fraud relating to account takeover. This release also adds support for new variable types: ARE_CREDENTIALS_VALID and SESSION_ID and adds new structures to Model Version APIs. -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.23.0](service/iotsitewise/CHANGELOG.md#v1230-2022-07-21) - * **Feature**: Added asynchronous API to ingest bulk historical and current data into IoT SiteWise. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.31.0](service/kendra/CHANGELOG.md#v1310-2022-07-21) - * **Feature**: Amazon Kendra now provides Oauth2 support for SharePoint Online. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-sharepoint.html -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.18.0](service/networkfirewall/CHANGELOG.md#v1180-2022-07-21) - * **Feature**: Network Firewall now supports referencing dynamic IP sets from stateful rule groups, for IP sets stored in Amazon VPC prefix lists. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.22.1](service/rds/CHANGELOG.md#v1221-2022-07-21) - * **Documentation**: Adds support for creating an RDS Proxy for an RDS for MariaDB database. - -# Release (2022-07-20) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.17.11](service/acmpca/CHANGELOG.md#v11711-2022-07-20) - * **Documentation**: AWS Certificate Manager (ACM) Private Certificate Authority (PCA) documentation updates -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.27.0](service/iot/CHANGELOG.md#v1270-2022-07-20) - * **Feature**: GA release the ability to enable/disable IoT Fleet Indexing for Device Defender and Named Shadow information, and search them through IoT Fleet Indexing APIs. 
This includes Named Shadow Selection as a part of the UpdateIndexingConfiguration API. - -# Release (2022-07-19) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.18.0](service/devopsguru/CHANGELOG.md#v1180-2022-07-19) - * **Feature**: Added new APIs for log anomaly detection feature. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.28.1](service/glue/CHANGELOG.md#v1281-2022-07-19) - * **Documentation**: Documentation updates for AWS Glue Job Timeout and Autoscaling -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.38.0](service/sagemaker/CHANGELOG.md#v1380-2022-07-19) - * **Feature**: Fixed an issue with cross account QueryLineage -* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.12.0](service/sagemakeredge/CHANGELOG.md#v1120-2022-07-19) - * **Feature**: Amazon SageMaker Edge Manager provides lightweight model deployment feature to deploy machine learning models on requested devices. -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.20.0](service/workspaces/CHANGELOG.md#v1200-2022-07-19) - * **Feature**: Increased the character limit of the login message from 850 to 2000 characters. - -# Release (2022-07-18) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.14.0](service/applicationdiscoveryservice/CHANGELOG.md#v1140-2022-07-18) - * **Feature**: Add AWS Agentless Collector details to the GetDiscoverySummary API response -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.49.1](service/ec2/CHANGELOG.md#v1491-2022-07-18) - * **Documentation**: Documentation updates for Amazon EC2. 
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.22.0](service/elasticache/CHANGELOG.md#v1220-2022-07-18) - * **Feature**: Adding AutoMinorVersionUpgrade in the DescribeReplicationGroups API -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.18.0](service/kms/CHANGELOG.md#v1180-2022-07-18) - * **Feature**: Added support for the SM2 KeySpec in China Partition Regions -* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.17.0](service/mediapackage/CHANGELOG.md#v1170-2022-07-18) - * **Feature**: This release adds "IncludeIframeOnlyStream" for Dash endpoints and increases the number of supported video and audio encryption presets for Speke v2 -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.37.0](service/sagemaker/CHANGELOG.md#v1370-2022-07-18) - * **Feature**: Amazon SageMaker Edge Manager provides lightweight model deployment feature to deploy machine learning models on requested devices. -* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.15.0](service/ssoadmin/CHANGELOG.md#v1150-2022-07-18) - * **Feature**: AWS SSO now supports attaching customer managed policies and a permissions boundary to your permission sets. This release adds new API operations to manage and view the customer managed policies and the permissions boundary for a given permission set. - -# Release (2022-07-15) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.18.3](service/datasync/CHANGELOG.md#v1183-2022-07-15) - * **Documentation**: Documentation updates for AWS DataSync regarding configuring Amazon FSx for ONTAP location security groups and SMB user permissions. -* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.7.0](service/drs/CHANGELOG.md#v170-2022-07-15) - * **Feature**: Changed existing APIs to allow choosing a dynamic volume type for replicating volumes, to reduce costs for customers. 
-* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.8.0](service/evidently/CHANGELOG.md#v180-2022-07-15) - * **Feature**: This release adds support for the new segmentation feature. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.21.0](service/wafv2/CHANGELOG.md#v1210-2022-07-15) - * **Feature**: This SDK release provide customers ability to add sensitivity level for WAF SQLI Match Statements. - -# Release (2022-07-14) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.17.0](service/athena/CHANGELOG.md#v1170-2022-07-14) - * **Feature**: This release updates data types that contain either QueryExecutionId, NamedQueryId or ExpectedBucketOwner. Ids must be between 1 and 128 characters and contain only non-whitespace characters. ExpectedBucketOwner must be 12-digit string. -* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.13.0](service/codeartifact/CHANGELOG.md#v1130-2022-07-14) - * **Feature**: This release introduces Package Origin Controls, a mechanism used to counteract Dependency Confusion attacks. Adds two new APIs, PutPackageOriginConfiguration and DescribePackage, and updates the ListPackage, DescribePackageVersion and ListPackageVersion APIs in support of the feature. -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.22.0](service/configservice/CHANGELOG.md#v1220-2022-07-14) - * **Feature**: Update ResourceType enum with values for Route53Resolver, Batch, DMS, Workspaces, Stepfunctions, SageMaker, ElasticLoadBalancingV2, MSK types -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.49.0](service/ec2/CHANGELOG.md#v1490-2022-07-14) - * **Feature**: This release adds flow logs for Transit Gateway to allow customers to gain deeper visibility and insights into network traffic through their Transit Gateways. -* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.18.0](service/fms/CHANGELOG.md#v1180-2022-07-14) - * **Feature**: Adds support for strict ordering in stateful rule groups in Network Firewall policies. 
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.28.0](service/glue/CHANGELOG.md#v1280-2022-07-14) - * **Feature**: This release adds an additional worker type for Glue Streaming jobs. -* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.7.0](service/inspector2/CHANGELOG.md#v170-2022-07-14) - * **Feature**: This release adds support for Inspector V2 scan configurations through the get and update configuration APIs. Currently this allows configuring ECR automated re-scan duration to lifetime or 180 days or 30 days. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.30.0](service/kendra/CHANGELOG.md#v1300-2022-07-14) - * **Feature**: This release adds AccessControlConfigurations which allow you to redefine your document level access control without the need for content re-indexing. -* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.13.0](service/nimble/CHANGELOG.md#v1130-2022-07-14) - * **Feature**: Amazon Nimble Studio adds support for IAM-based access to AWS resources for Nimble Studio components and custom studio components. Studio Component scripts use these roles on Nimble Studio workstation to mount filesystems, access S3 buckets, or other configured resources in the Studio's AWS account -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.22.0](service/outposts/CHANGELOG.md#v1220-2022-07-14) - * **Feature**: This release adds the ShipmentInformation and AssetInformationList fields to the GetOrder API response. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.36.0](service/sagemaker/CHANGELOG.md#v1360-2022-07-14) - * **Feature**: This release adds support for G5, P4d, and C6i instance types in Amazon SageMaker Inference and increases the number of hyperparameters that can be searched from 20 to 30 in Amazon SageMaker Automatic Model Tuning - -# Release (2022-07-13) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.13.0](service/appconfig/CHANGELOG.md#v1130-2022-07-13) - * **Feature**: Adding Create, Get, Update, Delete, and List APIs for two new resources: Extensions and ExtensionAssociations. - -# Release (2022-07-12) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.14.0](service/networkmanager/CHANGELOG.md#v1140-2022-07-12) - * **Feature**: This release adds general availability API support for AWS Cloud WAN. - -# Release (2022-07-11) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.48.0](service/ec2/CHANGELOG.md#v1480-2022-07-11) - * **Feature**: Build, manage, and monitor a unified global network that connects resources running across your cloud and on-premises environments using the AWS Cloud WAN APIs. -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.26.0](service/redshift/CHANGELOG.md#v1260-2022-07-11) - * **Feature**: This release adds a new --snapshot-arn field for describe-cluster-snapshots, describe-node-configuration-options, restore-from-cluster-snapshot, authorize-snapshot-access, and revoke-snapshot-access APIs. It allows customers to give a Redshift snapshot ARN or a Redshift Serverless ARN as input. -* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.2.2](service/redshiftserverless/CHANGELOG.md#v122-2022-07-11) - * **Documentation**: Removed prerelease language for GA launch. 
- -# Release (2022-07-08) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.17.0](service/backup/CHANGELOG.md#v1170-2022-07-08) - * **Feature**: This release adds support for authentication using IAM user identity instead of passed IAM role, identified by excluding the IamRoleArn field in the StartRestoreJob API. This feature applies to only resource clients with a destructive restore nature (e.g. SAP HANA). - -# Release (2022-07-07) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.12.0](service/chimesdkmeetings/CHANGELOG.md#v1120-2022-07-07) - * **Feature**: Adds support for AppKeys and TenantIds in Amazon Chime SDK WebRTC sessions -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.21.0](service/databasemigrationservice/CHANGELOG.md#v1210-2022-07-07) - * **Feature**: New api to migrate event subscriptions to event bridge rules -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.26.0](service/iot/CHANGELOG.md#v1260-2022-07-07) - * **Feature**: This release adds support to register a CA certificate without having to provide a verification certificate. This also allows multiple AWS accounts to register the same CA in the same region. -* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.20.0](service/iotwireless/CHANGELOG.md#v1200-2022-07-07) - * **Feature**: Adds 5 APIs: PutPositionConfiguration, GetPositionConfiguration, ListPositionConfigurations, UpdatePosition, GetPosition for the new Positioning Service feature which enables customers to configure solvers to calculate position of LoRaWAN devices, or specify position of LoRaWAN devices & gateways. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.35.0](service/sagemaker/CHANGELOG.md#v1350-2022-07-07) - * **Feature**: Heterogeneous clusters: the ability to launch training jobs with multiple instance types. This enables running component of the training job on the instance type that is most suitable for it. e.g. 
doing data processing and augmentation on CPU instances and neural network training on GPU instances - -# Release (2022-07-06) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.22.0](service/cloudformation/CHANGELOG.md#v1220-2022-07-06) - * **Feature**: My AWS Service (placeholder) - Add a new feature Account-level Targeting for StackSet operation -* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.16.0](service/synthetics/CHANGELOG.md#v1160-2022-07-06) - * **Feature**: This release introduces Group feature, which enables users to group cross-region canaries. - -# Release (2022-07-05) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.21.5](service/configservice/CHANGELOG.md#v1215-2022-07-05) - * **Documentation**: Updating documentation service limits -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.21.0](service/lexmodelsv2/CHANGELOG.md#v1210-2022-07-05) - * **Feature**: This release introduces additional optional parameters "messageSelectionStrategy" to PromptSpecification, which enables the users to configure the bot to play messages in orderly manner. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.23.0](service/quicksight/CHANGELOG.md#v1230-2022-07-05) - * **Feature**: This release allows customers to programmatically create QuickSight accounts with Enterprise and Enterprise + Q editions. It also releases allowlisting domains for embedding QuickSight dashboards at runtime through the embedding APIs. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.22.0](service/rds/CHANGELOG.md#v1220-2022-07-05) - * **Feature**: Adds waiters support for DBCluster. 
-* `github.com/aws/aws-sdk-go-v2/service/rolesanywhere`: [v1.0.0](service/rolesanywhere/CHANGELOG.md#v100-2022-07-05) - * **Release**: New AWS service client module - * **Feature**: IAM Roles Anywhere allows your workloads such as servers, containers, and applications to obtain temporary AWS credentials and use the same IAM roles and policies that you have configured for your AWS workloads to access AWS resources. -* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.19.0](service/sqs/CHANGELOG.md#v1190-2022-07-05) - * **Feature**: Adds support for the SQS client to automatically validate message checksums for SendMessage, SendMessageBatch, and ReceiveMessage. A DisableMessageChecksumValidation parameter has been added to the Options struct for SQS package. Setting this to true will disable the checksum validation. This can be set when creating a client, or per operation call. -* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.15.0](service/ssmincidents/CHANGELOG.md#v1150-2022-07-05) - * **Feature**: Adds support for tagging incident-record on creation by providing incident tags in the template within a response-plan. - -# Release (2022-07-01) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.20.0](service/databasemigrationservice/CHANGELOG.md#v1200-2022-07-01) - * **Feature**: Added new features for AWS DMS version 3.4.7 that includes new endpoint settings for S3, OpenSearch, Postgres, SQLServer and Oracle. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.21.5](service/rds/CHANGELOG.md#v1215-2022-07-01) - * **Documentation**: Adds support for additional retention periods to Performance Insights. -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.27.0](service/s3/CHANGELOG.md#v1270-2022-07-01) - * **Feature**: Add presign support for HeadBucket, DeleteObject, and DeleteBucket. 
Fixes [#1076](https://github.com/aws/aws-sdk-go-v2/issues/1076). - -# Release (2022-06-30) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.16.0](service/athena/CHANGELOG.md#v1160-2022-06-30) - * **Feature**: This feature introduces the API support for Athena's parameterized query and BatchGetPreparedStatement API. -* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.18.0](service/customerprofiles/CHANGELOG.md#v1180-2022-06-30) - * **Feature**: This release adds the optional MinAllowedConfidenceScoreForMerging parameter to the CreateDomain, UpdateDomain, and GetAutoMergingPreview APIs in Customer Profiles. This parameter is used as a threshold to influence the profile auto-merging step of the Identity Resolution process. -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.20.0](service/emr/CHANGELOG.md#v1200-2022-06-30) - * **Feature**: This release adds support for the ExecutionRoleArn parameter in the AddJobFlowSteps and DescribeStep APIs. Customers can use ExecutionRoleArn to specify the IAM role used for each job they submit using the AddJobFlowSteps API. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.27.0](service/glue/CHANGELOG.md#v1270-2022-06-30) - * **Feature**: This release adds tag as an input of CreateDatabase -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.29.0](service/kendra/CHANGELOG.md#v1290-2022-06-30) - * **Feature**: Amazon Kendra now provides a data source connector for alfresco -* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.13.0](service/mwaa/CHANGELOG.md#v1130-2022-06-30) - * **Feature**: Documentation updates for Amazon Managed Workflows for Apache Airflow. -* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.16.0](service/pricing/CHANGELOG.md#v1160-2022-06-30) - * **Feature**: Documentation update for GetProducts Response. 
-* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.16.0](service/wellarchitected/CHANGELOG.md#v1160-2022-06-30) - * **Feature**: Added support for UpdateGlobalSettings API. Added status filter to ListWorkloadShares and ListLensShares. -* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.16.0](service/workmail/CHANGELOG.md#v1160-2022-06-30) - * **Feature**: This release adds support for managing user availability configurations in Amazon WorkMail. - -# Release (2022-06-29) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.16.6 - * **Bug Fix**: Fix aws/signer/v4 to not double sign Content-Length header. Fixes [#1728](https://github.com/aws/aws-sdk-go-v2/issues/1728). Thanks to @matelang for creating the issue and PR. -* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.17.0](service/appstream/CHANGELOG.md#v1170-2022-06-29) - * **Feature**: Includes support for StreamingExperienceSettings in CreateStack and UpdateStack APIs -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.18.7](service/elasticloadbalancingv2/CHANGELOG.md#v1187-2022-06-29) - * **Documentation**: This release adds two attributes for ALB. One, helps to preserve the host header and the other helps to modify, preserve, or remove the X-Forwarded-For header in the HTTP request. -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.19.0](service/emr/CHANGELOG.md#v1190-2022-06-29) - * **Feature**: This release introduces additional optional parameter "Throughput" to VolumeSpecification to enable user to configure throughput for gp3 ebs volumes. -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.21.0](service/medialive/CHANGELOG.md#v1210-2022-06-29) - * **Feature**: This release adds support for automatic renewal of MediaLive reservations at the end of each reservation term. Automatic renewal is optional. 
This release also adds support for labelling accessibility-focused audio and caption tracks in HLS outputs. -* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.2.0](service/redshiftserverless/CHANGELOG.md#v120-2022-06-29) - * **Feature**: Add new API operations for Amazon Redshift Serverless, a new way of using Amazon Redshift without needing to manually manage provisioned clusters. The new operations let you interact with Redshift Serverless resources, such as create snapshots, list VPC endpoints, delete resource policies, and more. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.34.0](service/sagemaker/CHANGELOG.md#v1340-2022-06-29) - * **Feature**: This release adds: UpdateFeatureGroup, UpdateFeatureMetadata, DescribeFeatureMetadata APIs; FeatureMetadata type in Search API; LastModifiedTime, LastUpdateStatus, OnlineStoreTotalSizeBytes in DescribeFeatureGroup API. -* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.14.0](service/translate/CHANGELOG.md#v1140-2022-06-29) - * **Feature**: Added ListLanguages API which can be used to list the languages supported by Translate. - -# Release (2022-06-28) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.18.0](service/datasync/CHANGELOG.md#v1180-2022-06-28) - * **Feature**: AWS DataSync now supports Amazon FSx for NetApp ONTAP locations. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.47.0](service/ec2/CHANGELOG.md#v1470-2022-06-28) - * **Feature**: This release adds a new spread placement group to EC2 Placement Groups: host level spread, which spread instances between physical hosts, available to Outpost customers only. CreatePlacementGroup and DescribePlacementGroups APIs were updated with a new parameter: SpreadLevel to support this feature. 
-* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.12.0](service/finspacedata/CHANGELOG.md#v1120-2022-06-28) - * **Feature**: Release new API GetExternalDataViewAccessDetails -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.16.0](service/polly/CHANGELOG.md#v1160-2022-06-28) - * **Feature**: Add 4 new neural voices - Pedro (es-US), Liam (fr-CA), Daniel (de-DE) and Arthur (en-GB). - -# Release (2022-06-24.2) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.13.7](service/emrcontainers/CHANGELOG.md#v1137-2022-06-242) - * **Bug Fix**: Fixes bug with incorrect modeled timestamp format - -# Release (2022-06-23) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.14.0](service/lookoutequipment/CHANGELOG.md#v1140-2022-06-23) - * **Feature**: This release adds visualizations to the scheduled inference results. Users will be able to see inference results, including diagnostic results from their running inference schedulers. -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.25.1](service/mediaconvert/CHANGELOG.md#v1251-2022-06-23) - * **Documentation**: AWS Elemental MediaConvert SDK has released support for automatic DolbyVision metadata generation when converting HDR10 to DolbyVision. -* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.15.0](service/mgn/CHANGELOG.md#v1150-2022-06-23) - * **Feature**: New and modified APIs for the Post-Migration Framework -* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.6.0](service/migrationhubrefactorspaces/CHANGELOG.md#v160-2022-06-23) - * **Feature**: This release adds the new API UpdateRoute that allows route to be updated to ACTIVE/INACTIVE state. In addition, CreateRoute API will now allow users to create route in ACTIVE/INACTIVE state. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.33.0](service/sagemaker/CHANGELOG.md#v1330-2022-06-23) - * **Feature**: SageMaker Ground Truth now supports Virtual Private Cloud. Customers can launch labeling jobs and access to their private workforce in VPC mode. - -# Release (2022-06-22) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.15.8](service/apigateway/CHANGELOG.md#v1158-2022-06-22) - * **Documentation**: Documentation updates for Amazon API Gateway -* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.15.0](service/pricing/CHANGELOG.md#v1150-2022-06-22) - * **Feature**: This release introduces 1 update to the GetProducts API. The serviceCode attribute is now required when you use the GetProductsRequest. -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.20.0](service/transfer/CHANGELOG.md#v1200-2022-06-22) - * **Feature**: Until today, the service supported only RSA host keys and user keys. Now with this launch, Transfer Family has expanded the support for ECDSA and ED25519 host keys and user keys, enabling customers to support a broader set of clients by choosing RSA, ECDSA, and ED25519 host and user keys. - -# Release (2022-06-21) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.46.0](service/ec2/CHANGELOG.md#v1460-2022-06-21) - * **Feature**: This release adds support for Private IP VPNs, a new feature allowing S2S VPN connections to use private ip addresses as the tunnel outside ip address over Direct Connect as transport. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.9](service/ecs/CHANGELOG.md#v1189-2022-06-21) - * **Documentation**: Amazon ECS UpdateService now supports the following parameters: PlacementStrategies, PlacementConstraints and CapacityProviderStrategy. 
-* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.15.0](service/wellarchitected/CHANGELOG.md#v1150-2022-06-21) - * **Feature**: Adds support for lens tagging, Adds support for multiple helpful-resource urls and multiple improvement-plan urls. - -# Release (2022-06-20) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.14.0](service/directoryservice/CHANGELOG.md#v1140-2022-06-20) - * **Feature**: This release adds support for describing and updating AWS Managed Microsoft AD settings -* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.17.7](service/kafka/CHANGELOG.md#v1177-2022-06-20) - * **Documentation**: Documentation updates to use Az Id during cluster creation. -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.21.0](service/outposts/CHANGELOG.md#v1210-2022-06-20) - * **Feature**: This release adds the AssetLocation structure to the ListAssets response. AssetLocation includes the RackElevation for an Asset. - -# Release (2022-06-17) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.27.0](service/connect/CHANGELOG.md#v1270-2022-06-17) - * **Feature**: This release updates these APIs: UpdateInstanceAttribute, DescribeInstanceAttribute and ListInstanceAttributes. You can use it to programmatically enable/disable High volume outbound communications using attribute type HIGH_VOLUME_OUTBOUND on the specified Amazon Connect instance. -* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.0.0](service/connectcampaigns/CHANGELOG.md#v100-2022-06-17) - * **Release**: New AWS service client module - * **Feature**: Added Amazon Connect high volume outbound communications SDK. 
-* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.15.7](service/dynamodb/CHANGELOG.md#v1157-2022-06-17) - * **Documentation**: Doc only update for DynamoDB service -* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.13.7](service/dynamodbstreams/CHANGELOG.md#v1137-2022-06-17) - * **Documentation**: Doc only update for DynamoDB service - -# Release (2022-06-16) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.16.0](service/redshiftdata/CHANGELOG.md#v1160-2022-06-16) - * **Feature**: This release adds a new --workgroup-name field to operations that connect to an endpoint. Customers can now execute queries against their serverless workgroups. -* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.1.0](service/redshiftserverless/CHANGELOG.md#v110-2022-06-16) - * **Feature**: Add new API operations for Amazon Redshift Serverless, a new way of using Amazon Redshift without needing to manually manage provisioned clusters. The new operations let you interact with Redshift Serverless resources, such as create snapshots, list VPC endpoints, delete resource policies, and more. -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.11](service/secretsmanager/CHANGELOG.md#v11511-2022-06-16) - * **Documentation**: Documentation updates for Secrets Manager -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.22.0](service/securityhub/CHANGELOG.md#v1220-2022-06-16) - * **Feature**: Added Threats field for security findings. 
Added new resource details for ECS Container, ECS Task, RDS SecurityGroup, Kinesis Stream, EC2 TransitGateway, EFS AccessPoint, CloudFormation Stack, CloudWatch Alarm, VPC Peering Connection and WAF Rules - -# Release (2022-06-15) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.11.0](service/finspacedata/CHANGELOG.md#v1110-2022-06-15) - * **Feature**: This release adds a new set of APIs, GetPermissionGroup, DisassociateUserFromPermissionGroup, AssociateUserToPermissionGroup, ListPermissionGroupsByUser, ListUsersByPermissionGroup. -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.14.0](service/guardduty/CHANGELOG.md#v1140-2022-06-15) - * **Feature**: Adds finding fields available from GuardDuty Console. Adds FreeTrial related operations. Deprecates the use of various APIs related to Master Accounts and Replace them with Administrator Accounts. -* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.13.0](service/servicecatalogappregistry/CHANGELOG.md#v1130-2022-06-15) - * **Feature**: This release adds a new API ListAttributeGroupsForApplication that returns associated attribute groups of an application. In addition, the UpdateApplication and UpdateAttributeGroup APIs will not allow users to update the 'Name' attribute. -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.19.0](service/workspaces/CHANGELOG.md#v1190-2022-06-15) - * **Feature**: Added new field "reason" to OperationNotSupportedException. Receiving this exception in the DeregisterWorkspaceDirectory API will now return a reason giving more context on the failure. - -# Release (2022-06-14) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.13.0](service/budgets/CHANGELOG.md#v1130-2022-06-14) - * **Feature**: Add a budgets ThrottlingException. Update the CostFilters value pattern. 
-* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.16.0](service/lookoutmetrics/CHANGELOG.md#v1160-2022-06-14) - * **Feature**: Adding filters to Alert and adding new UpdateAlert API. -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.25.0](service/mediaconvert/CHANGELOG.md#v1250-2022-06-14) - * **Feature**: AWS Elemental MediaConvert SDK has added support for rules that constrain Automatic-ABR rendition selection when generating ABR package ladders. - -# Release (2022-06-13) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.20.0](service/outposts/CHANGELOG.md#v1200-2022-06-13) - * **Feature**: This release adds API operations AWS uses to install Outpost servers. - -# Release (2022-06-10) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.19.7](service/frauddetector/CHANGELOG.md#v1197-2022-06-10) - * **Documentation**: Documentation updates for Amazon Fraud Detector (AWSHawksNest) - -# Release (2022-06-09) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.11.0](service/chimesdkmeetings/CHANGELOG.md#v1110-2022-06-09) - * **Feature**: Adds support for live transcription in AWS GovCloud (US) Regions. - -# Release (2022-06-08) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.19.0](service/databasemigrationservice/CHANGELOG.md#v1190-2022-06-08) - * **Feature**: This release adds DMS Fleet Advisor APIs and exposes functionality for DMS Fleet Advisor. It adds functionality to create and modify fleet advisor instances, and to collect and analyze information about the local data infrastructure. -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.18.7](service/iam/CHANGELOG.md#v1187-2022-06-08) - * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM). 
-* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.0.0](service/m2/CHANGELOG.md#v100-2022-06-08) - * **Release**: New AWS service client module - * **Feature**: AWS Mainframe Modernization service is a managed mainframe service and set of tools for planning, migrating, modernizing, and running mainframe workloads on AWS -* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.17.0](service/neptune/CHANGELOG.md#v1170-2022-06-08) - * **Feature**: This release adds support for Neptune to be configured as a global database, with a primary DB cluster in one region, and up to five secondary DB clusters in other regions. -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.25.0](service/redshift/CHANGELOG.md#v1250-2022-06-08) - * **Feature**: Adds new API GetClusterCredentialsWithIAM to return temporary credentials. -* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.0.0](service/redshiftserverless/CHANGELOG.md#v100-2022-06-08) - * **Release**: New AWS service client module - * **Feature**: Add new API operations for Amazon Redshift Serverless, a new way of using Amazon Redshift without needing to manually manage provisioned clusters. The new operations let you interact with Redshift Serverless resources, such as create snapshots, list VPC endpoints, delete resource policies, and more. - -# Release (2022-06-07) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.19.0](service/auditmanager/CHANGELOG.md#v1190-2022-06-07) - * **Feature**: This release introduces 2 updates to the Audit Manager API. The roleType and roleArn attributes are now required when you use the CreateAssessment or UpdateAssessment operation. We also added a throttling exception to the RegisterAccount API operation. 
-* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.19.0](service/costexplorer/CHANGELOG.md#v1190-2022-06-07) - * **Feature**: Added two new APIs to support cost allocation tags operations: ListCostAllocationTags, UpdateCostAllocationTagsStatus. - -# Release (2022-06-06) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.10.0](service/chimesdkmessaging/CHANGELOG.md#v1100-2022-06-06) - * **Feature**: This release adds support for searching channels by members via the SearchChannels API, removes required restrictions for Name and Mode in UpdateChannel API and enhances CreateChannel API by exposing member and moderator list as well as channel id as optional parameters. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.26.0](service/connect/CHANGELOG.md#v1260-2022-06-06) - * **Feature**: This release adds a new API, GetCurrentUserData, which returns real-time details about users' current activity. - -# Release (2022-06-02) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.16.0](service/applicationinsights/CHANGELOG.md#v1160-2022-06-02) - * **Feature**: Provide Account Level onboarding support through CFN/CLI -* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.12.6](service/codeartifact/CHANGELOG.md#v1126-2022-06-02) - * **Documentation**: Documentation updates for CodeArtifact -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.25.0](service/connect/CHANGELOG.md#v1250-2022-06-02) - * **Feature**: This release adds the following features: 1) New APIs to manage (create, list, update) task template resources, 2) Updates to startTaskContact API to support task templates, and 3) new TransferContact API to programmatically transfer in-progress tasks via a contact flow. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.28.0](service/kendra/CHANGELOG.md#v1280-2022-06-02) - * **Feature**: Amazon Kendra now provides a data source connector for GitHub. 
For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-github.html -* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.14.0](service/proton/CHANGELOG.md#v1140-2022-06-02) - * **Feature**: Add new "Components" API to enable users to Create, Delete and Update AWS Proton components. -* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.10.0](service/voiceid/CHANGELOG.md#v1100-2022-06-02) - * **Feature**: Added a new attribute ServerSideEncryptionUpdateDetails to Domain and DomainSummary. - -# Release (2022-06-01) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.6.0](service/backupgateway/CHANGELOG.md#v160-2022-06-01) - * **Feature**: Adds GetGateway and UpdateGatewaySoftwareNow API and adds hypervisor name to UpdateHypervisor API -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.10.0](service/chimesdkmeetings/CHANGELOG.md#v1100-2022-06-01) - * **Feature**: Adds support for centrally controlling each participant's ability to send and receive audio, video and screen share within a WebRTC session. Attendee capabilities can be specified when the attendee is created and updated during the session with the new BatchUpdateAttendeeCapabilitiesExcept API. -* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.22.0](service/forecast/CHANGELOG.md#v1220-2022-06-01) - * **Feature**: Added Format field to Import and Export APIs in Amazon Forecast. Added TimeSeriesSelector to Create Forecast API. -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.21.0](service/route53/CHANGELOG.md#v1210-2022-06-01) - * **Feature**: Add new APIs to support Route 53 IP Based Routing - -# Release (2022-05-31) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.17.0](service/cognitoidentityprovider/CHANGELOG.md#v1170-2022-05-31) - * **Feature**: Amazon Cognito now supports IP Address propagation for all unauthenticated APIs (e.g. SignUp, ForgotPassword). 
-* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.6.0](service/drs/CHANGELOG.md#v160-2022-05-31) - * **Feature**: Changed existing APIs and added new APIs to accommodate using multiple AWS accounts with AWS Elastic Disaster Recovery. -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.22.0](service/iotsitewise/CHANGELOG.md#v1220-2022-05-31) - * **Feature**: This release adds the following new optional field to the IoT SiteWise asset resource: assetDescription. -* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.15.0](service/lookoutmetrics/CHANGELOG.md#v1150-2022-05-31) - * **Feature**: Adding backtest mode to detectors using the Cloudwatch data source. -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.20.0](service/transcribe/CHANGELOG.md#v1200-2022-05-31) - * **Feature**: Amazon Transcribe now supports automatic language identification for multi-lingual audio in batch mode. - -# Release (2022-05-27) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.16.0](service/appflow/CHANGELOG.md#v1160-2022-05-27) - * **Feature**: Adding the following features/changes: Parquet output that preserves typing from the source connector, Failed executions threshold before deactivation for scheduled flows, increasing max size of access and refresh token from 2048 to 4096 -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.17.0](service/datasync/CHANGELOG.md#v1170-2022-05-27) - * **Feature**: AWS DataSync now supports TLS encryption in transit, file system policies and access points for EFS locations. -* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.1.0](service/emrserverless/CHANGELOG.md#v110-2022-05-27) - * **Feature**: This release adds support for Amazon EMR Serverless, a serverless runtime environment that simplifies running analytics applications using the latest open source frameworks such as Apache Spark and Apache Hive. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.32.0](service/sagemaker/CHANGELOG.md#v1320-2022-05-27) - * **Feature**: Amazon SageMaker Notebook Instances now allows configuration of Instance Metadata Service version and Amazon SageMaker Studio now supports G5 instance types. - -# Release (2022-05-26) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.45.0](service/ec2/CHANGELOG.md#v1450-2022-05-26) - * **Feature**: C7g instances, powered by the latest generation AWS Graviton3 processors, provide the best price performance in Amazon EC2 for compute-intensive workloads. -* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.0.0](service/emrserverless/CHANGELOG.md#v100-2022-05-26) - * **Release**: New AWS service client module - * **Feature**: This release adds support for Amazon EMR Serverless, a serverless runtime environment that simplifies running analytics applications using the latest open source frameworks such as Apache Spark and Apache Hive. -* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.21.0](service/forecast/CHANGELOG.md#v1210-2022-05-26) - * **Feature**: Introduced a new field in Auto Predictor as Time Alignment Boundary. It helps in aligning the timestamps generated during Forecast exports -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.22.0](service/lightsail/CHANGELOG.md#v1220-2022-05-26) - * **Feature**: Amazon Lightsail now supports the ability to configure a Lightsail Container Service to pull images from Amazon ECR private repositories in your account. 
- -# Release (2022-05-25) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.15.6](service/apigateway/CHANGELOG.md#v1156-2022-05-25) - * **Documentation**: Documentation updates for Amazon API Gateway -* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.12.3](service/apprunner/CHANGELOG.md#v1123-2022-05-25) - * **Documentation**: Documentation-only update added for CodeConfiguration. -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.21.0](service/cloudformation/CHANGELOG.md#v1210-2022-05-25) - * **Feature**: Add a new parameter statusReason to DescribeStackSetOperation output for additional details -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.24.0](service/fsx/CHANGELOG.md#v1240-2022-05-25) - * **Feature**: This release adds root squash support to FSx for Lustre to restrict root level access from clients by mapping root users to a less-privileged user/group with limited permissions. -* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.14.0](service/lookoutmetrics/CHANGELOG.md#v1140-2022-05-25) - * **Feature**: Adding AthenaSourceConfig for MetricSet APIs to support Athena as a data source. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.31.0](service/sagemaker/CHANGELOG.md#v1310-2022-05-25) - * **Feature**: Amazon SageMaker Autopilot adds support for manually selecting features from the input dataset using the CreateAutoMLJob API. -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.9](service/secretsmanager/CHANGELOG.md#v1159-2022-05-25) - * **Documentation**: Documentation updates for Secrets Manager -* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.9.0](service/voiceid/CHANGELOG.md#v190-2022-05-25) - * **Feature**: VoiceID will now automatically expire Speakers if they haven't been accessed for Enrollment, Re-enrollment or Successful Auth for three years. 
The Speaker APIs now return a "LastAccessedAt" time for Speakers, and the EvaluateSession API returns "SPEAKER_EXPIRED" Auth Decision for EXPIRED Speakers. - -# Release (2022-05-24) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.16.0](service/cognitoidentityprovider/CHANGELOG.md#v1160-2022-05-24) - * **Feature**: Amazon Cognito now supports requiring attribute verification (ex. email and phone number) before update. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.44.0](service/ec2/CHANGELOG.md#v1440-2022-05-24) - * **Feature**: Stop Protection feature enables customers to protect their instances from accidental stop actions. -* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.4](service/ivschat/CHANGELOG.md#v104-2022-05-24) - * **Documentation**: Doc-only update. For MessageReviewHandler structure, added timeout period in the description of the fallbackResult field -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.24.0](service/mediaconvert/CHANGELOG.md#v1240-2022-05-24) - * **Feature**: AWS Elemental MediaConvert SDK has added support for rules that constrain Automatic-ABR rendition selection when generating ABR package ladders. -* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.13.0](service/networkmanager/CHANGELOG.md#v1130-2022-05-24) - * **Feature**: This release adds Multi Account API support for a TGW Global Network, to enable and disable AWSServiceAccess with AwsOrganizations for Network Manager service and dependency CloudFormation StackSets service. - -# Release (2022-05-23) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.21.0](service/elasticache/CHANGELOG.md#v1210-2022-05-23) - * **Feature**: Added support for encryption in transit for Memcached clusters. Customers can now launch Memcached cluster with encryption in transit enabled when using Memcached version 1.6.12 or later. 
-* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.20.0](service/forecast/CHANGELOG.md#v1200-2022-05-23) - * **Feature**: New APIs for Monitor that help you understand how your predictors perform over time. -* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.20.0](service/personalize/CHANGELOG.md#v1200-2022-05-23) - * **Feature**: Adding modelMetrics as part of DescribeRecommender API response for Personalize. - -# Release (2022-05-20) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.15.7](service/cloudwatchlogs/CHANGELOG.md#v1157-2022-05-20) - * **Documentation**: Doc-only update to publish the new valid values for log retention -* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.18.0](service/comprehend/CHANGELOG.md#v1180-2022-05-20) - * **Feature**: Comprehend releases 14 new entity types for DetectPiiEntities and ContainsPiiEntities APIs. - -# Release (2022-05-19) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.1.0](service/gamesparks/CHANGELOG.md#v110-2022-05-19) - * **Feature**: This release adds an optional DeploymentResult field in the responses of GetStageDeploymentIntegrationTests and ListStageDeploymentIntegrationTests APIs. -* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.13.0](service/lookoutmetrics/CHANGELOG.md#v1130-2022-05-19) - * **Feature**: In this release we added SnsFormat to SNSConfiguration to support human readable alert. - -# Release (2022-05-18) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.14.0](service/appmesh/CHANGELOG.md#v1140-2022-05-18) - * **Feature**: This release updates the existing Create and Update APIs for meshes and virtual nodes by adding a new IP preference field. This new IP preference field can be used to control the IP versions being used with the mesh and allows for IPv6 support within App Mesh. 
-* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.18.3](service/batch/CHANGELOG.md#v1183-2022-05-18) - * **Documentation**: Documentation updates for AWS Batch. -* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.16.0](service/greengrassv2/CHANGELOG.md#v1160-2022-05-18) - * **Feature**: This release adds the new DeleteDeployment API operation that you can use to delete deployment resources. This release also adds support for discontinued AWS-provided components, so AWS can communicate when a component has any issues that you should consider before you deploy it. -* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.12.0](service/ioteventsdata/CHANGELOG.md#v1120-2022-05-18) - * **Feature**: Introducing new API for deleting detectors: BatchDeleteDetector. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.22.0](service/quicksight/CHANGELOG.md#v1220-2022-05-18) - * **Feature**: API UpdatePublicSharingSettings enables IAM admins to enable/disable account level setting for public access of dashboards. When enabled, owners/co-owners for dashboards can enable public access on their dashboards. These dashboards can only be accessed through share link or embedding. -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.19.0](service/transfer/CHANGELOG.md#v1190-2022-05-18) - * **Feature**: AWS Transfer Family now supports SetStat server configuration option, which provides the ability to ignore SetStat command issued by file transfer clients, enabling customers to upload files without any errors. - -# Release (2022-05-17) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/internal/ini`: [v1.3.12](internal/ini/CHANGELOG.md#v1312-2022-05-17) - * **Bug Fix**: Removes the fuzz testing files from the module, as they are invalid and not used. 
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.25.0](service/glue/CHANGELOG.md#v1250-2022-05-17) - * **Feature**: This release adds a new optional parameter called codeGenNodeConfiguration to CRUD job APIs that allows users to manage visual jobs via APIs. The updated CreateJob and UpdateJob will create jobs that can be viewed in Glue Studio as a visual graph. GetJob can be used to get codeGenNodeConfiguration. -* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.13.1](service/iotsecuretunneling/CHANGELOG.md#v1131-2022-05-17) - * **Bug Fix**: Fixes iotsecuretunneling and mobile API clients to use the correct name for signing requests, Fixes [#1686](https://github.com/aws/aws-sdk-go-v2/issues/1686). -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.17.2](service/kms/CHANGELOG.md#v1172-2022-05-17) - * **Documentation**: Add HMAC best practice tip, annual rotation of AWS managed keys. -* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.11.5](service/mobile/CHANGELOG.md#v1115-2022-05-17) - * **Bug Fix**: Fixes iotsecuretunneling and mobile API clients to use the correct name for signing requests, Fixes [#1686](https://github.com/aws/aws-sdk-go-v2/issues/1686). - -# Release (2022-05-16) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.13.0](service/applicationdiscoveryservice/CHANGELOG.md#v1130-2022-05-16) - * **Feature**: Add Migration Evaluator Collector details to the GetDiscoverySummary API response -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.18.0](service/cloudfront/CHANGELOG.md#v1180-2022-05-16) - * **Feature**: Introduced a new error (TooLongCSPInResponseHeadersPolicy) that is returned when the value of the Content-Security-Policy header in a response headers policy exceeds the maximum allowed length. 
-* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.18.1](service/rekognition/CHANGELOG.md#v1181-2022-05-16) - * **Documentation**: Documentation updates for Amazon Rekognition. -* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.6.0](service/resiliencehub/CHANGELOG.md#v160-2022-05-16) - * **Feature**: In this release, we are introducing support for Amazon Elastic Container Service, Amazon Route 53, AWS Elastic Disaster Recovery, AWS Backup in addition to the existing supported Services. This release also supports Terraform file input from S3 and scheduling daily assessments -* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.14.2](service/servicecatalog/CHANGELOG.md#v1142-2022-05-16) - * **Documentation**: Updated the descriptions for the ListAcceptedPortfolioShares API description and the PortfolioShareType parameters. -* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.16.5](service/sts/CHANGELOG.md#v1165-2022-05-16) - * **Documentation**: Documentation updates for AWS Security Token Service. -* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.6.0](service/workspacesweb/CHANGELOG.md#v160-2022-05-16) - * **Feature**: Amazon WorkSpaces Web now supports Administrator timeout control - -# Release (2022-05-13) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.9.0](service/grafana/CHANGELOG.md#v190-2022-05-13) - * **Feature**: This release adds APIs for creating and deleting API keys in an Amazon Managed Grafana workspace. - -# Release (2022-05-12) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.43.0](service/ec2/CHANGELOG.md#v1430-2022-05-12) - * **Feature**: This release introduces a target type Gateway Load Balancer Endpoint for mirrored traffic. Customers can now specify GatewayLoadBalancerEndpoint option during the creation of a traffic mirror target. 
-* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.10.5](service/finspacedata/CHANGELOG.md#v1105-2022-05-12) - * **Documentation**: We've now deprecated CreateSnapshot permission for creating a data view, instead use CreateDataView permission. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.25.1](service/iot/CHANGELOG.md#v1251-2022-05-12) - * **Documentation**: Documentation update for China region ListMetricValues for IoT -* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.2](service/ivschat/CHANGELOG.md#v102-2022-05-12) - * **Documentation**: Documentation-only updates for IVS Chat API Reference. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.27.0](service/kendra/CHANGELOG.md#v1270-2022-05-12) - * **Feature**: Amazon Kendra now provides a data source connector for Jira. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-jira.html -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.23.0](service/lambda/CHANGELOG.md#v1230-2022-05-12) - * **Feature**: Lambda releases NodeJs 16 managed runtime to be available in all commercial regions. -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.21.0](service/lightsail/CHANGELOG.md#v1210-2022-05-12) - * **Feature**: This release adds support to include inactive database bundles in the response of the GetRelationalDatabaseBundles request. -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.19.1](service/outposts/CHANGELOG.md#v1191-2022-05-12) - * **Documentation**: Documentation updates for AWS Outposts. -* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.14.0](service/ssmincidents/CHANGELOG.md#v1140-2022-05-12) - * **Feature**: Adding support for dynamic SSM Runbook parameter values. Updating validation pattern for engagements. Adding ConflictException to UpdateReplicationSet API contract. 
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.6](service/transfer/CHANGELOG.md#v1186-2022-05-12) - * **Documentation**: AWS Transfer Family now accepts ECDSA keys for server host keys - -# Release (2022-05-11) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.42.0](service/ec2/CHANGELOG.md#v1420-2022-05-11) - * **Feature**: This release updates AWS PrivateLink APIs to support IPv6 for PrivateLink Services and Endpoints of type 'Interface'. -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.7](service/secretsmanager/CHANGELOG.md#v1157-2022-05-11) - * **Documentation**: Doc only update for Secrets Manager that fixes several customer-reported issues. - -# Release (2022-05-10) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.17.5](service/computeoptimizer/CHANGELOG.md#v1175-2022-05-10) - * **Documentation**: Documentation updates for Compute Optimizer -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.41.0](service/ec2/CHANGELOG.md#v1410-2022-05-10) - * **Feature**: Added support for using NitroTPM and UEFI Secure Boot on EC2 instances. -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.21.0](service/eks/CHANGELOG.md#v1210-2022-05-10) - * **Feature**: Adds BOTTLEROCKET_ARM_64_NVIDIA and BOTTLEROCKET_x86_64_NVIDIA AMI types to EKS managed nodegroups -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.18.0](service/emr/CHANGELOG.md#v1180-2022-05-10) - * **Feature**: This release updates the Amazon EMR ModifyInstanceGroups API to support "MERGE" type cluster reconfiguration. Also, added the ability to specify a particular Amazon Linux release for all nodes in a cluster launch request. -* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.5.5](service/migrationhubrefactorspaces/CHANGELOG.md#v155-2022-05-10) - * **Documentation**: AWS Migration Hub Refactor Spaces documentation only update to fix a formatting issue. 
- -# Release (2022-05-09) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/config`: [v1.15.5](config/CHANGELOG.md#v1155-2022-05-09) - * **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682) -* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.10.0](service/cloudcontrol/CHANGELOG.md#v1100-2022-05-09) - * **Feature**: SDK release for Cloud Control API to include paginators for Python SDK. -* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.7.0](service/evidently/CHANGELOG.md#v170-2022-05-09) - * **Feature**: Add detail message inside GetExperimentResults API response to indicate experiment result availability -* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.13.5](service/ssmcontacts/CHANGELOG.md#v1135-2022-05-09) - * **Documentation**: Fixed an error in the DescribeEngagement example for AWS Incident Manager. - -# Release (2022-05-06) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.40.0](service/ec2/CHANGELOG.md#v1400-2022-05-06) - * **Feature**: Add new state values for IPAMs, IPAM Scopes, and IPAM Pools. -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.17.0](service/location/CHANGELOG.md#v1170-2022-05-06) - * **Feature**: Amazon Location Service now includes a MaxResults parameter for ListGeofences requests. -* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.16.0](service/mediapackage/CHANGELOG.md#v1160-2022-05-06) - * **Feature**: This release adds Dvb Dash 2014 as an available profile option for Dash Origin Endpoints. 
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.21.1](service/rds/CHANGELOG.md#v1211-2022-05-06) - * **Documentation**: Various documentation improvements. -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.24.0](service/redshift/CHANGELOG.md#v1240-2022-05-06) - * **Feature**: Introduces new field 'LoadSampleData' in CreateCluster operation. Customers can now specify 'LoadSampleData' option during creation of a cluster, which results in loading of sample data in the cluster that is created. -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.21.1](service/securityhub/CHANGELOG.md#v1211-2022-05-06) - * **Documentation**: Documentation updates for Security Hub API reference - -# Release (2022-05-05) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.16.0](service/datasync/CHANGELOG.md#v1160-2022-05-05) - * **Feature**: AWS DataSync now supports a new ObjectTags Task API option that can be used to control whether Object Tags are transferred. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.39.0](service/ec2/CHANGELOG.md#v1390-2022-05-05) - * **Feature**: Amazon EC2 I4i instances are powered by 3rd generation Intel Xeon Scalable processors and feature up to 30 TB of local AWS Nitro SSD storage -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.25.0](service/iot/CHANGELOG.md#v1250-2022-05-05) - * **Feature**: AWS IoT Jobs now allows you to create up to 100,000 active continuous and snapshot jobs by using concurrency control. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.26.0](service/kendra/CHANGELOG.md#v1260-2022-05-05) - * **Feature**: AWS Kendra now supports hierarchical facets for a query. 
For more information, see https://docs.aws.amazon.com/kendra/latest/dg/filtering.html - -# Release (2022-05-04) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.16.0](service/backup/CHANGELOG.md#v1160-2022-05-04) - * **Feature**: Adds support to 2 new filters about job complete time for 3 list jobs APIs in AWS Backup -* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.13.0](service/iotsecuretunneling/CHANGELOG.md#v1130-2022-05-04) - * **Feature**: This release introduces a new API RotateTunnelAccessToken that allow revoking the existing tokens and generate new tokens -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.20.1](service/lightsail/CHANGELOG.md#v1201-2022-05-04) - * **Documentation**: Documentation updates for Lightsail -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.27.0](service/ssm/CHANGELOG.md#v1270-2022-05-04) - * **Feature**: This release adds the TargetMaps parameter in SSM State Manager API. - -# Release (2022-05-03) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.38.0](service/ec2/CHANGELOG.md#v1380-2022-05-03) - * **Feature**: Adds support for allocating Dedicated Hosts on AWS Outposts. The AllocateHosts API now accepts an OutpostArn request parameter, and the DescribeHosts API now includes an OutpostArn response parameter. -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.12.0](service/kinesisvideo/CHANGELOG.md#v1120-2022-05-03) - * **Feature**: Add support for multiple image feature related APIs for configuring image generation and notification of a video stream. Add "GET_IMAGES" to the list of supported API names for the GetDataEndpoint API. 
-* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.13.0](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1130-2022-05-03) - * **Feature**: Add support for GetImages API for retrieving images from a video stream -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.26.8](service/s3/CHANGELOG.md#v1268-2022-05-03) - * **Documentation**: Documentation only update for doc bug fixes for the S3 API docs. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.30.0](service/sagemaker/CHANGELOG.md#v1300-2022-05-03) - * **Feature**: SageMaker Autopilot adds new metrics for all candidate models generated by Autopilot experiments; RStudio on SageMaker now allows users to bring your own development environment in a custom image. - -# Release (2022-05-02) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.16.0](service/organizations/CHANGELOG.md#v1160-2022-05-02) - * **Feature**: This release adds the INVALID_PAYMENT_INSTRUMENT as a fail reason and an error message. -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.19.0](service/outposts/CHANGELOG.md#v1190-2022-05-02) - * **Feature**: This release adds a new API called ListAssets to the Outposts SDK, which lists the hardware assets in an Outpost. -* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.15.0](service/synthetics/CHANGELOG.md#v1150-2022-05-02) - * **Feature**: CloudWatch Synthetics has introduced a new feature to provide customers with an option to delete the underlying resources that Synthetics canary creates when the user chooses to delete the canary. - -# Release (2022-04-29) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.16.0](service/codegurureviewer/CHANGELOG.md#v1160-2022-04-29) - * **Feature**: Amazon CodeGuru Reviewer now supports suppressing recommendations from being generated on specific files and directories. 
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.23.0](service/mediaconvert/CHANGELOG.md#v1230-2022-04-29) - * **Feature**: AWS Elemental MediaConvert SDK now supports creation of Dolby Vision profile 8.1, the ability to generate black frames of video, and introduces audio-only DASH and CMAF support. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.21.0](service/rds/CHANGELOG.md#v1210-2022-04-29) - * **Feature**: Feature - Adds support for Internet Protocol Version 6 (IPv6) on RDS database instances. -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.26.0](service/ssm/CHANGELOG.md#v1260-2022-04-29) - * **Feature**: Update the StartChangeRequestExecution, adding TargetMaps to the Runbook parameter -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.20.0](service/wafv2/CHANGELOG.md#v1200-2022-04-29) - * **Feature**: You can now inspect all request headers and all cookies. You can now specify how to handle oversize body contents in your rules that inspect the body. - -# Release (2022-04-28) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.18.5](service/auditmanager/CHANGELOG.md#v1185-2022-04-28) - * **Documentation**: This release adds documentation updates for Audit Manager. We provided examples of how to use the Custom_ prefix for the keywordValue attribute. We also provided more details about the DeleteAssessmentReport operation. -* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.16.0](service/braket/CHANGELOG.md#v1160-2022-04-28) - * **Feature**: This release enables Braket Hybrid Jobs with Embedded Simulators to have multiple instances. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.24.0](service/connect/CHANGELOG.md#v1240-2022-04-28) - * **Feature**: This release introduces an API for changing the current agent status of a user in Connect. 
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.37.0](service/ec2/CHANGELOG.md#v1370-2022-04-28) - * **Feature**: This release adds support to query the public key and creation date of EC2 Key Pairs. Additionally, the format (pem or ppk) of a key pair can be specified when creating a new key pair. -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.13.5](service/guardduty/CHANGELOG.md#v1135-2022-04-28) - * **Documentation**: Documentation update for API description. -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.17.0](service/networkfirewall/CHANGELOG.md#v1170-2022-04-28) - * **Feature**: AWS Network Firewall adds support for stateful threat signature AWS managed rule groups. - -# Release (2022-04-27) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.11.5](service/amplify/CHANGELOG.md#v1115-2022-04-27) - * **Documentation**: Documentation only update to support the Amplify GitHub App feature launch -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.0.0](service/chimesdkmediapipelines/CHANGELOG.md#v100-2022-04-27) - * **Release**: New AWS service client module - * **Feature**: For Amazon Chime SDK meetings, the Amazon Chime Media Pipelines SDK allows builders to capture audio, video, and content share streams. You can also capture meeting events, live transcripts, and data messages. The pipelines save the artifacts to an Amazon S3 bucket that you designate. -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.16.0](service/cloudtrail/CHANGELOG.md#v1160-2022-04-27) - * **Feature**: Increases the retention period maximum to 2557 days. Deprecates unused fields of the ListEventDataStores API response. Updates documentation. 
-* `github.com/aws/aws-sdk-go-v2/service/internal/checksum`: [v1.1.5](service/internal/checksum/CHANGELOG.md#v115-2022-04-27) - * **Bug Fix**: Fixes a bug that could cause the SigV4 payload hash to be incorrectly encoded, leading to signing errors. -* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.19.0](service/iotwireless/CHANGELOG.md#v1190-2022-04-27) - * **Feature**: Add list support for event configurations, allow to get and update event configurations by resource type, support LoRaWAN events; Make NetworkAnalyzerConfiguration as a resource, add List, Create, Delete API support; Add FCntStart attribute support for ABP WirelessDevice. -* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.13.0](service/lookoutequipment/CHANGELOG.md#v1130-2022-04-27) - * **Feature**: This release adds the following new features: 1) Introduces an option for automatic schema creation 2) Now allows for Ingestion of data containing most common errors and allows automatic data cleaning 3) Introduces new API ListSensorStatistics that gives further information about the ingested data -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.18.0](service/rekognition/CHANGELOG.md#v1180-2022-04-27) - * **Feature**: This release adds support to configure stream-processor resources for label detections on streaming-videos. UpateStreamProcessor API is also launched with this release, which could be used to update an existing stream-processor. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.29.0](service/sagemaker/CHANGELOG.md#v1290-2022-04-27) - * **Feature**: Amazon SageMaker Autopilot adds support for custom validation dataset and validation ratio through the CreateAutoMLJob and DescribeAutoMLJob APIs. 
- -# Release (2022-04-26) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.17.0](service/cloudfront/CHANGELOG.md#v1170-2022-04-26) - * **Feature**: CloudFront now supports the Server-Timing header in HTTP responses sent from CloudFront. You can use this header to view metrics that help you gain insights about the behavior and performance of CloudFront. To use this header, enable it in a response headers policy. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.24.2](service/glue/CHANGELOG.md#v1242-2022-04-26) - * **Documentation**: This release adds documentation for the APIs to create, read, delete, list, and batch read of AWS Glue custom patterns, and for Lake Formation configuration settings in the AWS Glue crawler. -* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.0](service/ivschat/CHANGELOG.md#v100-2022-04-26) - * **Release**: New AWS service client module - * **Feature**: Adds new APIs for IVS Chat, a feature for building interactive chat experiences alongside an IVS broadcast. -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.20.0](service/lightsail/CHANGELOG.md#v1200-2022-04-26) - * **Feature**: This release adds support for Lightsail load balancer HTTP to HTTPS redirect and TLS policy configuration. -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.16.0](service/networkfirewall/CHANGELOG.md#v1160-2022-04-26) - * **Feature**: AWS Network Firewall now enables customers to use a customer managed AWS KMS key for the encryption of their firewall resources. 
-* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.14.5](service/pricing/CHANGELOG.md#v1145-2022-04-26) - * **Documentation**: Documentation updates for Price List API -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.28.0](service/sagemaker/CHANGELOG.md#v1280-2022-04-26) - * **Feature**: SageMaker Inference Recommender now accepts customer KMS key ID for encryption of endpoints and compilation outputs created during inference recommendation. - -# Release (2022-04-25) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.16.3 - * **Dependency Update**: Update SDK's internal copy of golang.org/x/sync/singleflight to address issue with test failing due to timing issues -* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.12.0](credentials/CHANGELOG.md#v1120-2022-04-25) - * **Feature**: Adds Duration and Policy options that can be used when creating stscreds.WebIdentityRoleProvider credentials provider. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.23.0](service/connect/CHANGELOG.md#v1230-2022-04-25) - * **Feature**: This release adds SearchUsers API which can be used to search for users with a Connect Instance -* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.14.4](service/gamelift/CHANGELOG.md#v1144-2022-04-25) - * **Documentation**: Documentation updates for Amazon GameLift. -* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.13.0](service/mq/CHANGELOG.md#v1130-2022-04-25) - * **Feature**: This release adds the CRITICAL_ACTION_REQUIRED broker state and the ActionRequired API property. CRITICAL_ACTION_REQUIRED informs you when your broker is degraded. ActionRequired provides you with a code which you can use to find instructions in the Developer Guide on how to resolve the issue. 
-* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.12.0](service/rdsdata/CHANGELOG.md#v1120-2022-04-25) - * **Feature**: Support to receive SQL query results in the form of a simplified JSON string. This enables developers using the new JSON string format to more easily convert it to an object using popular JSON string parsing libraries. -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.21.0](service/securityhub/CHANGELOG.md#v1210-2022-04-25) - * **Feature**: Security Hub now lets you opt-out of auto-enabling the defaults standards (CIS and FSBP) in accounts that are auto-enabled with Security Hub via Security Hub's integration with AWS Organizations. - -# Release (2022-04-22) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.9.0](service/chimesdkmeetings/CHANGELOG.md#v190-2022-04-22) - * **Feature**: Include additional exceptions types. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.36.0](service/ec2/CHANGELOG.md#v1360-2022-04-22) - * **Feature**: Adds support for waiters that automatically poll for a deleted NAT Gateway until it reaches the deleted state. 
- -# Release (2022-04-21) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.5](service/elasticache/CHANGELOG.md#v1205-2022-04-21) - * **Documentation**: Doc only update for ElastiCache -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.24.0](service/glue/CHANGELOG.md#v1240-2022-04-21) - * **Feature**: This release adds APIs to create, read, delete, list, and batch read of Glue custom entity types -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.21.0](service/iotsitewise/CHANGELOG.md#v1210-2022-04-21) - * **Feature**: This release adds 3 new batch data query APIs : BatchGetAssetPropertyValue, BatchGetAssetPropertyValueHistory and BatchGetAssetPropertyAggregates -* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.7.0](service/iottwinmaker/CHANGELOG.md#v170-2022-04-21) - * **Feature**: General availability (GA) for AWS IoT TwinMaker. For more information, see https://docs.aws.amazon.com/iot-twinmaker/latest/apireference/Welcome.html -* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.12.0](service/lookoutmetrics/CHANGELOG.md#v1120-2022-04-21) - * **Feature**: Added DetectMetricSetConfig API for detecting configuration required for creating metric set from provided S3 data source. -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.17.0](service/mediatailor/CHANGELOG.md#v1170-2022-04-21) - * **Feature**: This release introduces tiered channels and adds support for live sources. Customers using a STANDARD channel can now create programs using live sources. -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.5](service/secretsmanager/CHANGELOG.md#v1155-2022-04-21) - * **Documentation**: Documentation updates for Secrets Manager -* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.17.0](service/storagegateway/CHANGELOG.md#v1170-2022-04-21) - * **Feature**: This release adds support for minimum of 5 character length virtual tape barcodes. 
-* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.8.0](service/wisdom/CHANGELOG.md#v180-2022-04-21) - * **Feature**: This release updates the GetRecommendations API to include a trigger event list for classifying and grouping recommendations. - -# Release (2022-04-20) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.22.0](service/connect/CHANGELOG.md#v1220-2022-04-20) - * **Feature**: This release adds APIs to search, claim, release, list, update, and describe phone numbers. You can also use them to associate and disassociate contact flows to phone numbers. -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.21.0](service/macie2/CHANGELOG.md#v1210-2022-04-20) - * **Feature**: Sensitive data findings in Amazon Macie now indicate how Macie found the sensitive data that produced a finding (originType). -* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.14.0](service/mgn/CHANGELOG.md#v1140-2022-04-20) - * **Feature**: Removed required annotation from input fields in Describe operations requests. Added quotaValue to ServiceQuotaExceededException -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.20.0](service/rds/CHANGELOG.md#v1200-2022-04-20) - * **Feature**: Added a new cluster-level attribute to set the capacity range for Aurora Serverless v2 instances. - -# Release (2022-04-19) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.23.0](service/autoscaling/CHANGELOG.md#v1230-2022-04-19) - * **Feature**: EC2 Auto Scaling now adds default instance warm-up times for all scaling activities, health check replacements, and other replacement events in the Auto Scaling instance lifecycle. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.25.0](service/kendra/CHANGELOG.md#v1250-2022-04-19) - * **Feature**: Amazon Kendra now provides a data source connector for Quip. 
For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-quip.html -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.17.0](service/kms/CHANGELOG.md#v1170-2022-04-19) - * **Feature**: Adds support for KMS keys and APIs that generate and verify HMAC codes -* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.19.0](service/personalize/CHANGELOG.md#v1190-2022-04-19) - * **Feature**: Adding StartRecommender and StopRecommender APIs for Personalize. -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.15.0](service/polly/CHANGELOG.md#v1150-2022-04-19) - * **Feature**: Amazon Polly adds new Austrian German voice - Hannah. Hannah is available as Neural voice only. -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.23.0](service/redshift/CHANGELOG.md#v1230-2022-04-19) - * **Feature**: Introduces new fields for LogDestinationType and LogExports on EnableLogging requests and Enable/Disable/DescribeLogging responses. Customers can now select CloudWatch Logs as a destination for their Audit Logs. -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.25.0](service/ssm/CHANGELOG.md#v1250-2022-04-19) - * **Feature**: Added offset support for specifying the number of days to wait after the date and time specified by a CRON expression when creating SSM association. -* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.15.0](service/textract/CHANGELOG.md#v1150-2022-04-19) - * **Feature**: This release adds support for specifying and extracting information from documents using the Queries feature within Analyze Document API -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.4](service/transfer/CHANGELOG.md#v1184-2022-04-19) - * **Documentation**: This release contains corrected HomeDirectoryMappings examples for several API functions: CreateAccess, UpdateAccess, CreateUser, and UpdateUser. 
-* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.12.0](service/worklink/CHANGELOG.md#v1120-2022-04-19) - * **Feature**: Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK. - -# Release (2022-04-15) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.9.0](feature/dynamodb/attributevalue/CHANGELOG.md#v190-2022-04-15) - * **Feature**: Support has been added for specifying a custom time format when encoding and decoding DynamoDB AttributeValues. Use `EncoderOptions.EncodeTime` to specify a custom time encoding function, and use `DecoderOptions.DecodeTime` for specifying how to handle the corresponding AttributeValues using the format. Thank you [Pablo Lopez](https://github.com/plopezlpz) for this contribution. -* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.9.0](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v190-2022-04-15) - * **Feature**: Support has been added for specifying a custom time format when encoding and decoding DynamoDB AttributeValues. Use `EncoderOptions.EncodeTime` to specify a custom time encoding function, and use `DecoderOptions.DecodeTime` for specifying how to handle the corresponding AttributeValues using the format. Thank you [Pablo Lopez](https://github.com/plopezlpz) for this contribution. -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.15.0](service/athena/CHANGELOG.md#v1150-2022-04-15) - * **Feature**: This release adds subfields, ErrorMessage, Retryable, to the AthenaError response object in the GetQueryExecution API when a query fails. -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.19.0](service/lightsail/CHANGELOG.md#v1190-2022-04-15) - * **Feature**: This release adds support to describe the synchronization status of the account-level block public access feature for your Amazon Lightsail buckets. 
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.19.0](service/rds/CHANGELOG.md#v1190-2022-04-15) - * **Feature**: Removes Amazon RDS on VMware with the deletion of APIs related to Custom Availability Zones and Media installation - -# Release (2022-04-14) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.15.0](service/appflow/CHANGELOG.md#v1150-2022-04-14) - * **Feature**: Enables users to pass custom token URL parameters for Oauth2 authentication during create connector profile -* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.16.0](service/appstream/CHANGELOG.md#v1160-2022-04-14) - * **Feature**: Includes updates for create and update fleet APIs to manage the session scripts locations for Elastic fleets. -* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.18.0](service/batch/CHANGELOG.md#v1180-2022-04-14) - * **Feature**: Enables configuration updates for compute environments with BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies. -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.18.1](service/cloudwatch/CHANGELOG.md#v1181-2022-04-14) - * **Documentation**: Updates documentation for additional statistics in CloudWatch Metric Streams. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.35.1](service/ec2/CHANGELOG.md#v1351-2022-04-14) - * **Documentation**: Documentation updates for Amazon EC2. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.23.0](service/glue/CHANGELOG.md#v1230-2022-04-14) - * **Feature**: Auto Scaling for Glue version 3.0 and later jobs to dynamically scale compute resources. This SDK change provides customers with the auto-scaled DPU usage - -# Release (2022-04-13) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.18.0](service/cloudwatch/CHANGELOG.md#v1180-2022-04-13) - * **Feature**: Adds support for additional statistics in CloudWatch Metric Streams. 
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.23.0](service/fsx/CHANGELOG.md#v1230-2022-04-13) - * **Feature**: This release adds support for deploying FSx for ONTAP file systems in a single Availability Zone. - -# Release (2022-04-12) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.17.0](service/devopsguru/CHANGELOG.md#v1170-2022-04-12) - * **Feature**: This release adds new APIs DeleteInsight to delete the insight along with the associated anomalies, events and recommendations. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.35.0](service/ec2/CHANGELOG.md#v1350-2022-04-12) - * **Feature**: X2idn and X2iedn instances are powered by 3rd generation Intel Xeon Scalable processors with an all-core turbo frequency up to 3.5 GHz. Amazon EC2 C6a instances are powered by 3rd generation AMD EPYC processors. -* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.17.0](service/efs/CHANGELOG.md#v1170-2022-04-12) - * **Feature**: Amazon EFS adds support for a ThrottlingException when using the CreateAccessPoint API if the account is nearing the AccessPoint limit(120). -* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.6.0](service/iottwinmaker/CHANGELOG.md#v160-2022-04-12) - * **Feature**: This release adds the following new features: 1) ListEntities API now supports search using ExternalId. 2) BatchPutPropertyValue and GetPropertyValueHistory API now allows users to represent time in sub-second level precisions. -* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.15.4](service/kinesis/CHANGELOG.md#v1154-2022-04-12) - * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively. 
-* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.14.4](service/lexruntimev2/CHANGELOG.md#v1144-2022-04-12) - * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively. -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.26.5](service/s3/CHANGELOG.md#v1265-2022-04-12) - * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively. -* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.6.4](service/transcribestreaming/CHANGELOG.md#v164-2022-04-12) - * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively. - -# Release (2022-04-11) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.6.0](service/amplifyuibuilder/CHANGELOG.md#v160-2022-04-11) - * **Feature**: In this release, we have added the ability to bind events to component level actions. -* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.12.0](service/apprunner/CHANGELOG.md#v1120-2022-04-11) - * **Feature**: This release adds tracing for App Runner services with X-Ray using AWS Distro for OpenTelemetry. New APIs: CreateObservabilityConfiguration, DescribeObservabilityConfiguration, ListObservabilityConfigurations, and DeleteObservabilityConfiguration. Updated APIs: CreateService and UpdateService. -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.18.0](service/workspaces/CHANGELOG.md#v1180-2022-04-11) - * **Feature**: Added API support that allows customers to create GPU-enabled WorkSpaces using EC2 G4dn instances. 
- -# Release (2022-04-08) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.22.0](service/mediaconvert/CHANGELOG.md#v1220-2022-04-08) - * **Feature**: AWS Elemental MediaConvert SDK has added support for the pass-through of WebVTT styling to WebVTT outputs, pass-through of KLV metadata to supported formats, and improved filter support for processing 444/RGB content. -* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.17.0](service/mediapackagevod/CHANGELOG.md#v1170-2022-04-08) - * **Feature**: This release adds ScteMarkersSource as an available field for Dash Packaging Configurations. When set to MANIFEST, MediaPackage will source the SCTE-35 markers from the manifest. When set to SEGMENTS, MediaPackage will source the SCTE-35 markers from the segments. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.19.0](service/wafv2/CHANGELOG.md#v1190-2022-04-08) - * **Feature**: Add a new CurrentDefaultVersion field to ListAvailableManagedRuleGroupVersions API response; add a new VersioningSupported boolean to each ManagedRuleGroup returned from ListAvailableManagedRuleGroups API response. - -# Release (2022-04-07) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/internal/v4a`: [v1.0.0](internal/v4a/CHANGELOG.md#v100-2022-04-07) - * **Release**: New internal v4a signing module location. -* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.18.0](service/docdb/CHANGELOG.md#v1180-2022-04-07) - * **Feature**: Added support to enable/disable performance insights when creating or modifying db instances -* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.16.0](service/eventbridge/CHANGELOG.md#v1160-2022-04-07) - * **Feature**: Adds new EventBridge Endpoint resources for disaster recovery, multi-region failover, and cross-region replication capabilities to help you build resilient event-driven applications. 
-* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.18.0](service/personalize/CHANGELOG.md#v1180-2022-04-07) - * **Feature**: This release provides tagging support in AWS Personalize. -* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.14.4](service/pi/CHANGELOG.md#v1144-2022-04-07) - * **Documentation**: Adds support for DocumentDB to the Performance Insights API. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.27.0](service/sagemaker/CHANGELOG.md#v1270-2022-04-07) - * **Feature**: Amazon Sagemaker Notebook Instances now supports G5 instance types - -# Release (2022-04-06) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.21.0](service/configservice/CHANGELOG.md#v1210-2022-04-06) - * **Feature**: Add resourceType enums for AWS::EMR::SecurityConfiguration and AWS::SageMaker::CodeRepository -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.24.0](service/kendra/CHANGELOG.md#v1240-2022-04-06) - * **Feature**: Amazon Kendra now provides a data source connector for Box. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-box.html -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.22.0](service/lambda/CHANGELOG.md#v1220-2022-04-06) - * **Feature**: This release adds new APIs for creating and managing Lambda Function URLs and adds a new FunctionUrlAuthType parameter to the AddPermission API. Customers can use Function URLs to create built-in HTTPS endpoints on their functions. -* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.7.0](service/panorama/CHANGELOG.md#v170-2022-04-06) - * **Feature**: Added Brand field to device listings. - -# Release (2022-04-05) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.15.0](service/datasync/CHANGELOG.md#v1150-2022-04-05) - * **Feature**: AWS DataSync now supports Amazon FSx for OpenZFS locations. 
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.22.0](service/fsx/CHANGELOG.md#v1220-2022-04-05) - * **Feature**: Provide customers more visibility into file system status by adding new "Misconfigured Unavailable" status for Amazon FSx for Windows File Server. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.21.4](service/s3control/CHANGELOG.md#v1214-2022-04-05) - * **Documentation**: Documentation-only update for doc bug fixes for the S3 Control API docs. -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.20.0](service/securityhub/CHANGELOG.md#v1200-2022-04-05) - * **Feature**: Added additional ASFF details for RdsSecurityGroup AutoScalingGroup, ElbLoadBalancer, CodeBuildProject and RedshiftCluster. - -# Release (2022-04-04) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.24.0](service/iot/CHANGELOG.md#v1240-2022-04-04) - * **Feature**: AWS IoT - AWS IoT Device Defender adds support to list metric datapoints collected for IoT devices through the ListMetricValues API -* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.13.0](service/proton/CHANGELOG.md#v1130-2022-04-04) - * **Feature**: SDK release to support tagging for AWS Proton Repository resource -* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.14.0](service/servicecatalog/CHANGELOG.md#v1140-2022-04-04) - * **Feature**: This release adds ProvisioningArtifictOutputKeys to DescribeProvisioningParameters to reference the outputs of a Provisioned Product and deprecates ProvisioningArtifactOutputs. -* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.12.4](service/sms/CHANGELOG.md#v1124-2022-04-04) - * **Documentation**: Revised product update notice for SMS console deprecation. 
- -# Release (2022-04-01) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.21.0](service/connect/CHANGELOG.md#v1210-2022-04-01) - * **Feature**: This release updates these APIs: UpdateInstanceAttribute, DescribeInstanceAttribute and ListInstanceAttributes. You can use it to programmatically enable/disable multi-party conferencing using attribute type MULTI_PARTY_CONFERENCING on the specified Amazon Connect instance. - -# Release (2022-03-31) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.8.4](feature/dynamodb/attributevalue/CHANGELOG.md#v184-2022-03-31) - * **Documentation**: Fixes documentation typos in Number type's helper methods -* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.8.4](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v184-2022-03-31) - * **Documentation**: Fixes documentation typos in Number type's helper methods -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.18.3](service/auditmanager/CHANGELOG.md#v1183-2022-03-31) - * **Documentation**: This release adds documentation updates for Audit Manager. The updates provide data deletion guidance when a customer deregisters Audit Manager or deregisters a delegated administrator. -* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.9.0](service/cloudcontrol/CHANGELOG.md#v190-2022-03-31) - * **Feature**: SDK release for Cloud Control API in Amazon Web Services China (Beijing) Region, operated by Sinnet, and Amazon Web Services China (Ningxia) Region, operated by NWCD -* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.20.0](service/databrew/CHANGELOG.md#v1200-2022-03-31) - * **Feature**: This AWS Glue Databrew release adds feature to support ORC as an input format. 
-* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.8.0](service/grafana/CHANGELOG.md#v180-2022-03-31) - * **Feature**: This release adds tagging support to the Managed Grafana service. New APIs: TagResource, UntagResource and ListTagsForResource. Updates: add optional field tags to support tagging while calling CreateWorkspace. -* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2`: [v1.0.0](service/pinpointsmsvoicev2/CHANGELOG.md#v100-2022-03-31) - * **Release**: New AWS service client module - * **Feature**: Amazon Pinpoint now offers a version 2.0 suite of SMS and voice APIs, providing increased control over sending and configuration. This release is a new SDK for sending SMS and voice messages called PinpointSMSVoiceV2. -* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.9.0](service/route53recoverycluster/CHANGELOG.md#v190-2022-03-31) - * **Feature**: This release adds a new API "ListRoutingControls" to list routing control states using the highly reliable Route 53 ARC data plane endpoints. -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.17.0](service/workspaces/CHANGELOG.md#v1170-2022-03-31) - * **Feature**: Added APIs that allow you to customize the logo, login message, and help links in the WorkSpaces client login page. 
To learn more, visit https://docs.aws.amazon.com/workspaces/latest/adminguide/customize-branding.html - -# Release (2022-03-30) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.34.0](service/ec2/CHANGELOG.md#v1340-2022-03-30) - * **Feature**: This release simplifies the auto-recovery configuration process enabling customers to set the recovery behavior to disabled or default -* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.17.0](service/fms/CHANGELOG.md#v1170-2022-03-30) - * **Feature**: AWS Firewall Manager now supports the configuration of third-party policies that can use either the centralized or distributed deployment models. -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.21.0](service/fsx/CHANGELOG.md#v1210-2022-03-30) - * **Feature**: This release adds support for modifying throughput capacity for FSx for ONTAP file systems. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.23.3](service/iot/CHANGELOG.md#v1233-2022-03-30) - * **Documentation**: Doc only update for IoT that fixes customer-reported issues. -* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.12.0](service/iotdataplane/CHANGELOG.md#v1120-2022-03-30) - * **Feature**: Update the default AWS IoT Core Data Plane endpoint from VeriSign signed to ATS signed. If you have firewalls with strict egress rules, configure the rules to grant you access to data-ats.iot.[region].amazonaws.com or data-ats.iot.[region].amazonaws.com.cn. - -# Release (2022-03-29) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.15.0](service/organizations/CHANGELOG.md#v1150-2022-03-29) - * **Feature**: This release provides the new CloseAccount API that enables principals in the management account to close any member account within an organization. 
- -# Release (2022-03-28) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.17.3](service/acmpca/CHANGELOG.md#v1173-2022-03-28) - * **Documentation**: Updating service name entities -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.20.0](service/medialive/CHANGELOG.md#v1200-2022-03-28) - * **Feature**: This release adds support for selecting a maintenance window. - -# Release (2022-03-25) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.17.0](service/batch/CHANGELOG.md#v1170-2022-03-25) - * **Feature**: Bug Fix: Fixed a bug where shapes were marked as unboxed and were not serialized and sent over the wire, causing an API error from the service. - * This is a breaking change, and has been accepted due to the API operation not being usable due to the members modeled as unboxed (aka value) types. The update changes the members to boxed (aka pointer) types so that the zero value of the members can be handled correctly by the SDK and service. Your application will fail to compile with the updated module. To workaround this you'll need to update your application to use pointer types for the members impacted. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.33.0](service/ec2/CHANGELOG.md#v1330-2022-03-25) - * **Feature**: This release adds support for Amazon VPC Reachability Analyzer to analyze path through a Transit Gateway. -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.24.0](service/ssm/CHANGELOG.md#v1240-2022-03-25) - * **Feature**: This Patch Manager release supports creating, updating, and deleting Patch Baselines for Rocky Linux OS. 
- -# Release (2022-03-24) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.20.0](service/configservice/CHANGELOG.md#v1200-2022-03-24) - * **Feature**: Added new APIs GetCustomRulePolicy and GetOrganizationCustomRulePolicy, and updated existing APIs PutConfigRule, DescribeConfigRule, DescribeConfigRuleEvaluationStatus, PutOrganizationConfigRule, DescribeConfigRule to support a new feature for building AWS Config rules with AWS CloudFormation Guard -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.21.0](service/lambda/CHANGELOG.md#v1210-2022-03-24) - * **Feature**: Adds support for increased ephemeral storage (/tmp) up to 10GB for Lambda functions. Customers can now provision up to 10 GB of ephemeral storage per function instance, a 20x increase over the previous limit of 512 MB. -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.19.0](service/transcribe/CHANGELOG.md#v1190-2022-03-24) - * **Feature**: This release adds an additional parameter for subtitling with Amazon Transcribe batch jobs: outputStartIndex. - -# Release (2022-03-23) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.16.0 - * **Feature**: Update CredentialsCache to make use of two new optional CredentialsProvider interfaces to give the cache, per provider, behavior how the cache handles credentials that fail to refresh, and adjusting expires time. See [aws.CredentialsCache](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#CredentialsCache) for more details. - * **Feature**: Update `ec2rolecreds` package's `Provider` to implement support for CredentialsCache new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy. 
-* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.11.0](credentials/CHANGELOG.md#v1110-2022-03-23) - * **Feature**: Update `ec2rolecreds` package's `Provider` to implement support for CredentialsCache new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy. -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.18.0](service/auditmanager/CHANGELOG.md#v1180-2022-03-23) - * **Feature**: This release updates 1 API parameter, the SnsArn attribute. The character length and regex pattern for the SnsArn attribute have been updated, which enables you to deselect an SNS topic when using the UpdateSettings operation. -* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.15.0](service/ebs/CHANGELOG.md#v1150-2022-03-23) - * **Feature**: Increased the maximum supported value for the Timeout parameter of the StartSnapshot API from 60 minutes to 4320 minutes. Changed the HTTP error code for ConflictException from 503 to 409. -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.2](service/elasticache/CHANGELOG.md#v1202-2022-03-23) - * **Documentation**: Doc only update for ElastiCache -* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.0.0](service/gamesparks/CHANGELOG.md#v100-2022-03-23) - * **Release**: New AWS service client module - * **Feature**: Released the preview of Amazon GameSparks, a fully managed AWS service that provides a multi-service backend for game developers. -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.22.0](service/redshift/CHANGELOG.md#v1220-2022-03-23) - * **Feature**: This release adds a new [--encrypted | --no-encrypted] field in restore-from-cluster-snapshot API. Customers can now restore an unencrypted snapshot to a cluster encrypted with AWS Managed Key or their own KMS key. 
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.23.0](service/ssm/CHANGELOG.md#v1230-2022-03-23) - * **Feature**: Update AddTagsToResource, ListTagsForResource, and RemoveTagsFromResource APIs to reflect the support for tagging Automation resources. Includes other minor documentation updates. -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.1](service/transfer/CHANGELOG.md#v1181-2022-03-23) - * **Documentation**: Documentation updates for AWS Transfer Family to describe how to remove an associated workflow from a server. - -# Release (2022-03-22) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.18.0](service/costexplorer/CHANGELOG.md#v1180-2022-03-22) - * **Feature**: Added three new APIs to support tagging and resource-level authorization on Cost Explorer resources: TagResource, UntagResource, ListTagsForResource. Added optional parameters to CreateCostCategoryDefinition, CreateAnomalySubscription and CreateAnomalyMonitor APIs to support Tag On Create. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.2](service/ecs/CHANGELOG.md#v1182-2022-03-22) - * **Documentation**: Documentation only update to address tickets -* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.16.0](service/lakeformation/CHANGELOG.md#v1160-2022-03-22) - * **Feature**: The release fixes the incorrect permissions called out in the documentation - DESCRIBE_TAG, ASSOCIATE_TAG, DELETE_TAG, ALTER_TAG. This trebuchet release fixes the corresponding SDK and documentation. -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.16.0](service/location/CHANGELOG.md#v1160-2022-03-22) - * **Feature**: Amazon Location Service now includes a MaxResults parameter for GetDevicePositionHistory requests. -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.14.0](service/polly/CHANGELOG.md#v1140-2022-03-22) - * **Feature**: Amazon Polly adds new Catalan voice - Arlet. Arlet is available as Neural voice only. 
- -# Release (2022-03-21) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.8.0](service/chimesdkmeetings/CHANGELOG.md#v180-2022-03-21) - * **Feature**: Add support for media replication to link multiple WebRTC media sessions together to reach larger and global audiences. Participants connected to a replica session can be granted access to join the primary session and can switch sessions with their existing WebRTC connection -* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.17.0](service/ecr/CHANGELOG.md#v1170-2022-03-21) - * **Feature**: This release includes a fix in the DescribeImageScanFindings paginated output. -* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.16.0](service/mediaconnect/CHANGELOG.md#v1160-2022-03-21) - * **Feature**: This release adds support for selecting a maintenance window. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.21.0](service/quicksight/CHANGELOG.md#v1210-2022-03-21) - * **Feature**: AWS QuickSight Service Features - Expand public API support for group management. -* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.16.1](service/ram/CHANGELOG.md#v1161-2022-03-21) - * **Documentation**: Document improvements to the RAM API operations and parameter descriptions. - -# Release (2022-03-18) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.22.0](service/glue/CHANGELOG.md#v1220-2022-03-18) - * **Feature**: Added 9 new APIs for AWS Glue Interactive Sessions: ListSessions, StopSession, CreateSession, GetSession, DeleteSession, RunStatement, GetStatement, ListStatements, CancelStatement - -# Release (2022-03-16) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.17.0](service/acmpca/CHANGELOG.md#v1170-2022-03-16) - * **Feature**: AWS Certificate Manager (ACM) Private Certificate Authority (CA) now supports customizable certificate subject names and extensions. 
-* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.13.0](service/amplifybackend/CHANGELOG.md#v1130-2022-03-16) - * **Feature**: Adding the ability to customize Cognito verification messages for email and SMS in CreateBackendAuth and UpdateBackendAuth. Adding deprecation documentation for ForgotPassword in CreateBackendAuth and UpdateBackendAuth -* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.0.0](service/billingconductor/CHANGELOG.md#v100-2022-03-16) - * **Release**: New AWS service client module - * **Feature**: This is the initial SDK release for AWS Billing Conductor. The AWS Billing Conductor is a customizable billing service, allowing you to customize your billing data to match your desired business structure. -* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.13.0](service/s3outposts/CHANGELOG.md#v1130-2022-03-16) - * **Feature**: S3 on Outposts is releasing a new API, ListSharedEndpoints, that lists all endpoints associated with S3 on Outpost, that has been shared by Resource Access Manager (RAM). -* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.13.0](service/ssmincidents/CHANGELOG.md#v1130-2022-03-16) - * **Feature**: Removed incorrect validation pattern for IncidentRecordSource.invokedBy - -# Release (2022-03-15) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.15.0](service/cognitoidentityprovider/CHANGELOG.md#v1150-2022-03-15) - * **Feature**: Updated EmailConfigurationType and SmsConfigurationType to reflect that you can now choose Amazon SES and Amazon SNS resources in the same Region. -* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.15.0](service/dataexchange/CHANGELOG.md#v1150-2022-03-15) - * **Feature**: This feature enables data providers to use the RevokeRevision operation to revoke subscriber access to a given revision. Subscribers are unable to interact with assets within a revoked revision. 
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.32.0](service/ec2/CHANGELOG.md#v1320-2022-03-15) - * **Feature**: Adds the Cascade parameter to the DeleteIpam API. Customers can use this parameter to automatically delete their IPAM, including non-default scopes, pools, cidrs, and allocations. There mustn't be any pools provisioned in the default public scope to use this parameter. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.1](service/ecs/CHANGELOG.md#v1181-2022-03-15) - * **Documentation**: Documentation only update to address tickets -* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.0.2](service/keyspaces/CHANGELOG.md#v102-2022-03-15) - * **Documentation**: Fixing formatting issues in CLI and SDK documentation -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.15.1](service/location/CHANGELOG.md#v1151-2022-03-15) - * **Documentation**: New HERE style "VectorHereExplore" and "VectorHereExploreTruck". -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.18.1](service/rds/CHANGELOG.md#v1181-2022-03-15) - * **Documentation**: Various documentation improvements -* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.17.0](service/robomaker/CHANGELOG.md#v1170-2022-03-15) - * **Feature**: This release deprecates ROS, Ubuntu and Gazebo from RoboMaker Simulation Service Software Suites in favor of user-supplied containers and Relaxed Software Suites. 
- -# Release (2022-03-14) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.19.0](service/configservice/CHANGELOG.md#v1190-2022-03-14) - * **Feature**: Add resourceType enums for AWS::ECR::PublicRepository and AWS::EC2::LaunchTemplate -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.1](service/elasticache/CHANGELOG.md#v1201-2022-03-14) - * **Documentation**: Doc only update for ElastiCache -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.23.0](service/kendra/CHANGELOG.md#v1230-2022-03-14) - * **Feature**: Amazon Kendra now provides a data source connector for Slack. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-slack.html -* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.14.0](service/timestreamquery/CHANGELOG.md#v1140-2022-03-14) - * **Feature**: Amazon Timestream Scheduled Queries now support Timestamp datatype in a multi-measure record. - -# Release (2022-03-11) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.20.0](service/chime/CHANGELOG.md#v1200-2022-03-11) - * **Feature**: Chime VoiceConnector Logging APIs will now support MediaMetricLogs. Also CreateMeetingDialOut now returns AccessDeniedException. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.20.0](service/connect/CHANGELOG.md#v1200-2022-03-11) - * **Feature**: This release adds support for enabling Rich Messaging when starting a new chat session via the StartChatContact API. Rich Messaging enables the following formatting options: bold, italics, hyperlinks, bulleted lists, and numbered lists. -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.20.0](service/lambda/CHANGELOG.md#v1200-2022-03-11) - * **Feature**: Adds PrincipalOrgID support to AddPermission API. Customers can use it to manage permissions to lambda functions at AWS Organizations level. 
-* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.18.0](service/outposts/CHANGELOG.md#v1180-2022-03-11) - * **Feature**: This release adds address filters for listSites -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.1](service/secretsmanager/CHANGELOG.md#v1151-2022-03-11) - * **Documentation**: Documentation updates for Secrets Manager. -* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.6.0](service/transcribestreaming/CHANGELOG.md#v160-2022-03-11) - * **Feature**: Amazon Transcribe StartTranscription API now supports additional parameters for Language Identification feature: customVocabularies and customFilterVocabularies - -# Release (2022-03-10) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.20.0](service/lexmodelsv2/CHANGELOG.md#v1200-2022-03-10) - * **Feature**: This release makes slotTypeId an optional parameter in CreateSlot and UpdateSlot APIs in Amazon Lex V2 for model building. Customers can create and update slots without specifying a slot type id. -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.18.0](service/transcribe/CHANGELOG.md#v1180-2022-03-10) - * **Feature**: Documentation fix for API `StartMedicalTranscriptionJobRequest`, now showing min sample rate as 16khz -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.0](service/transfer/CHANGELOG.md#v1180-2022-03-10) - * **Feature**: Adding more descriptive error types for managed workflows - -# Release (2022-03-09) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.17.0](service/comprehend/CHANGELOG.md#v1170-2022-03-09) - * **Feature**: Amazon Comprehend now supports extracting the sentiment associated with entities such as brands, products and services from text documents. - -# Release (2022-03-08.3) - -* No change notes available for this release. - -# Release (2022-03-08.2) - -* No change notes available for this release. 
- -# Release (2022-03-08) - -## General Highlights -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.11.0](service/amplify/CHANGELOG.md#v1110-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.5.0](service/amplifyuibuilder/CHANGELOG.md#v150-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.14.0](service/appflow/CHANGELOG.md#v1140-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.11.0](service/apprunner/CHANGELOG.md#v1110-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.14.0](service/athena/CHANGELOG.md#v1140-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.15.0](service/braket/CHANGELOG.md#v1150-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.7.0](service/chimesdkmeetings/CHANGELOG.md#v170-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.15.0](service/cloudtrail/CHANGELOG.md#v1150-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.19.0](service/connect/CHANGELOG.md#v1190-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.16.0](service/devopsguru/CHANGELOG.md#v1160-2022-03-08) - * **Feature**: Updated service client model to latest release. 
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.31.0](service/ec2/CHANGELOG.md#v1310-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.16.0](service/ecr/CHANGELOG.md#v1160-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.0](service/ecs/CHANGELOG.md#v1180-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.0](service/elasticache/CHANGELOG.md#v1200-2022-03-08) - * **Documentation**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.10.0](service/finspacedata/CHANGELOG.md#v1100-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.12.0](service/fis/CHANGELOG.md#v1120-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.20.0](service/fsx/CHANGELOG.md#v1200-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.14.0](service/gamelift/CHANGELOG.md#v1140-2022-03-08) - * **Documentation**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.15.0](service/greengrassv2/CHANGELOG.md#v1150-2022-03-08) - * **Documentation**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/internal/checksum`: [v1.1.0](service/internal/checksum/CHANGELOG.md#v110-2022-03-08) - * **Feature**: Updates the SDK's checksum validation logic to require opt-in to output response payload validation. The SDK was always preforming output response payload checksum validation, not respecting the output validation model option. 
Fixes [#1606](https://github.com/aws/aws-sdk-go-v2/issues/1606) -* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.8.0](service/kafkaconnect/CHANGELOG.md#v180-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.22.0](service/kendra/CHANGELOG.md#v1220-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.0.0](service/keyspaces/CHANGELOG.md#v100-2022-03-08) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.14.0](service/macie/CHANGELOG.md#v1140-2022-03-08) - * **Documentation**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.15.0](service/mediapackage/CHANGELOG.md#v1150-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.13.0](service/mgn/CHANGELOG.md#v1130-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.5.0](service/migrationhubrefactorspaces/CHANGELOG.md#v150-2022-03-08) - * **Documentation**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.12.0](service/mq/CHANGELOG.md#v1120-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.6.0](service/panorama/CHANGELOG.md#v160-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.18.0](service/rds/CHANGELOG.md#v1180-2022-03-08) - * **Documentation**: Updated service client model to latest release. 
-* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.8.0](service/route53recoverycluster/CHANGELOG.md#v180-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.12.0](service/servicecatalogappregistry/CHANGELOG.md#v1120-2022-03-08) - * **Documentation**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.18.0](service/sqs/CHANGELOG.md#v1180-2022-03-08) - * **Feature**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.16.0](service/sts/CHANGELOG.md#v1160-2022-03-08) - * **Documentation**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.14.0](service/synthetics/CHANGELOG.md#v1140-2022-03-08) - * **Documentation**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.13.0](service/timestreamquery/CHANGELOG.md#v1130-2022-03-08) - * **Documentation**: Updated service client model to latest release. -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.17.0](service/transfer/CHANGELOG.md#v1170-2022-03-08) - * **Feature**: Updated service client model to latest release. 
- -# Release (2022-02-24.2) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.21.0](service/autoscaling/CHANGELOG.md#v1210-2022-02-242) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.18.0](service/databrew/CHANGELOG.md#v1180-2022-02-242) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.15.0](service/fms/CHANGELOG.md#v1150-2022-02-242) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.17.0](service/lightsail/CHANGELOG.md#v1170-2022-02-242) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.19.0](service/route53/CHANGELOG.md#v1190-2022-02-242) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.20.0](service/s3control/CHANGELOG.md#v1200-2022-02-242) - * **Feature**: API client updated - -# Release (2022-02-24) - -## General Highlights -* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttle responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Bug Fix**: Fixes the AWS Sigv4 signer to trim header value's whitespace when computing the canonical headers block of the string to sign. -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.14.0 - * **Feature**: Add new AdaptiveMode retryer to aws/retry package. 
This new retryer uses dynamic token bucketing with client ratelimiting when throttle responses are received. - * **Feature**: Adds new interface aws.RetryerV2, replacing aws.Retryer and deprecating the GetInitialToken method in favor of GetAttemptToken so Context can be provided. The SDK will use aws.RetryerV2 internally. Wrapping aws.Retryers as aws.RetryerV2 automatically. -* `github.com/aws/aws-sdk-go-v2/config`: [v1.14.0](config/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: Adds support for loading RetryMaxAttempts and RetryMode from the environment and shared configuration files. These parameters drive how the SDK's API client will initialize its default retryer, if a custom retryer has not been specified. See [config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) module and [aws.Config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config) for more information about and how to use these new options. - * **Feature**: Adds support for the `ca_bundle` parameter in shared config and credentials files. The usage of the file is the same as environment variable, `AWS_CA_BUNDLE`, but sourced from shared config. Fixes [#1589](https://github.com/aws/aws-sdk-go-v2/issues/1589) -* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.9.0](credentials/CHANGELOG.md#v190-2022-02-24) - * **Feature**: Adds support for `SourceIdentity` to `stscreds.AssumeRoleProvider` [#1588](https://github.com/aws/aws-sdk-go-v2/pull/1588). 
Fixes [#1575](https://github.com/aws/aws-sdk-go-v2/issues/1575) -* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.7.0](feature/dynamodb/attributevalue/CHANGELOG.md#v170-2022-02-24) - * **Feature**: Fixes [#645](https://github.com/aws/aws-sdk-go-v2/issues/645), [#411](https://github.com/aws/aws-sdk-go-v2/issues/411) by adding support for (un)marshaling AttributeValue maps to Go maps key types of string, number, bool, and types implementing encoding.Text(un)Marshaler interface - * **Bug Fix**: Fixes [#1569](https://github.com/aws/aws-sdk-go-v2/issues/1569) inconsistent serialization of Go struct field names -* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression`: [v1.4.0](feature/dynamodb/expression/CHANGELOG.md#v140-2022-02-24) - * **Feature**: Add support for expression names with dots via new NameBuilder function NameNoDotSplit, related to [aws/aws-sdk-go#2570](https://github.com/aws/aws-sdk-go/issues/2570) -* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.7.0](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v170-2022-02-24) - * **Feature**: Fixes [#645](https://github.com/aws/aws-sdk-go-v2/issues/645), [#411](https://github.com/aws/aws-sdk-go-v2/issues/411) by adding support for (un)marshaling AttributeValue maps to Go maps key types of string, number, bool, and types implementing encoding.Text(un)Marshaler interface - * **Bug Fix**: Fixes [#1569](https://github.com/aws/aws-sdk-go-v2/issues/1569) inconsistent serialization of Go struct field names -* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.14.0](service/accessanalyzer/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.5.0](service/account/CHANGELOG.md#v150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.13.0](service/acm/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* 
`github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.15.0](service/acmpca/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/alexaforbusiness`: [v1.13.0](service/alexaforbusiness/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.13.0](service/amp/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.10.0](service/amplify/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.11.0](service/amplifybackend/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.4.0](service/amplifyuibuilder/CHANGELOG.md#v140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.14.0](service/apigateway/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.9.0](service/apigatewaymanagementapi/CHANGELOG.md#v190-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.11.0](service/apigatewayv2/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.11.0](service/appconfig/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.3.0](service/appconfigdata/CHANGELOG.md#v130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.13.0](service/appflow/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.12.0](service/appintegrations/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* 
`github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.14.0](service/applicationautoscaling/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.8.0](service/applicationcostprofiler/CHANGELOG.md#v180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.11.0](service/applicationdiscoveryservice/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.14.0](service/applicationinsights/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.12.0](service/appmesh/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.10.0](service/apprunner/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.14.0](service/appstream/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.13.0](service/appsync/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.13.0](service/athena/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.16.0](service/auditmanager/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.20.0](service/autoscaling/CHANGELOG.md#v1200-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.11.0](service/autoscalingplans/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.14.0](service/backup/CHANGELOG.md#v1140-2022-02-24) - 
* **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.4.0](service/backupgateway/CHANGELOG.md#v140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.15.0](service/batch/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.14.0](service/braket/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.11.0](service/budgets/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.18.0](service/chime/CHANGELOG.md#v1180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.8.0](service/chimesdkidentity/CHANGELOG.md#v180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.6.0](service/chimesdkmeetings/CHANGELOG.md#v160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.8.0](service/chimesdkmessaging/CHANGELOG.md#v180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.15.0](service/cloud9/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.7.0](service/cloudcontrol/CHANGELOG.md#v170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.11.0](service/clouddirectory/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.19.0](service/cloudformation/CHANGELOG.md#v1190-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.15.0](service/cloudfront/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* 
`github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.11.0](service/cloudhsm/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.12.0](service/cloudhsmv2/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.12.0](service/cloudsearch/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudsearchdomain`: [v1.10.0](service/cloudsearchdomain/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.14.0](service/cloudtrail/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.16.0](service/cloudwatch/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.13.0](service/cloudwatchevents/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.14.0](service/cloudwatchlogs/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.11.0](service/codeartifact/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.18.0](service/codebuild/CHANGELOG.md#v1180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.12.0](service/codecommit/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.13.0](service/codedeploy/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.11.0](service/codeguruprofiler/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* 
`github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.14.0](service/codegurureviewer/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.12.0](service/codepipeline/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.10.0](service/codestar/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codestarconnections`: [v1.12.0](service/codestarconnections/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.10.0](service/codestarnotifications/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.12.0](service/cognitoidentity/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.13.0](service/cognitoidentityprovider/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cognitosync`: [v1.10.0](service/cognitosync/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.15.0](service/comprehend/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.12.0](service/comprehendmedical/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.16.0](service/computeoptimizer/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.17.0](service/configservice/CHANGELOG.md#v1170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/connect`: 
[v1.18.0](service/connect/CHANGELOG.md#v1180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.11.0](service/connectcontactlens/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.10.0](service/connectparticipant/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: [v1.12.0](service/costandusagereportservice/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.16.0](service/costexplorer/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.16.0](service/customerprofiles/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.17.0](service/databasemigrationservice/CHANGELOG.md#v1170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.17.0](service/databrew/CHANGELOG.md#v1170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.13.0](service/dataexchange/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/datapipeline`: [v1.12.0](service/datapipeline/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.13.0](service/datasync/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.10.0](service/dax/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.14.0](service/detective/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* 
`github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.12.0](service/devicefarm/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.15.0](service/devopsguru/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.16.0](service/directconnect/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.12.0](service/directoryservice/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.10.0](service/dlm/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.16.0](service/docdb/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.4.0](service/drs/CHANGELOG.md#v140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.14.0](service/dynamodb/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.12.0](service/dynamodbstreams/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.13.0](service/ebs/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.30.0](service/ec2/CHANGELOG.md#v1300-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.12.0](service/ec2instanceconnect/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.15.0](service/ecr/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: 
[v1.12.0](service/ecrpublic/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.17.0](service/ecs/CHANGELOG.md#v1170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.15.0](service/efs/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.19.0](service/eks/CHANGELOG.md#v1190-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.19.0](service/elasticache/CHANGELOG.md#v1190-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.13.0](service/elasticbeanstalk/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticinference`: [v1.10.0](service/elasticinference/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.13.0](service/elasticloadbalancing/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.17.0](service/elasticloadbalancingv2/CHANGELOG.md#v1170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.14.0](service/elasticsearchservice/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elastictranscoder`: [v1.12.0](service/elastictranscoder/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.16.0](service/emr/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.12.0](service/emrcontainers/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* 
`github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.14.0](service/eventbridge/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.5.0](service/evidently/CHANGELOG.md#v150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.7.0](service/finspace/CHANGELOG.md#v170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.9.0](service/finspacedata/CHANGELOG.md#v190-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.13.0](service/firehose/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.11.0](service/fis/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.14.0](service/fms/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.18.0](service/forecast/CHANGELOG.md#v1180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.10.0](service/forecastquery/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated - * **Bug Fix**: Fixed an issue that resulted in the wrong service endpoints being constructed. 
-* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.18.0](service/frauddetector/CHANGELOG.md#v1180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.19.0](service/fsx/CHANGELOG.md#v1190-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.13.0](service/gamelift/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.12.0](service/glacier/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.12.0](service/globalaccelerator/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.20.0](service/glue/CHANGELOG.md#v1200-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.6.0](service/grafana/CHANGELOG.md#v160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.12.0](service/greengrass/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.14.0](service/greengrassv2/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.12.0](service/groundstation/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.12.0](service/guardduty/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.14.0](service/health/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.13.0](service/healthlake/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/honeycode`: 
[v1.11.0](service/honeycode/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.17.0](service/iam/CHANGELOG.md#v1170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.13.0](service/identitystore/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.18.0](service/imagebuilder/CHANGELOG.md#v1180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/inspector`: [v1.11.0](service/inspector/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.5.0](service/inspector2/CHANGELOG.md#v150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/internal/checksum`: [v1.0.0](service/internal/checksum/CHANGELOG.md#v100-2022-02-24) - * **Release**: New module for computing checksums -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.22.0](service/iot/CHANGELOG.md#v1220-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.9.0](service/iot1clickdevicesservice/CHANGELOG.md#v190-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iot1clickprojects`: [v1.10.0](service/iot1clickprojects/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.11.0](service/iotanalytics/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.10.0](service/iotdataplane/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.13.0](service/iotdeviceadvisor/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* 
`github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.13.0](service/iotevents/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.10.0](service/ioteventsdata/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.11.0](service/iotfleethub/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.10.0](service/iotjobsdataplane/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.11.0](service/iotsecuretunneling/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.19.0](service/iotsitewise/CHANGELOG.md#v1190-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.11.0](service/iotthingsgraph/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.4.0](service/iottwinmaker/CHANGELOG.md#v140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.17.0](service/iotwireless/CHANGELOG.md#v1170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.15.0](service/ivs/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.16.0](service/kafka/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.7.0](service/kafkaconnect/CHANGELOG.md#v170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.21.0](service/kendra/CHANGELOG.md#v1210-2022-02-24) - * **Feature**: API client updated -* 
`github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.14.0](service/kinesis/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.12.0](service/kinesisanalytics/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.13.0](service/kinesisanalyticsv2/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.10.0](service/kinesisvideo/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.11.0](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia`: [v1.9.0](service/kinesisvideomedia/CHANGELOG.md#v190-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kinesisvideosignaling`: [v1.9.0](service/kinesisvideosignaling/CHANGELOG.md#v190-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.15.0](service/kms/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.14.0](service/lakeformation/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.18.0](service/lambda/CHANGELOG.md#v1180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.15.0](service/lexmodelbuildingservice/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.18.0](service/lexmodelsv2/CHANGELOG.md#v1180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexruntimeservice`: 
[v1.11.0](service/lexruntimeservice/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.13.0](service/lexruntimev2/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.14.0](service/licensemanager/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.16.0](service/lightsail/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.14.0](service/location/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.11.0](service/lookoutequipment/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.10.0](service/lookoutmetrics/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.11.0](service/lookoutvision/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/machinelearning`: [v1.13.0](service/machinelearning/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.13.0](service/macie/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.19.0](service/macie2/CHANGELOG.md#v1190-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.11.0](service/managedblockchain/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.11.0](service/marketplacecatalog/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* 
`github.com/aws/aws-sdk-go-v2/service/marketplacecommerceanalytics`: [v1.10.0](service/marketplacecommerceanalytics/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/marketplaceentitlementservice`: [v1.10.0](service/marketplaceentitlementservice/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.12.0](service/marketplacemetering/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.14.0](service/mediaconnect/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.20.0](service/mediaconvert/CHANGELOG.md#v1200-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.18.0](service/medialive/CHANGELOG.md#v1180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.14.0](service/mediapackage/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.15.0](service/mediapackagevod/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediastore`: [v1.11.0](service/mediastore/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediastoredata`: [v1.11.0](service/mediastoredata/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.15.0](service/mediatailor/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.8.0](service/memorydb/CHANGELOG.md#v180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mgn`: 
[v1.12.0](service/mgn/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/migrationhub`: [v1.11.0](service/migrationhub/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/migrationhubconfig`: [v1.11.0](service/migrationhubconfig/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.4.0](service/migrationhubrefactorspaces/CHANGELOG.md#v140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.4.0](service/migrationhubstrategy/CHANGELOG.md#v140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.10.0](service/mobile/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.11.0](service/mq/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mturk`: [v1.12.0](service/mturk/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.11.0](service/mwaa/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.15.0](service/neptune/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.14.0](service/networkfirewall/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.11.0](service/networkmanager/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.11.0](service/nimble/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: 
[v1.8.0](service/opensearch/CHANGELOG.md#v180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/opsworks`: [v1.12.0](service/opsworks/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.13.0](service/opsworkscm/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.13.0](service/organizations/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.16.0](service/outposts/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.5.0](service/panorama/CHANGELOG.md#v150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.16.0](service/personalize/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.10.0](service/personalizeevents/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.10.0](service/personalizeruntime/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.13.0](service/pi/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.15.0](service/pinpoint/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/pinpointemail`: [v1.10.0](service/pinpointemail/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.9.0](service/pinpointsmsvoice/CHANGELOG.md#v190-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/polly`: 
[v1.12.0](service/polly/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.13.0](service/pricing/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.11.0](service/proton/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.13.0](service/qldb/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.12.0](service/qldbsession/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.19.0](service/quicksight/CHANGELOG.md#v1190-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.15.0](service/ram/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.5.0](service/rbin/CHANGELOG.md#v150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.17.0](service/rds/CHANGELOG.md#v1170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.10.0](service/rdsdata/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.20.0](service/redshift/CHANGELOG.md#v1200-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.14.0](service/redshiftdata/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.16.0](service/rekognition/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.4.0](service/resiliencehub/CHANGELOG.md#v140-2022-02-24) - * **Feature**: API client updated -* 
`github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.11.0](service/resourcegroups/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.12.0](service/resourcegroupstaggingapi/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.15.0](service/robomaker/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.18.0](service/route53/CHANGELOG.md#v1180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.11.0](service/route53domains/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.7.0](service/route53recoverycluster/CHANGELOG.md#v170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.8.0](service/route53recoverycontrolconfig/CHANGELOG.md#v180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.7.0](service/route53recoveryreadiness/CHANGELOG.md#v170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.14.0](service/route53resolver/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.5.0](service/rum/CHANGELOG.md#v150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.25.0](service/s3/CHANGELOG.md#v1250-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.19.0](service/s3control/CHANGELOG.md#v1190-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: 
[v1.11.0](service/s3outposts/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.25.0](service/sagemaker/CHANGELOG.md#v1250-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.11.0](service/sagemakera2iruntime/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.10.0](service/sagemakeredge/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.10.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.14.0](service/sagemakerruntime/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.10.0](service/savingsplans/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.13.0](service/schemas/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.14.0](service/secretsmanager/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.18.0](service/securityhub/CHANGELOG.md#v1180-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository`: [v1.10.0](service/serverlessapplicationrepository/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.12.0](service/servicecatalog/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: 
[v1.11.0](service/servicecatalogappregistry/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.16.0](service/servicediscovery/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/servicequotas`: [v1.12.0](service/servicequotas/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.13.0](service/ses/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.12.0](service/sesv2/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.12.0](service/sfn/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.15.0](service/shield/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.12.0](service/signer/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.11.0](service/sms/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.14.0](service/snowball/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.7.0](service/snowdevicemanagement/CHANGELOG.md#v170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.16.0](service/sns/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.17.0](service/sqs/CHANGELOG.md#v1170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.21.0](service/ssm/CHANGELOG.md#v1210-2022-02-24) - * **Feature**: API client updated -* 
`github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.12.0](service/ssmcontacts/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.11.0](service/ssmincidents/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.10.0](service/sso/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.13.0](service/ssoadmin/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.11.0](service/ssooidc/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.15.0](service/storagegateway/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.15.0](service/sts/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.12.0](service/support/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.12.0](service/swf/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.13.0](service/synthetics/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.13.0](service/textract/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.12.0](service/timestreamquery/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.12.0](service/timestreamwrite/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: 
[v1.16.0](service/transcribe/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.4.0](service/transcribestreaming/CHANGELOG.md#v140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.16.0](service/transfer/CHANGELOG.md#v1160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.12.0](service/translate/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.7.0](service/voiceid/CHANGELOG.md#v170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.10.0](service/waf/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.11.0](service/wafregional/CHANGELOG.md#v1110-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.17.0](service/wafv2/CHANGELOG.md#v1170-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.13.0](service/wellarchitected/CHANGELOG.md#v1130-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.6.0](service/wisdom/CHANGELOG.md#v160-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.10.0](service/workdocs/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.10.0](service/worklink/CHANGELOG.md#v1100-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.14.0](service/workmail/CHANGELOG.md#v1140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/workmailmessageflow`: [v1.10.0](service/workmailmessageflow/CHANGELOG.md#v1100-2022-02-24) - * 
**Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.15.0](service/workspaces/CHANGELOG.md#v1150-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.4.0](service/workspacesweb/CHANGELOG.md#v140-2022-02-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.12.0](service/xray/CHANGELOG.md#v1120-2022-02-24) - * **Feature**: API client updated - -# Release (2022-01-28) - -## General Highlights -* **Bug Fix**: Fixes the SDK's handling of `duration_seconds` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for helping reproduce this bug. -* **Bug Fix**: Updates SDK API client deserialization to pre-allocate byte slice and string response payloads, [#1565](https://github.com/aws/aws-sdk-go-v2/pull/1565). Thanks to [Tyson Mote](https://github.com/tysonmote) for submitting this PR. -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/config`: [v1.13.1](config/CHANGELOG.md#v1131-2022-01-28) - * **Bug Fix**: Fixes LoadDefaultConfig handling of errors returned by passed in functional options. Previously errors returned from the LoadOptions passed into LoadDefaultConfig were incorrectly ignored. [#1562](https://github.com/aws/aws-sdk-go-v2/pull/1562). Thanks to [Pinglei Guo](https://github.com/pingleig) for submitting this PR. - * **Bug Fix**: Updates `config` module to use os.UserHomeDir instead of hard coded environment variable for OS. [#1563](https://github.com/aws/aws-sdk-go-v2/pull/1563) -* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.13.0](service/applicationinsights/CHANGELOG.md#v1130-2022-01-28) - * **Feature**: Updated to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.13.1](service/cloudtrail/CHANGELOG.md#v1131-2022-01-28) - * **Documentation**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.13.1](service/codegurureviewer/CHANGELOG.md#v1131-2022-01-28) - * **Documentation**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.16.0](service/configservice/CHANGELOG.md#v1160-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.17.0](service/connect/CHANGELOG.md#v1170-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.12.1](service/ebs/CHANGELOG.md#v1121-2022-01-28) - * **Documentation**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.29.0](service/ec2/CHANGELOG.md#v1290-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.11.0](service/ec2instanceconnect/CHANGELOG.md#v1110-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.14.0](service/efs/CHANGELOG.md#v1140-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.10.0](service/fis/CHANGELOG.md#v1100-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.17.0](service/frauddetector/CHANGELOG.md#v1170-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.18.0](service/fsx/CHANGELOG.md#v1180-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.11.0](service/greengrass/CHANGELOG.md#v1110-2022-01-28) - * **Feature**: Updated to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.13.0](service/greengrassv2/CHANGELOG.md#v1130-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.11.0](service/guardduty/CHANGELOG.md#v1110-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.10.0](service/honeycode/CHANGELOG.md#v1100-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.14.0](service/ivs/CHANGELOG.md#v1140-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.15.0](service/kafka/CHANGELOG.md#v1150-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.13.0](service/location/CHANGELOG.md#v1130-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.9.0](service/lookoutmetrics/CHANGELOG.md#v190-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.18.0](service/macie2/CHANGELOG.md#v1180-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.19.0](service/mediaconvert/CHANGELOG.md#v1190-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.14.0](service/mediatailor/CHANGELOG.md#v1140-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.14.0](service/ram/CHANGELOG.md#v1140-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.6.1](service/route53recoveryreadiness/CHANGELOG.md#v161-2022-01-28) - * **Documentation**: Updated to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.24.0](service/sagemaker/CHANGELOG.md#v1240-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.17.0](service/securityhub/CHANGELOG.md#v1170-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.14.0](service/storagegateway/CHANGELOG.md#v1140-2022-01-28) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.15.0](service/transcribe/CHANGELOG.md#v1150-2022-01-28) - * **Feature**: Updated to latest API model. - -# Release (2022-01-14) - -## General Highlights -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.13.0 - * **Bug Fix**: Updates the Retry middleware to release the retry token, on subsequent attempts. This fixes #1413, and is based on PR #1424 -* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.6.0](feature/dynamodb/attributevalue/CHANGELOG.md#v160-2022-01-14) - * **Feature**: Adds new MarshalWithOptions and UnmarshalWithOptions helpers allowing Encoding and Decoding options to be specified when serializing AttributeValues. Addresses issue: https://github.com/aws/aws-sdk-go-v2/issues/1494 -* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.6.0](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v160-2022-01-14) - * **Feature**: Adds new MarshalWithOptions and UnmarshalWithOptions helpers allowing Encoding and Decoding options to be specified when serializing AttributeValues. 
Addresses issue: https://github.com/aws/aws-sdk-go-v2/issues/1494 -* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.12.0](service/appsync/CHANGELOG.md#v1120-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.10.0](service/autoscalingplans/CHANGELOG.md#v1100-2022-01-14) - * **Documentation**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.15.0](service/computeoptimizer/CHANGELOG.md#v1150-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.15.0](service/costexplorer/CHANGELOG.md#v1150-2022-01-14) - * **Documentation**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.16.0](service/databasemigrationservice/CHANGELOG.md#v1160-2022-01-14) - * **Documentation**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.16.0](service/databrew/CHANGELOG.md#v1160-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.28.0](service/ec2/CHANGELOG.md#v1280-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.18.0](service/elasticache/CHANGELOG.md#v1180-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.13.0](service/elasticsearchservice/CHANGELOG.md#v1130-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.8.0](service/finspacedata/CHANGELOG.md#v180-2022-01-14) - * **Documentation**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.13.0](service/fms/CHANGELOG.md#v1130-2022-01-14) - * **Documentation**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.19.0](service/glue/CHANGELOG.md#v1190-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/honeycode`: 
[v1.9.0](service/honeycode/CHANGELOG.md#v190-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.12.0](service/identitystore/CHANGELOG.md#v1120-2022-01-14) - * **Documentation**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.9.0](service/ioteventsdata/CHANGELOG.md#v190-2022-01-14) - * **Documentation**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.16.0](service/iotwireless/CHANGELOG.md#v1160-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.20.0](service/kendra/CHANGELOG.md#v1200-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.17.0](service/lexmodelsv2/CHANGELOG.md#v1170-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.12.0](service/lexruntimev2/CHANGELOG.md#v1120-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.8.0](service/lookoutmetrics/CHANGELOG.md#v180-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.17.0](service/medialive/CHANGELOG.md#v1170-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.13.0](service/mediatailor/CHANGELOG.md#v1130-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.10.0](service/mwaa/CHANGELOG.md#v1100-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.10.0](service/nimble/CHANGELOG.md#v1100-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.7.0](service/opensearch/CHANGELOG.md#v170-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.12.0](service/pi/CHANGELOG.md#v1120-2022-01-14) - 
* **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.14.0](service/pinpoint/CHANGELOG.md#v1140-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.16.0](service/rds/CHANGELOG.md#v1160-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.20.0](service/ssm/CHANGELOG.md#v1200-2022-01-14) - * **Feature**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.9.0](service/sso/CHANGELOG.md#v190-2022-01-14) - * **Documentation**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.14.0](service/transcribe/CHANGELOG.md#v1140-2022-01-14) - * **Documentation**: Updated API models -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.14.0](service/workspaces/CHANGELOG.md#v1140-2022-01-14) - * **Feature**: Updated API models - -# Release (2022-01-07) - -## General Highlights -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/config`: [v1.12.0](config/CHANGELOG.md#v1120-2022-01-07) - * **Feature**: Add load option for CredentialCache. Adds a new member to the LoadOptions struct, CredentialsCacheOptions. This member allows specifying a function that will be used to configure the CredentialsCache. The CredentialsCacheOptions will only be used if the configuration loader will wrap the underlying credential provider in the CredentialsCache. 
-* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.12.0](service/appstream/CHANGELOG.md#v1120-2022-01-07) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.12.0](service/cloudtrail/CHANGELOG.md#v1120-2022-01-07) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.12.0](service/detective/CHANGELOG.md#v1120-2022-01-07) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.27.0](service/ec2/CHANGELOG.md#v1270-2022-01-07) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.15.0](service/ecs/CHANGELOG.md#v1150-2022-01-07) - * **Documentation**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.17.0](service/eks/CHANGELOG.md#v1170-2022-01-07) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.18.0](service/glue/CHANGELOG.md#v1180-2022-01-07) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.11.0](service/greengrassv2/CHANGELOG.md#v1110-2022-01-07) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.20.0](service/iot/CHANGELOG.md#v1200-2022-01-07) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.12.0](service/lakeformation/CHANGELOG.md#v1120-2022-01-07) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.16.0](service/lambda/CHANGELOG.md#v1160-2022-01-07) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.17.0](service/mediaconvert/CHANGELOG.md#v1170-2022-01-07) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.17.0](service/quicksight/CHANGELOG.md#v1170-2022-01-07) - * **Documentation**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.15.0](service/rds/CHANGELOG.md#v1150-2022-01-07) - * 
**Documentation**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.14.0](service/rekognition/CHANGELOG.md#v1140-2022-01-07) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.23.0](service/s3/CHANGELOG.md#v1230-2022-01-07) - * **Documentation**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.17.0](service/s3control/CHANGELOG.md#v1170-2022-01-07) - * **Documentation**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.9.0](service/s3outposts/CHANGELOG.md#v190-2022-01-07) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.22.0](service/sagemaker/CHANGELOG.md#v1220-2022-01-07) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.12.0](service/secretsmanager/CHANGELOG.md#v1120-2022-01-07) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.9.0](service/ssooidc/CHANGELOG.md#v190-2022-01-07) - * **Feature**: API client updated - -# Release (2021-12-21) - -## General Highlights -* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. 
-* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.11.0](service/accessanalyzer/CHANGELOG.md#v1110-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.10.0](service/acm/CHANGELOG.md#v1100-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.11.0](service/apigateway/CHANGELOG.md#v1110-2021-12-21) - * **Documentation**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.11.0](service/applicationautoscaling/CHANGELOG.md#v1110-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.10.0](service/appsync/CHANGELOG.md#v1100-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.17.0](service/autoscaling/CHANGELOG.md#v1170-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.3.0](service/chimesdkmeetings/CHANGELOG.md#v130-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.5.0](service/chimesdkmessaging/CHANGELOG.md#v150-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.4.0](service/cloudcontrol/CHANGELOG.md#v140-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.16.0](service/cloudformation/CHANGELOG.md#v1160-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.13.0](service/cloudwatch/CHANGELOG.md#v1130-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: 
[v1.10.0](service/cloudwatchevents/CHANGELOG.md#v1100-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.11.0](service/cloudwatchlogs/CHANGELOG.md#v1110-2021-12-21) - * **Feature**: API client updated - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.10.0](service/codedeploy/CHANGELOG.md#v1100-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.9.0](service/comprehendmedical/CHANGELOG.md#v190-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.13.0](service/configservice/CHANGELOG.md#v1130-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.13.0](service/customerprofiles/CHANGELOG.md#v1130-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.14.0](service/databasemigrationservice/CHANGELOG.md#v1140-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.10.0](service/datasync/CHANGELOG.md#v1100-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.12.0](service/devopsguru/CHANGELOG.md#v1120-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.13.0](service/directconnect/CHANGELOG.md#v1130-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.13.0](service/docdb/CHANGELOG.md#v1130-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.11.0](service/dynamodb/CHANGELOG.md#v1110-2021-12-21) - * **Feature**: Updated to latest service endpoints -* 
`github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.9.0](service/dynamodbstreams/CHANGELOG.md#v190-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.26.0](service/ec2/CHANGELOG.md#v1260-2021-12-21) - * **Feature**: API client updated - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.12.0](service/ecr/CHANGELOG.md#v1120-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.14.0](service/ecs/CHANGELOG.md#v1140-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.16.0](service/elasticache/CHANGELOG.md#v1160-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.10.0](service/elasticloadbalancing/CHANGELOG.md#v1100-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.14.0](service/elasticloadbalancingv2/CHANGELOG.md#v1140-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.11.0](service/elasticsearchservice/CHANGELOG.md#v1110-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.13.0](service/emr/CHANGELOG.md#v1130-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.11.0](service/eventbridge/CHANGELOG.md#v1110-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.6.0](service/finspacedata/CHANGELOG.md#v160-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.15.0](service/forecast/CHANGELOG.md#v1150-2021-12-21) - * 
**Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.9.0](service/glacier/CHANGELOG.md#v190-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.9.0](service/groundstation/CHANGELOG.md#v190-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.11.0](service/health/CHANGELOG.md#v1110-2021-12-21) - * **Documentation**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.15.0](service/imagebuilder/CHANGELOG.md#v1150-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.19.0](service/iot/CHANGELOG.md#v1190-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.11.0](service/kinesis/CHANGELOG.md#v1110-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.9.0](service/kinesisanalytics/CHANGELOG.md#v190-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.10.0](service/kinesisanalyticsv2/CHANGELOG.md#v1100-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.12.0](service/kms/CHANGELOG.md#v1120-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.15.0](service/lambda/CHANGELOG.md#v1150-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.15.0](service/lexmodelsv2/CHANGELOG.md#v1150-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.10.0](service/location/CHANGELOG.md#v1100-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: 
[v1.6.0](service/lookoutmetrics/CHANGELOG.md#v160-2021-12-21) - * **Feature**: API client updated - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.8.0](service/lookoutvision/CHANGELOG.md#v180-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.9.0](service/marketplacemetering/CHANGELOG.md#v190-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.11.0](service/mediaconnect/CHANGELOG.md#v1110-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.12.0](service/neptune/CHANGELOG.md#v1120-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.11.0](service/networkfirewall/CHANGELOG.md#v1110-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.8.0](service/nimble/CHANGELOG.md#v180-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.5.0](service/opensearch/CHANGELOG.md#v150-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.13.0](service/outposts/CHANGELOG.md#v1130-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.10.0](service/pi/CHANGELOG.md#v1100-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.10.0](service/qldb/CHANGELOG.md#v1100-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.14.0](service/rds/CHANGELOG.md#v1140-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.17.0](service/redshift/CHANGELOG.md#v1170-2021-12-21) - * **Feature**: API client updated - * 
**Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.8.0](service/resourcegroups/CHANGELOG.md#v180-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.9.0](service/resourcegroupstaggingapi/CHANGELOG.md#v190-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.15.0](service/route53/CHANGELOG.md#v1150-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.8.0](service/route53domains/CHANGELOG.md#v180-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.5.0](service/route53recoverycontrolconfig/CHANGELOG.md#v150-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.22.0](service/s3/CHANGELOG.md#v1220-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.16.0](service/s3control/CHANGELOG.md#v1160-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.21.0](service/sagemaker/CHANGELOG.md#v1210-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.7.3](service/savingsplans/CHANGELOG.md#v173-2021-12-21) - * **Documentation**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.11.0](service/secretsmanager/CHANGELOG.md#v1110-2021-12-21) - * **Documentation**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.14.0](service/securityhub/CHANGELOG.md#v1140-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.9.0](service/sfn/CHANGELOG.md#v190-2021-12-21) - * **Feature**: Updated to latest service endpoints -* 
`github.com/aws/aws-sdk-go-v2/service/sms`: [v1.8.0](service/sms/CHANGELOG.md#v180-2021-12-21) - * **Documentation**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.13.0](service/sns/CHANGELOG.md#v1130-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.14.0](service/sqs/CHANGELOG.md#v1140-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.18.0](service/ssm/CHANGELOG.md#v1180-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.12.0](service/sts/CHANGELOG.md#v1120-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.9.0](service/support/CHANGELOG.md#v190-2021-12-21) - * **Documentation**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.9.0](service/swf/CHANGELOG.md#v190-2021-12-21) - * **Feature**: Updated to latest service endpoints -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.13.0](service/transfer/CHANGELOG.md#v1130-2021-12-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.11.0](service/workmail/CHANGELOG.md#v1110-2021-12-21) - * **Feature**: API client updated - -# Release (2021-12-03) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.10.1](service/accessanalyzer/CHANGELOG.md#v1101-2021-12-03) - * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. -* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.9.3](service/amp/CHANGELOG.md#v193-2021-12-03) - * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. 
-* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.0.0](service/amplifyuibuilder/CHANGELOG.md#v100-2021-12-03) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.8.3](service/appmesh/CHANGELOG.md#v183-2021-12-03) - * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. -* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.10.2](service/braket/CHANGELOG.md#v1102-2021-12-03) - * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. -* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.7.3](service/codeguruprofiler/CHANGELOG.md#v173-2021-12-03) - * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. -* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.1.1](service/evidently/CHANGELOG.md#v111-2021-12-03) - * **Bug Fix**: Fixed a bug that prevented the resolution of the correct endpoint for some API operations. -* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.2.3](service/grafana/CHANGELOG.md#v123-2021-12-03) - * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.9.2](service/location/CHANGELOG.md#v192-2021-12-03) - * **Bug Fix**: Fixed a bug that prevented the resolution of the correct endpoint for some API operations. - * **Bug Fix**: Fixed an issue that caused some operations to not be signed using sigv4, resulting in authentication failures. 
-* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.7.0](service/networkmanager/CHANGELOG.md#v170-2021-12-03) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.7.3](service/nimble/CHANGELOG.md#v173-2021-12-03) - * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. -* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.7.2](service/proton/CHANGELOG.md#v172-2021-12-03) - * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. -* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.10.0](service/ram/CHANGELOG.md#v1100-2021-12-03) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.12.0](service/rekognition/CHANGELOG.md#v1120-2021-12-03) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.3.3](service/snowdevicemanagement/CHANGELOG.md#v133-2021-12-03) - * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. -* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.2.3](service/wisdom/CHANGELOG.md#v123-2021-12-03) - * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. - -# Release (2021-12-02) - -## General Highlights -* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/config`: [v1.11.0](config/CHANGELOG.md#v1110-2021-12-02) - * **Feature**: Add support for specifying `EndpointResolverWithOptions` on `LoadOptions`, and associated `WithEndpointResolverWithOptions`. 
-* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.10.0](service/accessanalyzer/CHANGELOG.md#v1100-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.9.0](service/applicationinsights/CHANGELOG.md#v190-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.0.0](service/backupgateway/CHANGELOG.md#v100-2021-12-02) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.8.0](service/cloudhsm/CHANGELOG.md#v180-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.11.0](service/devopsguru/CHANGELOG.md#v1110-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.12.0](service/directconnect/CHANGELOG.md#v1120-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.10.0](service/dynamodb/CHANGELOG.md#v1100-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.25.0](service/ec2/CHANGELOG.md#v1250-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.1.0](service/evidently/CHANGELOG.md#v110-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.14.0](service/fsx/CHANGELOG.md#v1140-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.16.0](service/glue/CHANGELOG.md#v1160-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.1.0](service/inspector2/CHANGELOG.md#v110-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.18.0](service/iot/CHANGELOG.md#v1180-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: 
[v1.0.0](service/iottwinmaker/CHANGELOG.md#v100-2021-12-02) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.11.0](service/kafka/CHANGELOG.md#v1110-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.17.0](service/kendra/CHANGELOG.md#v1170-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.10.0](service/kinesis/CHANGELOG.md#v1100-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.10.0](service/lakeformation/CHANGELOG.md#v1100-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.14.0](service/lexmodelsv2/CHANGELOG.md#v1140-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.10.0](service/lexruntimev2/CHANGELOG.md#v1100-2021-12-02) - * **Feature**: Support has been added for the `StartConversation` API. 
-* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.12.0](service/outposts/CHANGELOG.md#v1120-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.1.0](service/rbin/CHANGELOG.md#v110-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.10.0](service/redshiftdata/CHANGELOG.md#v1100-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.1.0](service/rum/CHANGELOG.md#v110-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.21.0](service/s3/CHANGELOG.md#v1210-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.20.0](service/sagemaker/CHANGELOG.md#v1200-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.11.0](service/sagemakerruntime/CHANGELOG.md#v1110-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.11.0](service/shield/CHANGELOG.md#v1110-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.10.0](service/snowball/CHANGELOG.md#v1100-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.10.0](service/storagegateway/CHANGELOG.md#v1100-2021-12-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.0.0](service/workspacesweb/CHANGELOG.md#v100-2021-12-02) - * **Release**: New AWS service client module - -# Release (2021-11-30) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.16.0](service/autoscaling/CHANGELOG.md#v1160-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/backup`: 
[v1.10.0](service/backup/CHANGELOG.md#v1100-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.10.0](service/braket/CHANGELOG.md#v1100-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.2.0](service/chimesdkmeetings/CHANGELOG.md#v120-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.15.0](service/cloudformation/CHANGELOG.md#v1150-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.13.0](service/computeoptimizer/CHANGELOG.md#v1130-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.13.0](service/connect/CHANGELOG.md#v1130-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.12.0](service/customerprofiles/CHANGELOG.md#v1120-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.13.0](service/databasemigrationservice/CHANGELOG.md#v1130-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.9.0](service/dataexchange/CHANGELOG.md#v190-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.9.0](service/dynamodb/CHANGELOG.md#v190-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.24.0](service/ec2/CHANGELOG.md#v1240-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.11.0](service/ecr/CHANGELOG.md#v1110-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.13.0](service/ecs/CHANGELOG.md#v1130-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/eks`: 
[v1.15.0](service/eks/CHANGELOG.md#v1150-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.15.0](service/elasticache/CHANGELOG.md#v1150-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.13.0](service/elasticloadbalancingv2/CHANGELOG.md#v1130-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.10.0](service/elasticsearchservice/CHANGELOG.md#v1100-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.0.0](service/evidently/CHANGELOG.md#v100-2021-11-30) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.5.0](service/finspacedata/CHANGELOG.md#v150-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.14.0](service/imagebuilder/CHANGELOG.md#v1140-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.0.0](service/inspector2/CHANGELOG.md#v100-2021-11-30) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery`: [v1.3.2](service/internal/endpoint-discovery/CHANGELOG.md#v132-2021-11-30) - * **Bug Fix**: Fixed a race condition that caused concurrent calls relying on endpoint discovery to share the same `url.URL` reference in their operation's http.Request. 
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.17.0](service/iot/CHANGELOG.md#v1170-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.9.0](service/iotdeviceadvisor/CHANGELOG.md#v190-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.15.0](service/iotsitewise/CHANGELOG.md#v1150-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.13.0](service/iotwireless/CHANGELOG.md#v1130-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.14.0](service/lambda/CHANGELOG.md#v1140-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.14.0](service/macie2/CHANGELOG.md#v1140-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.8.0](service/mgn/CHANGELOG.md#v180-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.0.0](service/migrationhubrefactorspaces/CHANGELOG.md#v100-2021-11-30) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.4.0](service/opensearch/CHANGELOG.md#v140-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.11.0](service/outposts/CHANGELOG.md#v1110-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.12.0](service/personalize/CHANGELOG.md#v1120-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.7.0](service/personalizeruntime/CHANGELOG.md#v170-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.12.0](service/pinpoint/CHANGELOG.md#v1120-2021-11-30) - * **Feature**: API client updated -* 
`github.com/aws/aws-sdk-go-v2/service/proton`: [v1.7.0](service/proton/CHANGELOG.md#v170-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.15.0](service/quicksight/CHANGELOG.md#v1150-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.0.0](service/rbin/CHANGELOG.md#v100-2021-11-30) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.13.0](service/rds/CHANGELOG.md#v1130-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.16.0](service/redshift/CHANGELOG.md#v1160-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.0.0](service/rum/CHANGELOG.md#v100-2021-11-30) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.20.0](service/s3/CHANGELOG.md#v1200-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.15.0](service/s3control/CHANGELOG.md#v1150-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.13.0](service/sqs/CHANGELOG.md#v1130-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.17.0](service/ssm/CHANGELOG.md#v1170-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.11.0](service/sts/CHANGELOG.md#v1110-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.10.0](service/textract/CHANGELOG.md#v1100-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.8.0](service/timestreamquery/CHANGELOG.md#v180-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.8.0](service/timestreamwrite/CHANGELOG.md#v180-2021-11-30) - * **Feature**: API 
client updated -* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.1.0](service/transcribestreaming/CHANGELOG.md#v110-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.8.0](service/translate/CHANGELOG.md#v180-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.9.0](service/wellarchitected/CHANGELOG.md#v190-2021-11-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.11.0](service/workspaces/CHANGELOG.md#v1110-2021-11-30) - * **Feature**: API client updated - -# Release (2021-11-19) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.11.1 - * **Bug Fix**: Fixed a bug that prevented aws.EndpointResolverWithOptionsFunc from satisfying the aws.EndpointResolverWithOptions interface. -* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.8.0](service/amplifybackend/CHANGELOG.md#v180-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.10.0](service/apigateway/CHANGELOG.md#v1100-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.7.0](service/appconfig/CHANGELOG.md#v170-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.0.0](service/appconfigdata/CHANGELOG.md#v100-2021-11-19) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.8.0](service/applicationinsights/CHANGELOG.md#v180-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.10.0](service/appstream/CHANGELOG.md#v1100-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: 
[v1.12.0](service/auditmanager/CHANGELOG.md#v1120-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.11.0](service/batch/CHANGELOG.md#v1110-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.14.0](service/chime/CHANGELOG.md#v1140-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.1.0](service/chimesdkmeetings/CHANGELOG.md#v110-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.14.0](service/cloudformation/CHANGELOG.md#v1140-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.10.0](service/cloudtrail/CHANGELOG.md#v1100-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.12.0](service/cloudwatch/CHANGELOG.md#v1120-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.12.0](service/connect/CHANGELOG.md#v1120-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.12.0](service/databasemigrationservice/CHANGELOG.md#v1120-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.13.0](service/databrew/CHANGELOG.md#v1130-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.10.0](service/devopsguru/CHANGELOG.md#v1100-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.0.0](service/drs/CHANGELOG.md#v100-2021-11-19) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.8.0](service/dynamodbstreams/CHANGELOG.md#v180-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ec2`: 
[v1.23.0](service/ec2/CHANGELOG.md#v1230-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.14.0](service/eks/CHANGELOG.md#v1140-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.14.0](service/forecast/CHANGELOG.md#v1140-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.10.0](service/ivs/CHANGELOG.md#v1100-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.10.0](service/kafka/CHANGELOG.md#v1100-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.16.0](service/kendra/CHANGELOG.md#v1160-2021-11-19) - * **Announcement**: Fix API modeling bug incorrectly generating `DocumentAttributeValue` type as a union instead of a structure. This update corrects this bug by correcting the `DocumentAttributeValue` type to be a `struct` instead of an `interface`. This change also removes the `DocumentAttributeValueMember` types. To migrate to this change your application using service/kendra will need to be updated to use struct members in `DocumentAttributeValue` instead of `DocumentAttributeValueMember` types. 
- * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.11.0](service/kms/CHANGELOG.md#v1110-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.13.0](service/lambda/CHANGELOG.md#v1130-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.13.0](service/lexmodelsv2/CHANGELOG.md#v1130-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.9.0](service/lexruntimev2/CHANGELOG.md#v190-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.9.0](service/location/CHANGELOG.md#v190-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.15.0](service/mediaconvert/CHANGELOG.md#v1150-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.14.0](service/medialive/CHANGELOG.md#v1140-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.7.0](service/mgn/CHANGELOG.md#v170-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.0.0](service/migrationhubstrategy/CHANGELOG.md#v100-2021-11-19) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.9.0](service/qldb/CHANGELOG.md#v190-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.9.0](service/qldbsession/CHANGELOG.md#v190-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.15.0](service/redshift/CHANGELOG.md#v1150-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.12.0](service/sns/CHANGELOG.md#v1120-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ssm`: 
[v1.16.0](service/ssm/CHANGELOG.md#v1160-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.12.0](service/transfer/CHANGELOG.md#v1120-2021-11-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.14.0](service/wafv2/CHANGELOG.md#v1140-2021-11-19) - * **Feature**: API client updated - -# Release (2021-11-12) - -## General Highlights -* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. -* **Feature**: Waiters now have a `WaitForOutput` method, which can be used to retrieve the output of the successful wait operation. Thank you to [Andrew Haines](https://github.com/haines) for contributing this feature. -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.9.0](service/backup/CHANGELOG.md#v190-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.10.0](service/batch/CHANGELOG.md#v1100-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.0.0](service/chimesdkmeetings/CHANGELOG.md#v100-2021-11-12) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.12.0](service/computeoptimizer/CHANGELOG.md#v1120-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.11.0](service/connect/CHANGELOG.md#v1110-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.12.0](service/docdb/CHANGELOG.md#v1120-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.8.0](service/dynamodb/CHANGELOG.md#v180-2021-11-12) - * **Documentation**: Updated service to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.22.0](service/ec2/CHANGELOG.md#v1220-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.12.0](service/ecs/CHANGELOG.md#v1120-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.9.0](service/gamelift/CHANGELOG.md#v190-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.9.0](service/greengrassv2/CHANGELOG.md#v190-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.10.0](service/health/CHANGELOG.md#v1100-2021-11-12) - * **Documentation**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.9.0](service/identitystore/CHANGELOG.md#v190-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.12.0](service/iotwireless/CHANGELOG.md#v1120-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.11.0](service/neptune/CHANGELOG.md#v1110-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.12.0](service/rds/CHANGELOG.md#v1120-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.0.0](service/resiliencehub/CHANGELOG.md#v100-2021-11-12) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.8.0](service/resourcegroupstaggingapi/CHANGELOG.md#v180-2021-11-12) - * **Documentation**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.14.0](service/s3control/CHANGELOG.md#v1140-2021-11-12) - * **Feature**: Updated service to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.19.0](service/sagemaker/CHANGELOG.md#v1190-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.10.0](service/sagemakerruntime/CHANGELOG.md#v1100-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.7.0](service/ssmincidents/CHANGELOG.md#v170-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.11.0](service/transcribe/CHANGELOG.md#v1110-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.7.0](service/translate/CHANGELOG.md#v170-2021-11-12) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.13.0](service/wafv2/CHANGELOG.md#v1130-2021-11-12) - * **Feature**: Updated service to latest API model. - -# Release (2021-11-06) - -## General Highlights -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream`: [v1.0.0](aws/protocol/eventstream/CHANGELOG.md#v100-2021-11-06) - * **Announcement**: Support has been added for AWS EventStream APIs for Kinesis, S3, and Transcribe Streaming. Support for the Lex Runtime V2 EventStream API will be added in a future release. - * **Release**: Protocol support has been added for AWS event stream. 
-* `github.com/aws/aws-sdk-go-v2/internal/endpoints/v2`: [v2.0.0](internal/endpoints/v2/CHANGELOG.md#v200-2021-11-06) - * **Release**: Endpoint Variant Model Support -* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.6.0](service/applicationinsights/CHANGELOG.md#v160-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.8.0](service/appstream/CHANGELOG.md#v180-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.11.0](service/auditmanager/CHANGELOG.md#v1110-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.14.0](service/autoscaling/CHANGELOG.md#v1140-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.13.0](service/chime/CHANGELOG.md#v1130-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.4.0](service/chimesdkidentity/CHANGELOG.md#v140-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.4.0](service/chimesdkmessaging/CHANGELOG.md#v140-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.10.0](service/cloudfront/CHANGELOG.md#v1100-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.7.0](service/codecommit/CHANGELOG.md#v170-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.10.0](service/connect/CHANGELOG.md#v1100-2021-11-06) - * **Feature**: Updated service to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.7.0](service/connectcontactlens/CHANGELOG.md#v170-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.6.0](service/connectparticipant/CHANGELOG.md#v160-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.10.0](service/databasemigrationservice/CHANGELOG.md#v1100-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.8.0](service/datasync/CHANGELOG.md#v180-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.11.0](service/docdb/CHANGELOG.md#v1110-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.9.0](service/ebs/CHANGELOG.md#v190-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.21.0](service/ec2/CHANGELOG.md#v1210-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.9.0](service/ecr/CHANGELOG.md#v190-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.11.0](service/ecs/CHANGELOG.md#v1110-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.12.0](service/eks/CHANGELOG.md#v1120-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.13.0](service/elasticache/CHANGELOG.md#v1130-2021-11-06) - * **Feature**: Updated service to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.9.0](service/elasticsearchservice/CHANGELOG.md#v190-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.8.0](service/emrcontainers/CHANGELOG.md#v180-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.4.0](service/finspace/CHANGELOG.md#v140-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.12.0](service/fsx/CHANGELOG.md#v1120-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.8.0](service/gamelift/CHANGELOG.md#v180-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.9.0](service/health/CHANGELOG.md#v190-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.12.0](service/iam/CHANGELOG.md#v1120-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/internal/eventstreamtesting`: [v1.0.0](service/internal/eventstreamtesting/CHANGELOG.md#v100-2021-11-06) - * **Release**: Protocol support has been added for AWS event stream. -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.13.0](service/iotsitewise/CHANGELOG.md#v1130-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.14.0](service/kendra/CHANGELOG.md#v1140-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.8.0](service/kinesis/CHANGELOG.md#v180-2021-11-06) - * **Feature**: Support has been added for the SubscribeToShard API. 
-* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.9.0](service/kms/CHANGELOG.md#v190-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.12.0](service/lightsail/CHANGELOG.md#v1120-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.13.0](service/macie2/CHANGELOG.md#v1130-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.6.0](service/mgn/CHANGELOG.md#v160-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.10.0](service/neptune/CHANGELOG.md#v1100-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.6.0](service/networkmanager/CHANGELOG.md#v160-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.6.0](service/nimble/CHANGELOG.md#v160-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.3.0](service/opensearch/CHANGELOG.md#v130-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.14.0](service/quicksight/CHANGELOG.md#v1140-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.11.0](service/rds/CHANGELOG.md#v1110-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.10.0](service/rekognition/CHANGELOG.md#v1100-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.9.0](service/route53resolver/CHANGELOG.md#v190-2021-11-06) - * **Feature**: Updated service to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.18.0](service/s3/CHANGELOG.md#v1180-2021-11-06) - * **Feature**: Support has been added for the SelectObjectContent API. - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.13.0](service/s3control/CHANGELOG.md#v1130-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.18.0](service/sagemaker/CHANGELOG.md#v1180-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.11.0](service/servicediscovery/CHANGELOG.md#v1110-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.6.0](service/ssmincidents/CHANGELOG.md#v160-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.6.0](service/sso/CHANGELOG.md#v160-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.8.0](service/storagegateway/CHANGELOG.md#v180-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.7.0](service/support/CHANGELOG.md#v170-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.8.0](service/textract/CHANGELOG.md#v180-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.10.0](service/transcribe/CHANGELOG.md#v1100-2021-11-06) - * **Feature**: Updated service to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.0.0](service/transcribestreaming/CHANGELOG.md#v100-2021-11-06) - * **Release**: New AWS service client module - * **Feature**: Support has been added for the StartStreamTranscription and StartMedicalStreamTranscription APIs. -* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.6.0](service/waf/CHANGELOG.md#v160-2021-11-06) - * **Feature**: Updated service to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.2.0](service/wisdom/CHANGELOG.md#v120-2021-11-06) - * **Feature**: Updated service to latest API model. - -# Release (2021-10-21) - -## General Highlights -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.10.0 - * **Feature**: Adds dynamic signing middleware that switches to unsigned payload when TLS is enabled. -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.8.0](service/appflow/CHANGELOG.md#v180-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.8.0](service/applicationautoscaling/CHANGELOG.md#v180-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.13.0](service/autoscaling/CHANGELOG.md#v1130-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.3.0](service/chimesdkmessaging/CHANGELOG.md#v130-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.11.0](service/cloudformation/CHANGELOG.md#v1110-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.7.0](service/cloudsearch/CHANGELOG.md#v170-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.7.0](service/cloudtrail/CHANGELOG.md#v170-2021-10-21) - * **Feature**: 
API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.9.0](service/cloudwatch/CHANGELOG.md#v190-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.7.0](service/cloudwatchevents/CHANGELOG.md#v170-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.8.0](service/cloudwatchlogs/CHANGELOG.md#v180-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.7.0](service/codedeploy/CHANGELOG.md#v170-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.10.0](service/configservice/CHANGELOG.md#v1100-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.7.0](service/dataexchange/CHANGELOG.md#v170-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.9.0](service/directconnect/CHANGELOG.md#v190-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.10.0](service/docdb/CHANGELOG.md#v1100-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.6.0](service/dynamodb/CHANGELOG.md#v160-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.20.0](service/ec2/CHANGELOG.md#v1200-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.8.0](service/ecr/CHANGELOG.md#v180-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.10.0](service/ecs/CHANGELOG.md#v1100-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.9.0](service/efs/CHANGELOG.md#v190-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: 
[v1.12.0](service/elasticache/CHANGELOG.md#v1120-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.7.0](service/elasticloadbalancing/CHANGELOG.md#v170-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.10.0](service/elasticloadbalancingv2/CHANGELOG.md#v1100-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.10.0](service/emr/CHANGELOG.md#v1100-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.8.0](service/eventbridge/CHANGELOG.md#v180-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.6.0](service/glacier/CHANGELOG.md#v160-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.13.0](service/glue/CHANGELOG.md#v1130-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.8.0](service/ivs/CHANGELOG.md#v180-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.13.0](service/kendra/CHANGELOG.md#v1130-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.7.0](service/kinesis/CHANGELOG.md#v170-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.7.0](service/kinesisanalyticsv2/CHANGELOG.md#v170-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.8.0](service/kms/CHANGELOG.md#v180-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.10.0](service/lambda/CHANGELOG.md#v1100-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.13.0](service/mediaconvert/CHANGELOG.md#v1130-2021-10-21) - * **Feature**: 
API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.9.0](service/mediapackage/CHANGELOG.md#v190-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.10.0](service/mediapackagevod/CHANGELOG.md#v1100-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.9.0](service/mediatailor/CHANGELOG.md#v190-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.9.0](service/neptune/CHANGELOG.md#v190-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.0.0](service/panorama/CHANGELOG.md#v100-2021-10-21) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.13.0](service/quicksight/CHANGELOG.md#v1130-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.10.0](service/rds/CHANGELOG.md#v1100-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.12.0](service/redshift/CHANGELOG.md#v1120-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.10.0](service/robomaker/CHANGELOG.md#v1100-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.17.0](service/s3/CHANGELOG.md#v1170-2021-10-21) - * **Feature**: Updates S3 streaming operations - PutObject, UploadPart, WriteGetObjectResponse to use unsigned payload signing auth when TLS is enabled. 
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.17.0](service/sagemaker/CHANGELOG.md#v1170-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.12.0](service/securityhub/CHANGELOG.md#v1120-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.6.0](service/sfn/CHANGELOG.md#v160-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.9.0](service/sns/CHANGELOG.md#v190-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.10.0](service/sqs/CHANGELOG.md#v1100-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.7.0](service/storagegateway/CHANGELOG.md#v170-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.8.0](service/sts/CHANGELOG.md#v180-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.6.0](service/swf/CHANGELOG.md#v160-2021-10-21) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.8.0](service/workmail/CHANGELOG.md#v180-2021-10-21) - * **Feature**: API client updated - -# Release (2021-10-11) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`: [v1.6.0](feature/ec2/imds/CHANGELOG.md#v160-2021-10-11) - * **Feature**: Respect passed in Context Deadline/Timeout. Updates the IMDS Client operations to not override the passed in Context's Deadline or Timeout options. If an Client operation is called with a Context with a Deadline or Timeout, the client will no longer override it with the client's default timeout. - * **Bug Fix**: Fix IMDS client's response handling and operation timeout race. 
Fixes #1253 -* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.5.0](service/amplifybackend/CHANGELOG.md#v150-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.7.0](service/applicationautoscaling/CHANGELOG.md#v170-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.3.0](service/apprunner/CHANGELOG.md#v130-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.6.0](service/backup/CHANGELOG.md#v160-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.11.0](service/chime/CHANGELOG.md#v1110-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.11.0](service/codebuild/CHANGELOG.md#v1110-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.10.0](service/databrew/CHANGELOG.md#v1100-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.19.0](service/ec2/CHANGELOG.md#v1190-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.8.0](service/efs/CHANGELOG.md#v180-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.9.0](service/elasticloadbalancingv2/CHANGELOG.md#v190-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.7.0](service/firehose/CHANGELOG.md#v170-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.10.0](service/frauddetector/CHANGELOG.md#v1100-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.10.0](service/fsx/CHANGELOG.md#v1100-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/glue`: 
[v1.12.0](service/glue/CHANGELOG.md#v1120-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.0.0](service/grafana/CHANGELOG.md#v100-2021-10-11) - * **Release**: New AWS service client module - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.8.0](service/iotevents/CHANGELOG.md#v180-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.12.0](service/kendra/CHANGELOG.md#v1120-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.7.0](service/kms/CHANGELOG.md#v170-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.9.0](service/lexmodelsv2/CHANGELOG.md#v190-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.6.0](service/lexruntimev2/CHANGELOG.md#v160-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.6.0](service/location/CHANGELOG.md#v160-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.12.0](service/mediaconvert/CHANGELOG.md#v1120-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.10.0](service/medialive/CHANGELOG.md#v1100-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.16.0](service/sagemaker/CHANGELOG.md#v1160-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.7.0](service/secretsmanager/CHANGELOG.md#v170-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.11.0](service/securityhub/CHANGELOG.md#v1110-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ssm`: 
[v1.12.0](service/ssm/CHANGELOG.md#v1120-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.6.0](service/ssooidc/CHANGELOG.md#v160-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.7.0](service/synthetics/CHANGELOG.md#v170-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.6.0](service/textract/CHANGELOG.md#v160-2021-10-11) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.7.0](service/workmail/CHANGELOG.md#v170-2021-10-11) - * **Feature**: API client updated - -# Release (2021-09-30) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.0.0](service/account/CHANGELOG.md#v100-2021-09-30) - * **Release**: New AWS service client module - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.6.0](service/amp/CHANGELOG.md#v160-2021-09-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.7.0](service/appintegrations/CHANGELOG.md#v170-2021-09-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.0.0](service/cloudcontrol/CHANGELOG.md#v100-2021-09-30) - * **Release**: New AWS service client module - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.5.0](service/cloudhsmv2/CHANGELOG.md#v150-2021-09-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.8.0](service/connect/CHANGELOG.md#v180-2021-09-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.6.0](service/dataexchange/CHANGELOG.md#v160-2021-09-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.8.0](service/elasticloadbalancingv2/CHANGELOG.md#v180-2021-09-30) - * **Feature**: API 
client updated -* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.11.0](service/imagebuilder/CHANGELOG.md#v1110-2021-09-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.9.0](service/lambda/CHANGELOG.md#v190-2021-09-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.11.0](service/macie2/CHANGELOG.md#v1110-2021-09-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.7.0](service/networkfirewall/CHANGELOG.md#v170-2021-09-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.8.0](service/pinpoint/CHANGELOG.md#v180-2021-09-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.6.0](service/sesv2/CHANGELOG.md#v160-2021-09-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.8.0](service/transfer/CHANGELOG.md#v180-2021-09-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.0.0](service/voiceid/CHANGELOG.md#v100-2021-09-30) - * **Release**: New AWS service client module - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.0.0](service/wisdom/CHANGELOG.md#v100-2021-09-30) - * **Release**: New AWS service client module - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.6.0](service/workmail/CHANGELOG.md#v160-2021-09-30) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.7.0](service/workspaces/CHANGELOG.md#v170-2021-09-30) - * **Feature**: API client updated - -# Release (2021-09-24) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression`: [v1.2.4](feature/dynamodb/expression/CHANGELOG.md#v124-2021-09-24) - * **Documentation**: Fixes typo in NameBuilder.NamesList example documentation to use the correct variable name. 
-* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.6.0](service/appmesh/CHANGELOG.md#v160-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.7.0](service/appsync/CHANGELOG.md#v170-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.9.0](service/auditmanager/CHANGELOG.md#v190-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.5.0](service/codecommit/CHANGELOG.md#v150-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.8.0](service/comprehend/CHANGELOG.md#v180-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.8.0](service/databasemigrationservice/CHANGELOG.md#v180-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.18.0](service/ec2/CHANGELOG.md#v1180-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.7.0](service/ecr/CHANGELOG.md#v170-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.7.0](service/elasticsearchservice/CHANGELOG.md#v170-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.10.0](service/iam/CHANGELOG.md#v1100-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.6.0](service/identitystore/CHANGELOG.md#v160-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.10.0](service/imagebuilder/CHANGELOG.md#v1100-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.13.0](service/iot/CHANGELOG.md#v1130-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotevents`: 
[v1.7.0](service/iotevents/CHANGELOG.md#v170-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.1.0](service/kafkaconnect/CHANGELOG.md#v110-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.6.0](service/lakeformation/CHANGELOG.md#v160-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.8.0](service/lexmodelsv2/CHANGELOG.md#v180-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.5.0](service/lexruntimev2/CHANGELOG.md#v150-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.8.0](service/licensemanager/CHANGELOG.md#v180-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.11.0](service/mediaconvert/CHANGELOG.md#v1110-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.9.0](service/mediapackagevod/CHANGELOG.md#v190-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.8.0](service/mediatailor/CHANGELOG.md#v180-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.1.0](service/opensearch/CHANGELOG.md#v110-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.12.0](service/quicksight/CHANGELOG.md#v1120-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.11.0](service/ssm/CHANGELOG.md#v1110-2021-09-24) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.10.0](service/wafv2/CHANGELOG.md#v1100-2021-09-24) - * **Feature**: API client updated - -# Release (2021-09-17) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK 
module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.10.0](service/chime/CHANGELOG.md#v1100-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.10.1](service/cloudformation/CHANGELOG.md#v1101-2021-09-17) - * **Documentation**: Updated API client documentation. -* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.7.0](service/comprehend/CHANGELOG.md#v170-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.17.0](service/ec2/CHANGELOG.md#v1170-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.6.0](service/ecr/CHANGELOG.md#v160-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.12.0](service/iot/CHANGELOG.md#v1120-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.0.0](service/kafkaconnect/CHANGELOG.md#v100-2021-09-17) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.7.0](service/lexmodelsv2/CHANGELOG.md#v170-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.4.0](service/lexruntimev2/CHANGELOG.md#v140-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.10.0](service/macie2/CHANGELOG.md#v1100-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.8.0](service/mediapackagevod/CHANGELOG.md#v180-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. 
-* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.6.0](service/networkfirewall/CHANGELOG.md#v160-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.7.0](service/pinpoint/CHANGELOG.md#v170-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.11.0](service/quicksight/CHANGELOG.md#v1110-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.9.0](service/rds/CHANGELOG.md#v190-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.9.0](service/robomaker/CHANGELOG.md#v190-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.16.0](service/s3/CHANGELOG.md#v1160-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.15.0](service/sagemaker/CHANGELOG.md#v1150-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.5.0](service/ssooidc/CHANGELOG.md#v150-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.8.0](service/transcribe/CHANGELOG.md#v180-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.9.0](service/wafv2/CHANGELOG.md#v190-2021-09-17) - * **Feature**: Updated API client and endpoints to latest revision. 
- -# Release (2021-09-10) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.4.1](credentials/CHANGELOG.md#v141-2021-09-10) - * **Documentation**: Fixes the AssumeRoleProvider's documentation for using custom TokenProviders. -* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.5.0](service/amp/CHANGELOG.md#v150-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.7.0](service/braket/CHANGELOG.md#v170-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.2.0](service/chimesdkidentity/CHANGELOG.md#v120-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.2.0](service/chimesdkmessaging/CHANGELOG.md#v120-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.7.0](service/codegurureviewer/CHANGELOG.md#v170-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.10.0](service/eks/CHANGELOG.md#v1100-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.11.0](service/elasticache/CHANGELOG.md#v1110-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.9.0](service/emr/CHANGELOG.md#v190-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.10.0](service/forecast/CHANGELOG.md#v1100-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.9.0](service/frauddetector/CHANGELOG.md#v190-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.7.0](service/kafka/CHANGELOG.md#v170-2021-09-10) - * **Feature**: API client updated -* 
`github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.4.0](service/lookoutequipment/CHANGELOG.md#v140-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.8.0](service/mediapackage/CHANGELOG.md#v180-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.0.0](service/opensearch/CHANGELOG.md#v100-2021-09-10) - * **Release**: New AWS service client module - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.8.0](service/outposts/CHANGELOG.md#v180-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.7.0](service/ram/CHANGELOG.md#v170-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.14.0](service/sagemaker/CHANGELOG.md#v1140-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.9.0](service/servicediscovery/CHANGELOG.md#v190-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.5.0](service/ssmcontacts/CHANGELOG.md#v150-2021-09-10) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.6.0](service/xray/CHANGELOG.md#v160-2021-09-10) - * **Feature**: API client updated - -# Release (2021-09-02) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/config`: [v1.8.0](config/CHANGELOG.md#v180-2021-09-02) - * **Feature**: Add support for S3 Multi-Region Access Point ARNs. 
-* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.7.0](service/accessanalyzer/CHANGELOG.md#v170-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.8.0](service/acmpca/CHANGELOG.md#v180-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.8.0](service/cloud9/CHANGELOG.md#v180-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.10.0](service/cloudformation/CHANGELOG.md#v1100-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.6.0](service/cloudtrail/CHANGELOG.md#v160-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.10.0](service/codebuild/CHANGELOG.md#v1100-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.9.0](service/computeoptimizer/CHANGELOG.md#v190-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.9.0](service/configservice/CHANGELOG.md#v190-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.7.0](service/ebs/CHANGELOG.md#v170-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.16.0](service/ec2/CHANGELOG.md#v1160-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.7.0](service/efs/CHANGELOG.md#v170-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.8.0](service/emr/CHANGELOG.md#v180-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.6.0](service/firehose/CHANGELOG.md#v160-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: 
[v1.8.0](service/frauddetector/CHANGELOG.md#v180-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.9.0](service/fsx/CHANGELOG.md#v190-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/internal/s3shared`: [v1.7.0](service/internal/s3shared/CHANGELOG.md#v170-2021-09-02) - * **Feature**: Add support for S3 Multi-Region Access Point ARNs. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.11.0](service/iot/CHANGELOG.md#v1110-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.5.0](service/iotjobsdataplane/CHANGELOG.md#v150-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.7.0](service/ivs/CHANGELOG.md#v170-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.6.0](service/kms/CHANGELOG.md#v160-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.9.0](service/lexmodelbuildingservice/CHANGELOG.md#v190-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.7.0](service/mediatailor/CHANGELOG.md#v170-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.2.0](service/memorydb/CHANGELOG.md#v120-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.5.0](service/mwaa/CHANGELOG.md#v150-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.6.0](service/polly/CHANGELOG.md#v160-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.10.0](service/quicksight/CHANGELOG.md#v1100-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/s3`: 
[v1.15.0](service/s3/CHANGELOG.md#v1150-2021-09-02) - * **Feature**: API client updated - * **Feature**: Add support for S3 Multi-Region Access Point ARNs. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.11.0](service/s3control/CHANGELOG.md#v1110-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.7.0](service/sagemakerruntime/CHANGELOG.md#v170-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.6.0](service/schemas/CHANGELOG.md#v160-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.10.0](service/securityhub/CHANGELOG.md#v1100-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.5.0](service/servicecatalogappregistry/CHANGELOG.md#v150-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.9.0](service/sqs/CHANGELOG.md#v190-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.4.0](service/ssmincidents/CHANGELOG.md#v140-2021-09-02) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.7.0](service/transfer/CHANGELOG.md#v170-2021-09-02) - * **Feature**: API client updated - -# Release (2021-08-27) - -## General Highlights -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.4.0](credentials/CHANGELOG.md#v140-2021-08-27) - * **Feature**: Adds support for Tags and TransitiveTagKeys to stscreds.AssumeRoleProvider. 
Closes https://github.com/aws/aws-sdk-go-v2/issues/723 -* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.2.0](feature/dynamodb/attributevalue/CHANGELOG.md#v120-2021-08-27) - * **Bug Fix**: Fix unmarshaler's decoding of AttributeValueMemberN into a type that is a string alias. -* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.7.0](service/acmpca/CHANGELOG.md#v170-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.5.0](service/amplify/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.4.0](service/amplifybackend/CHANGELOG.md#v140-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.7.0](service/apigateway/CHANGELOG.md#v170-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.4.0](service/apigatewaymanagementapi/CHANGELOG.md#v140-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.7.0](service/appflow/CHANGELOG.md#v170-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.4.0](service/applicationinsights/CHANGELOG.md#v140-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.2.0](service/apprunner/CHANGELOG.md#v120-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.6.0](service/appstream/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.6.0](service/appsync/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. 
-* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.6.0](service/athena/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.8.0](service/auditmanager/CHANGELOG.md#v180-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.5.0](service/autoscalingplans/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.5.0](service/backup/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.7.0](service/batch/CHANGELOG.md#v170-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.6.0](service/braket/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.1.0](service/chimesdkidentity/CHANGELOG.md#v110-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.1.0](service/chimesdkmessaging/CHANGELOG.md#v110-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.5.0](service/cloudtrail/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.6.0](service/cloudwatchevents/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.5.0](service/codeartifact/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. 
-* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.9.0](service/codebuild/CHANGELOG.md#v190-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.4.0](service/codecommit/CHANGELOG.md#v140-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.5.0](service/codeguruprofiler/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.4.0](service/codestarnotifications/CHANGELOG.md#v140-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.5.0](service/cognitoidentity/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.6.0](service/cognitoidentityprovider/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.6.0](service/comprehend/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.8.0](service/computeoptimizer/CHANGELOG.md#v180-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.5.0](service/connectcontactlens/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.9.0](service/customerprofiles/CHANGELOG.md#v190-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.7.0](service/databasemigrationservice/CHANGELOG.md#v170-2021-08-27) - * **Feature**: Updated API model to latest revision. 
-* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.6.0](service/datasync/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.4.0](service/dax/CHANGELOG.md#v140-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.5.0](service/directoryservice/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.5.0](service/dlm/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.4.0](service/dynamodbstreams/CHANGELOG.md#v140-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.15.0](service/ec2/CHANGELOG.md#v1150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.5.0](service/ecrpublic/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.6.0](service/efs/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.9.0](service/eks/CHANGELOG.md#v190-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.6.0](service/emrcontainers/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.7.0](service/eventbridge/CHANGELOG.md#v170-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.2.0](service/finspace/CHANGELOG.md#v120-2021-08-27) - * **Feature**: Updated API model to latest revision. 
-* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.2.0](service/finspacedata/CHANGELOG.md#v120-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.5.0](service/firehose/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.7.0](service/fms/CHANGELOG.md#v170-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.9.0](service/forecast/CHANGELOG.md#v190-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.4.0](service/forecastquery/CHANGELOG.md#v140-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.7.0](service/frauddetector/CHANGELOG.md#v170-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.8.0](service/fsx/CHANGELOG.md#v180-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.6.0](service/gamelift/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.11.0](service/glue/CHANGELOG.md#v1110-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.6.0](service/groundstation/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.5.0](service/guardduty/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.7.0](service/health/CHANGELOG.md#v170-2021-08-27) - * **Feature**: Updated API model to latest revision. 
-* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.6.0](service/healthlake/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.10.0](service/iot/CHANGELOG.md#v1100-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.4.0](service/iot1clickdevicesservice/CHANGELOG.md#v140-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.5.0](service/iotanalytics/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.4.0](service/iotdataplane/CHANGELOG.md#v140-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.5.0](service/iotfleethub/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.11.0](service/iotsitewise/CHANGELOG.md#v1110-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.6.0](service/ivs/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.5.0](service/lakeformation/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.6.0](service/lexmodelsv2/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.3.0](service/lexruntimev2/CHANGELOG.md#v130-2021-08-27) - * **Feature**: Updated API model to latest revision. 
-* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.7.0](service/licensemanager/CHANGELOG.md#v170-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.10.0](service/lightsail/CHANGELOG.md#v1100-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.3.0](service/lookoutequipment/CHANGELOG.md#v130-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.3.0](service/lookoutmetrics/CHANGELOG.md#v130-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.9.0](service/macie2/CHANGELOG.md#v190-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.10.0](service/mediaconvert/CHANGELOG.md#v1100-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.7.0](service/mediapackage/CHANGELOG.md#v170-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.7.0](service/mediapackagevod/CHANGELOG.md#v170-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.5.0](service/mq/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.5.0](service/networkfirewall/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.7.0](service/outposts/CHANGELOG.md#v170-2021-08-27) - * **Feature**: Updated API model to latest revision. 
-* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.6.0](service/pi/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.4.0](service/pinpointsmsvoice/CHANGELOG.md#v140-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.5.0](service/polly/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.6.0](service/qldb/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.5.0](service/qldbsession/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.6.0](service/ram/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.8.0](service/rekognition/CHANGELOG.md#v180-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.5.0](service/resourcegroupstaggingapi/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.8.0](service/robomaker/CHANGELOG.md#v180-2021-08-27) - * **Bug Fix**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.1.0](service/route53recoverycontrolconfig/CHANGELOG.md#v110-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.7.0](service/route53resolver/CHANGELOG.md#v170-2021-08-27) - * **Feature**: Updated API model to latest revision. 
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.14.0](service/s3/CHANGELOG.md#v1140-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.10.0](service/s3control/CHANGELOG.md#v1100-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.5.0](service/s3outposts/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.5.0](service/servicecatalog/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.4.0](service/servicecatalogappregistry/CHANGELOG.md#v140-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.5.0](service/signer/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.4.0](service/ssooidc/CHANGELOG.md#v140-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.6.0](service/storagegateway/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.6.0](service/synthetics/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.5.0](service/textract/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.7.0](service/transcribe/CHANGELOG.md#v170-2021-08-27) - * **Feature**: Updated API model to latest revision. 
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.6.0](service/transfer/CHANGELOG.md#v160-2021-08-27) - * **Feature**: Updated API model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.5.0](service/wafregional/CHANGELOG.md#v150-2021-08-27) - * **Feature**: Updated API model to latest revision. - -# Release (2021-08-19) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.6.0](service/apigateway/CHANGELOG.md#v160-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.5.0](service/apigatewayv2/CHANGELOG.md#v150-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.6.0](service/appflow/CHANGELOG.md#v160-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.5.0](service/applicationautoscaling/CHANGELOG.md#v150-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.6.0](service/cloud9/CHANGELOG.md#v160-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.4.0](service/clouddirectory/CHANGELOG.md#v140-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.6.0](service/cloudwatchlogs/CHANGELOG.md#v160-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.8.0](service/codebuild/CHANGELOG.md#v180-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.7.0](service/configservice/CHANGELOG.md#v170-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.8.0](service/costexplorer/CHANGELOG.md#v180-2021-08-19) - * **Feature**: API client updated -* 
`github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.8.0](service/customerprofiles/CHANGELOG.md#v180-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.8.0](service/databrew/CHANGELOG.md#v180-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.4.0](service/directoryservice/CHANGELOG.md#v140-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.14.0](service/ec2/CHANGELOG.md#v1140-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.9.0](service/elasticache/CHANGELOG.md#v190-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.6.0](service/emr/CHANGELOG.md#v160-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.10.0](service/iotsitewise/CHANGELOG.md#v1100-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.7.0](service/lambda/CHANGELOG.md#v170-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.6.0](service/licensemanager/CHANGELOG.md#v160-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.0.0](service/memorydb/CHANGELOG.md#v100-2021-08-19) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.8.0](service/quicksight/CHANGELOG.md#v180-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.10.0](service/route53/CHANGELOG.md#v1100-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.6.0](service/route53resolver/CHANGELOG.md#v160-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/s3`: 
[v1.13.0](service/s3/CHANGELOG.md#v1130-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.12.0](service/sagemaker/CHANGELOG.md#v1120-2021-08-19) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.5.0](service/sagemakerruntime/CHANGELOG.md#v150-2021-08-19) - * **Feature**: API client updated - -# Release (2021-08-12) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/feature/cloudfront/sign`: [v1.3.1](feature/cloudfront/sign/CHANGELOG.md#v131-2021-08-12) - * **Bug Fix**: Update to not escape HTML when encoding the policy. -* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.5.0](service/athena/CHANGELOG.md#v150-2021-08-12) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.11.0](service/autoscaling/CHANGELOG.md#v1110-2021-08-12) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.8.0](service/chime/CHANGELOG.md#v180-2021-08-12) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.0.0](service/chimesdkidentity/CHANGELOG.md#v100-2021-08-12) - * **Release**: New AWS service client module - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.0.0](service/chimesdkmessaging/CHANGELOG.md#v100-2021-08-12) - * **Release**: New AWS service client module - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.7.0](service/codebuild/CHANGELOG.md#v170-2021-08-12) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.6.0](service/connect/CHANGELOG.md#v160-2021-08-12) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.5.0](service/ebs/CHANGELOG.md#v150-2021-08-12) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ecs`: 
[v1.8.0](service/ecs/CHANGELOG.md#v180-2021-08-12) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.5.0](service/lexmodelsv2/CHANGELOG.md#v150-2021-08-12) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.9.0](service/lightsail/CHANGELOG.md#v190-2021-08-12) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.3.0](service/nimble/CHANGELOG.md#v130-2021-08-12) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.7.0](service/rekognition/CHANGELOG.md#v170-2021-08-12) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.9.0](service/route53/CHANGELOG.md#v190-2021-08-12) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.0.0](service/snowdevicemanagement/CHANGELOG.md#v100-2021-08-12) - * **Release**: New AWS service client module - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.9.0](service/ssm/CHANGELOG.md#v190-2021-08-12) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.5.0](service/synthetics/CHANGELOG.md#v150-2021-08-12) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.7.0](service/wafv2/CHANGELOG.md#v170-2021-08-12) - * **Feature**: API client updated - -# Release (2021-08-04) - -## General Highlights -* **Feature**: adds error handling for deferred close calls -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.8.0 - * **Bug Fix**: Corrected an issue where the retryer was not using the last attempt's ResultMetadata as the basis for the return result from the stack. 
([#1345](https://github.com/aws/aws-sdk-go-v2/pull/1345)) -* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression`: [v1.2.0](feature/dynamodb/expression/CHANGELOG.md#v120-2021-08-04) - * **Feature**: Add IsSet helper for ConditionBuilder and KeyConditionBuilder ([#1329](https://github.com/aws/aws-sdk-go-v2/pull/1329)) -* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.5.2](service/accessanalyzer/CHANGELOG.md#v152-2021-08-04) - * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349)) -* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.3.1](service/amp/CHANGELOG.md#v131-2021-08-04) - * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349)) -* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.5.0](service/appintegrations/CHANGELOG.md#v150-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.4.2](service/appmesh/CHANGELOG.md#v142-2021-08-04) - * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349)) -* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.5.0](service/appsync/CHANGELOG.md#v150-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.7.0](service/auditmanager/CHANGELOG.md#v170-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.6.0](service/batch/CHANGELOG.md#v160-2021-08-04) - * **Feature**: Updated to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.5.2](service/braket/CHANGELOG.md#v152-2021-08-04) - * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349)) -* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.7.0](service/chime/CHANGELOG.md#v170-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.8.0](service/cloudformation/CHANGELOG.md#v180-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.7.0](service/cloudwatch/CHANGELOG.md#v170-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.6.0](service/codebuild/CHANGELOG.md#v160-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.4.2](service/codeguruprofiler/CHANGELOG.md#v142-2021-08-04) - * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349)) -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.5.0](service/cognitoidentityprovider/CHANGELOG.md#v150-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.7.0](service/computeoptimizer/CHANGELOG.md#v170-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.7.0](service/databrew/CHANGELOG.md#v170-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.7.0](service/directconnect/CHANGELOG.md#v170-2021-08-04) - * **Feature**: Updated to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.13.0](service/ec2/CHANGELOG.md#v1130-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.7.0](service/ecs/CHANGELOG.md#v170-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.6.0](service/elasticloadbalancingv2/CHANGELOG.md#v160-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.5.0](service/emr/CHANGELOG.md#v150-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.5.0](service/emrcontainers/CHANGELOG.md#v150-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.6.0](service/eventbridge/CHANGELOG.md#v160-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.10.0](service/glue/CHANGELOG.md#v1100-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.5.0](service/greengrassv2/CHANGELOG.md#v150-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.5.2](service/groundstation/CHANGELOG.md#v152-2021-08-04) - * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349)) -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.8.0](service/iam/CHANGELOG.md#v180-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.4.0](service/identitystore/CHANGELOG.md#v140-2021-08-04) - * **Feature**: Updated to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.8.0](service/imagebuilder/CHANGELOG.md#v180-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.9.0](service/iot/CHANGELOG.md#v190-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.4.0](service/iotanalytics/CHANGELOG.md#v140-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.9.0](service/iotsitewise/CHANGELOG.md#v190-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.8.0](service/iotwireless/CHANGELOG.md#v180-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.10.0](service/kendra/CHANGELOG.md#v1100-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.6.0](service/lambda/CHANGELOG.md#v160-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.7.0](service/lexmodelbuildingservice/CHANGELOG.md#v170-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.4.0](service/lexmodelsv2/CHANGELOG.md#v140-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.4.0](service/location/CHANGELOG.md#v140-2021-08-04) - * **Feature**: Updated to latest API model. - * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349)) -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.9.0](service/mediaconvert/CHANGELOG.md#v190-2021-08-04) - * **Feature**: Updated to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.8.0](service/medialive/CHANGELOG.md#v180-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.3.1](service/mgn/CHANGELOG.md#v131-2021-08-04) - * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349)) -* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.7.0](service/personalize/CHANGELOG.md#v170-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.2.0](service/proton/CHANGELOG.md#v120-2021-08-04) - * **Feature**: Updated to latest API model. - * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349)) -* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.5.0](service/qldb/CHANGELOG.md#v150-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.7.0](service/quicksight/CHANGELOG.md#v170-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.7.0](service/rds/CHANGELOG.md#v170-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.10.0](service/redshift/CHANGELOG.md#v1100-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.5.0](service/redshiftdata/CHANGELOG.md#v150-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.7.0](service/robomaker/CHANGELOG.md#v170-2021-08-04) - * **Feature**: Updated to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.8.0](service/route53/CHANGELOG.md#v180-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.0.0](service/route53recoverycluster/CHANGELOG.md#v100-2021-08-04) - * **Release**: New AWS service client module - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.0.0](service/route53recoverycontrolconfig/CHANGELOG.md#v100-2021-08-04) - * **Release**: New AWS service client module - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.0.0](service/route53recoveryreadiness/CHANGELOG.md#v100-2021-08-04) - * **Release**: New AWS service client module - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.12.0](service/s3/CHANGELOG.md#v1120-2021-08-04) - * **Feature**: Add `HeadObject` presign support. ([#1346](https://github.com/aws/aws-sdk-go-v2/pull/1346)) -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.9.0](service/s3control/CHANGELOG.md#v190-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.4.0](service/s3outposts/CHANGELOG.md#v140-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.11.0](service/sagemaker/CHANGELOG.md#v1110-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.5.0](service/secretsmanager/CHANGELOG.md#v150-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.8.0](service/securityhub/CHANGELOG.md#v180-2021-08-04) - * **Feature**: Updated to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.6.0](service/shield/CHANGELOG.md#v160-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.3.0](service/ssmcontacts/CHANGELOG.md#v130-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.2.0](service/ssmincidents/CHANGELOG.md#v120-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.5.0](service/ssoadmin/CHANGELOG.md#v150-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.4.0](service/synthetics/CHANGELOG.md#v140-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.4.0](service/textract/CHANGELOG.md#v140-2021-08-04) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.6.0](service/transcribe/CHANGELOG.md#v160-2021-08-04) - * **Feature**: Updated to latest API model. - -# Release (2021-07-15) - -## General Highlights -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/config`: [v1.5.0](config/CHANGELOG.md#v150-2021-07-15) - * **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints. -* `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`: [v1.3.0](feature/ec2/imds/CHANGELOG.md#v130-2021-07-15) - * **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints. -* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.5.0](service/acm/CHANGELOG.md#v150-2021-07-15) - * **Feature**: Updated service model to latest version. 
-* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.3.0](service/amp/CHANGELOG.md#v130-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.4.0](service/amplify/CHANGELOG.md#v140-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.3.0](service/amplifybackend/CHANGELOG.md#v130-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.10.0](service/autoscaling/CHANGELOG.md#v1100-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. -* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.6.0](service/chime/CHANGELOG.md#v160-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.7.0](service/cloudformation/CHANGELOG.md#v170-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.7.0](service/cloudfront/CHANGELOG.md#v170-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.5.0](service/cloudsearch/CHANGELOG.md#v150-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.6.0](service/cloudwatch/CHANGELOG.md#v160-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. -* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.6.0](service/databasemigrationservice/CHANGELOG.md#v160-2021-07-15) - * **Feature**: Updated service model to latest version. 
-* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.6.0](service/devopsguru/CHANGELOG.md#v160-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.6.0](service/directconnect/CHANGELOG.md#v160-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.8.0](service/docdb/CHANGELOG.md#v180-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.12.0](service/ec2/CHANGELOG.md#v1120-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.8.0](service/eks/CHANGELOG.md#v180-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.8.0](service/elasticache/CHANGELOG.md#v180-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. -* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.5.0](service/elasticbeanstalk/CHANGELOG.md#v150-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.5.0](service/elasticloadbalancing/CHANGELOG.md#v150-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. - * **Documentation**: Updated service model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.5.0](service/elasticloadbalancingv2/CHANGELOG.md#v150-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. - * **Documentation**: Updated service model to latest revision. 
-* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.6.0](service/fms/CHANGELOG.md#v160-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.6.0](service/frauddetector/CHANGELOG.md#v160-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.9.0](service/glue/CHANGELOG.md#v190-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.6.0](service/health/CHANGELOG.md#v160-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.5.0](service/healthlake/CHANGELOG.md#v150-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.7.0](service/iam/CHANGELOG.md#v170-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. - * **Documentation**: Updated service model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.7.0](service/imagebuilder/CHANGELOG.md#v170-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.8.0](service/iot/CHANGELOG.md#v180-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.8.0](service/iotsitewise/CHANGELOG.md#v180-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.9.0](service/kendra/CHANGELOG.md#v190-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.5.0](service/lambda/CHANGELOG.md#v150-2021-07-15) - * **Feature**: Updated service model to latest version. 
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.6.0](service/lexmodelbuildingservice/CHANGELOG.md#v160-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.8.0](service/lightsail/CHANGELOG.md#v180-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.5.1](service/macie/CHANGELOG.md#v151-2021-07-15) - * **Documentation**: Updated service model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.8.1](service/macie2/CHANGELOG.md#v181-2021-07-15) - * **Documentation**: Updated service model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.8.0](service/mediaconvert/CHANGELOG.md#v180-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.5.0](service/mediatailor/CHANGELOG.md#v150-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.3.0](service/mgn/CHANGELOG.md#v130-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.4.0](service/mq/CHANGELOG.md#v140-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.7.0](service/neptune/CHANGELOG.md#v170-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.6.0](service/outposts/CHANGELOG.md#v160-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.5.1](service/pricing/CHANGELOG.md#v151-2021-07-15) - * **Documentation**: Updated service model to latest revision. 
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.6.0](service/rds/CHANGELOG.md#v160-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.9.0](service/redshift/CHANGELOG.md#v190-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.10.0](service/sagemaker/CHANGELOG.md#v1100-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.5.0](service/ses/CHANGELOG.md#v150-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. -* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.7.0](service/sns/CHANGELOG.md#v170-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. - * **Documentation**: Updated service model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.7.0](service/sqs/CHANGELOG.md#v170-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.8.0](service/ssm/CHANGELOG.md#v180-2021-07-15) - * **Feature**: Updated service model to latest version. - * **Documentation**: Updated service model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.5.0](service/storagegateway/CHANGELOG.md#v150-2021-07-15) - * **Feature**: Updated service model to latest version. -* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.6.0](service/sts/CHANGELOG.md#v160-2021-07-15) - * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. 
- * **Documentation**: Updated service model to latest revision. -* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.5.0](service/wellarchitected/CHANGELOG.md#v150-2021-07-15) - * **Feature**: Updated service model to latest version. - -# Release (2021-07-01) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/internal/ini`: [v1.1.0](internal/ini/CHANGELOG.md#v110-2021-07-01) - * **Feature**: Support for `:`, `=`, `[`, `]` being present in expression values. -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.9.0](service/autoscaling/CHANGELOG.md#v190-2021-07-01) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.6.0](service/databrew/CHANGELOG.md#v160-2021-07-01) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.11.0](service/ec2/CHANGELOG.md#v1110-2021-07-01) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.8.0](service/glue/CHANGELOG.md#v180-2021-07-01) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.8.0](service/kendra/CHANGELOG.md#v180-2021-07-01) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.7.0](service/mediaconvert/CHANGELOG.md#v170-2021-07-01) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.6.0](service/mediapackagevod/CHANGELOG.md#v160-2021-07-01) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.8.0](service/redshift/CHANGELOG.md#v180-2021-07-01) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.9.0](service/sagemaker/CHANGELOG.md#v190-2021-07-01) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: 
[v1.7.0](service/servicediscovery/CHANGELOG.md#v170-2021-07-01) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.6.0](service/sqs/CHANGELOG.md#v160-2021-07-01) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.2.0](service/ssmcontacts/CHANGELOG.md#v120-2021-07-01) - * **Feature**: API client updated - -# Release (2021-06-25) - -## General Highlights -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.7.0 - * **Feature**: Adds configuration values for enabling endpoint discovery. - * **Bug Fix**: Keep Object-Lock headers as headers when presigning Sigv4 signing requests -* `github.com/aws/aws-sdk-go-v2/config`: [v1.4.0](config/CHANGELOG.md#v140-2021-06-25) - * **Feature**: Adds configuration setting for enabling endpoint discovery. -* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.3.0](credentials/CHANGELOG.md#v130-2021-06-25) - * **Bug Fix**: Fixed example usages of aws.CredentialsCache ([#1275](https://github.com/aws/aws-sdk-go-v2/pull/1275)) -* `github.com/aws/aws-sdk-go-v2/feature/cloudfront/sign`: [v1.2.0](feature/cloudfront/sign/CHANGELOG.md#v120-2021-06-25) - * **Feature**: Add UnmarshalJSON for AWSEpochTime to correctly unmarshal AWSEpochTime ([#1298](https://github.com/aws/aws-sdk-go-v2/pull/1298)) -* `github.com/aws/aws-sdk-go-v2/internal/configsources`: [v1.0.0](internal/configsources/CHANGELOG.md#v100-2021-06-25) - * **Release**: Release new modules -* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.2.0](service/amp/CHANGELOG.md#v120-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.3.0](service/amplify/CHANGELOG.md#v130-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: 
[v1.2.0](service/amplifybackend/CHANGELOG.md#v120-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.5.0](service/appflow/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.4.0](service/appmesh/CHANGELOG.md#v140-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.5.0](service/chime/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.5.0](service/cloud9/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.6.0](service/cloudformation/CHANGELOG.md#v160-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.6.0](service/cloudfront/CHANGELOG.md#v160-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.4.0](service/cloudsearch/CHANGELOG.md#v140-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.5.0](service/cloudwatch/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.5.0](service/cloudwatchevents/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.5.0](service/codebuild/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.5.0](service/codegurureviewer/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.4.0](service/cognitoidentity/CHANGELOG.md#v140-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: 
[v1.4.0](service/cognitoidentityprovider/CHANGELOG.md#v140-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.5.0](service/connect/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.3.0](service/dax/CHANGELOG.md#v130-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.7.0](service/docdb/CHANGELOG.md#v170-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.4.0](service/dynamodb/CHANGELOG.md#v140-2021-06-25) - * **Feature**: Adds support for endpoint discovery. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.10.0](service/ec2/CHANGELOG.md#v1100-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.7.0](service/elasticache/CHANGELOG.md#v170-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.4.0](service/elasticbeanstalk/CHANGELOG.md#v140-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.4.0](service/elasticloadbalancing/CHANGELOG.md#v140-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.4.0](service/elasticloadbalancingv2/CHANGELOG.md#v140-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.5.0](service/eventbridge/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.5.0](service/greengrass/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.4.0](service/greengrassv2/CHANGELOG.md#v140-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iam`: 
[v1.6.0](service/iam/CHANGELOG.md#v160-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery`: [v1.0.0](service/internal/endpoint-discovery/CHANGELOG.md#v100-2021-06-25) - * **Release**: Release new modules - * **Feature**: Module supporting endpoint-discovery across all service clients. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.7.0](service/iot/CHANGELOG.md#v170-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.3.0](service/iotanalytics/CHANGELOG.md#v130-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.7.0](service/kendra/CHANGELOG.md#v170-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.4.0](service/kms/CHANGELOG.md#v140-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.3.0](service/lexmodelsv2/CHANGELOG.md#v130-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.2.0](service/lexruntimev2/CHANGELOG.md#v120-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.5.0](service/licensemanager/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.2.0](service/lookoutmetrics/CHANGELOG.md#v120-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.4.0](service/managedblockchain/CHANGELOG.md#v140-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.6.0](service/mediaconnect/CHANGELOG.md#v160-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.7.0](service/medialive/CHANGELOG.md#v170-2021-06-25) - * **Feature**: API client updated -* 
`github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.4.0](service/mediatailor/CHANGELOG.md#v140-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.6.0](service/neptune/CHANGELOG.md#v160-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.1.0](service/proton/CHANGELOG.md#v110-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.6.0](service/quicksight/CHANGELOG.md#v160-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.5.0](service/ram/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.5.0](service/rds/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.7.0](service/redshift/CHANGELOG.md#v170-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.4.0](service/redshiftdata/CHANGELOG.md#v140-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.7.0](service/route53/CHANGELOG.md#v170-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.8.0](service/sagemaker/CHANGELOG.md#v180-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.4.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v140-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.7.0](service/securityhub/CHANGELOG.md#v170-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.4.0](service/ses/CHANGELOG.md#v140-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/snowball`: 
[v1.5.0](service/snowball/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.6.0](service/sns/CHANGELOG.md#v160-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.5.0](service/sqs/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.5.0](service/sts/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.3.0](service/timestreamquery/CHANGELOG.md#v130-2021-06-25) - * **Feature**: Adds support for endpoint discovery. -* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.3.0](service/timestreamwrite/CHANGELOG.md#v130-2021-06-25) - * **Feature**: Adds support for endpoint discovery. -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.5.0](service/transfer/CHANGELOG.md#v150-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.3.0](service/waf/CHANGELOG.md#v130-2021-06-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.6.0](service/wafv2/CHANGELOG.md#v160-2021-06-25) - * **Feature**: API client updated - -# Release (2021-06-11) - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.7.0](service/autoscaling/CHANGELOG.md#v170-2021-06-11) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.3.2](service/cloudtrail/CHANGELOG.md#v132-2021-06-11) - * **Documentation**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.3.3](service/cognitoidentityprovider/CHANGELOG.md#v133-2021-06-11) - * **Documentation**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.6.0](service/eks/CHANGELOG.md#v160-2021-06-11) - * **Feature**: Updated to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.6.0](service/fsx/CHANGELOG.md#v160-2021-06-11) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.6.0](service/glue/CHANGELOG.md#v160-2021-06-11) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.6.0](service/kendra/CHANGELOG.md#v160-2021-06-11) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.7.0](service/macie2/CHANGELOG.md#v170-2021-06-11) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.6.0](service/medialive/CHANGELOG.md#v160-2021-06-11) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.4.0](service/pi/CHANGELOG.md#v140-2021-06-11) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.0.0](service/proton/CHANGELOG.md#v100-2021-06-11) - * **Release**: New AWS service client module -* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.3.1](service/qldb/CHANGELOG.md#v131-2021-06-11) - * **Documentation**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.4.2](service/rds/CHANGELOG.md#v142-2021-06-11) - * **Documentation**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.7.0](service/sagemaker/CHANGELOG.md#v170-2021-06-11) - * **Feature**: Updated to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.4.1](service/transfer/CHANGELOG.md#v141-2021-06-11) - * **Documentation**: Updated to latest API model. - -# Release (2021-06-04) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.5.0](service/acmpca/CHANGELOG.md#v150-2021-06-04) - * **Feature**: Updated service client to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.6.0](service/autoscaling/CHANGELOG.md#v160-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.4.0](service/braket/CHANGELOG.md#v140-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.5.2](service/cloudfront/CHANGELOG.md#v152-2021-06-04) - * **Documentation**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.4.0](service/datasync/CHANGELOG.md#v140-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.3.0](service/devicefarm/CHANGELOG.md#v130-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.6.0](service/docdb/CHANGELOG.md#v160-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.9.0](service/ec2/CHANGELOG.md#v190-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.5.0](service/ecs/CHANGELOG.md#v150-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.7.0](service/forecast/CHANGELOG.md#v170-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.5.0](service/fsx/CHANGELOG.md#v150-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.5.1](service/iam/CHANGELOG.md#v151-2021-06-04) - * **Documentation**: Updated service client to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/internal/s3shared`: [v1.4.0](service/internal/s3shared/CHANGELOG.md#v140-2021-06-04) - * **Feature**: The handling of AccessPoint and Outpost ARNs has been updated. -* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.4.0](service/iotevents/CHANGELOG.md#v140-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.3.0](service/ioteventsdata/CHANGELOG.md#v130-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.6.0](service/iotsitewise/CHANGELOG.md#v160-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.6.0](service/iotwireless/CHANGELOG.md#v160-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.5.0](service/kendra/CHANGELOG.md#v150-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.6.1](service/lightsail/CHANGELOG.md#v161-2021-06-04) - * **Documentation**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.2.0](service/location/CHANGELOG.md#v120-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.2.0](service/mwaa/CHANGELOG.md#v120-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.4.0](service/outposts/CHANGELOG.md#v140-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.3.0](service/polly/CHANGELOG.md#v130-2021-06-04) - * **Feature**: Updated service client to latest API model. 
-* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.3.0](service/qldb/CHANGELOG.md#v130-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.3.2](service/resourcegroups/CHANGELOG.md#v132-2021-06-04) - * **Documentation**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.6.2](service/route53/CHANGELOG.md#v162-2021-06-04) - * **Documentation**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.4.2](service/route53resolver/CHANGELOG.md#v142-2021-06-04) - * **Documentation**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.10.0](service/s3/CHANGELOG.md#v1100-2021-06-04) - * **Feature**: The handling of AccessPoint and Outpost ARNs has been updated. - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.7.0](service/s3control/CHANGELOG.md#v170-2021-06-04) - * **Feature**: The handling of AccessPoint and Outpost ARNs has been updated. - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.5.0](service/servicediscovery/CHANGELOG.md#v150-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.5.0](service/sns/CHANGELOG.md#v150-2021-06-04) - * **Feature**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.4.2](service/sqs/CHANGELOG.md#v142-2021-06-04) - * **Documentation**: Updated service client to latest API model. -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.6.2](service/ssm/CHANGELOG.md#v162-2021-06-04) - * **Documentation**: Updated service client to latest API model. 
- -# Release (2021-05-25) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.4.0](service/cloudwatchlogs/CHANGELOG.md#v140-2021-05-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/commander`: [v1.1.0](service/commander/CHANGELOG.md#v110-2021-05-25) - * **Feature**: Deprecated module. The API client was incorrectly named. Use AWS Systems Manager Incident Manager (ssmincidents) instead. -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.5.0](service/computeoptimizer/CHANGELOG.md#v150-2021-05-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.6.0](service/costexplorer/CHANGELOG.md#v160-2021-05-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.8.0](service/ec2/CHANGELOG.md#v180-2021-05-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.4.0](service/efs/CHANGELOG.md#v140-2021-05-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.6.0](service/forecast/CHANGELOG.md#v160-2021-05-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.6.0](service/iot/CHANGELOG.md#v160-2021-05-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.4.0](service/opsworkscm/CHANGELOG.md#v140-2021-05-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.5.0](service/quicksight/CHANGELOG.md#v150-2021-05-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.9.0](service/s3/CHANGELOG.md#v190-2021-05-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.0.0](service/ssmincidents/CHANGELOG.md#v100-2021-05-25) - * **Release**: New AWS service client module -* 
`github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.4.0](service/transfer/CHANGELOG.md#v140-2021-05-25) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.4.0](service/workspaces/CHANGELOG.md#v140-2021-05-25) - * **Feature**: API client updated - -# Release (2021-05-20) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.6.0 - * **Feature**: `internal/ini`: This package has been migrated to a separate module at `github.com/aws/aws-sdk-go-v2/internal/ini`. -* `github.com/aws/aws-sdk-go-v2/config`: [v1.3.0](config/CHANGELOG.md#v130-2021-05-20) - * **Feature**: SSO credentials can now be defined alongside other credential providers within the same configuration profile. - * **Bug Fix**: Profile names were incorrectly normalized to lower-case, which could result in unexpected profile configurations. -* `github.com/aws/aws-sdk-go-v2/internal/ini`: [v1.0.0](internal/ini/CHANGELOG.md#v100-2021-05-20) - * **Release**: The `github.com/aws/aws-sdk-go-v2/internal/ini` package is now a Go Module. 
-* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.0.0](service/applicationcostprofiler/CHANGELOG.md#v100-2021-05-20) - * **Release**: New AWS service client module - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.0.0](service/apprunner/CHANGELOG.md#v100-2021-05-20) - * **Release**: New AWS service client module - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.5.0](service/autoscaling/CHANGELOG.md#v150-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.4.0](service/computeoptimizer/CHANGELOG.md#v140-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.6.0](service/detective/CHANGELOG.md#v160-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.5.0](service/eks/CHANGELOG.md#v150-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.6.0](service/elasticache/CHANGELOG.md#v160-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.4.0](service/elasticsearchservice/CHANGELOG.md#v140-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.5.0](service/iam/CHANGELOG.md#v150-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.5.0](service/imagebuilder/CHANGELOG.md#v150-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.5.0](service/iot/CHANGELOG.md#v150-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.4.0](service/iotdeviceadvisor/CHANGELOG.md#v140-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: 
[v1.5.0](service/iotsitewise/CHANGELOG.md#v150-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.4.0](service/kinesis/CHANGELOG.md#v140-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.3.0](service/kinesisanalytics/CHANGELOG.md#v130-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.4.0](service/kinesisanalyticsv2/CHANGELOG.md#v140-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.2.0](service/lexmodelsv2/CHANGELOG.md#v120-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.4.0](service/licensemanager/CHANGELOG.md#v140-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.6.0](service/lightsail/CHANGELOG.md#v160-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.4.0](service/macie/CHANGELOG.md#v140-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.6.0](service/macie2/CHANGELOG.md#v160-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.5.0](service/mediaconnect/CHANGELOG.md#v150-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.5.0](service/neptune/CHANGELOG.md#v150-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.5.0](service/personalize/CHANGELOG.md#v150-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.4.0](service/quicksight/CHANGELOG.md#v140-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.5.0](service/rekognition/CHANGELOG.md#v150-2021-05-20) - 
* **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.8.0](service/s3/CHANGELOG.md#v180-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.6.0](service/sagemaker/CHANGELOG.md#v160-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.3.0](service/sagemakera2iruntime/CHANGELOG.md#v130-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.6.0](service/securityhub/CHANGELOG.md#v160-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.3.0](service/support/CHANGELOG.md#v130-2021-05-20) - * **Feature**: API client updated -* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.4.0](service/transcribe/CHANGELOG.md#v140-2021-05-20) - * **Feature**: API client updated - -# Release (2021-05-14) - -## General Highlights -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/aws-sdk-go-v2`: v1.5.0 - * **Feature**: `AddSDKAgentKey` and `AddSDKAgentKeyValue` in `aws/middleware` package have been updated to direct metadata to `User-Agent` HTTP header. -* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.3.0](service/codeartifact/CHANGELOG.md#v130-2021-05-14) - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/commander`: [v1.0.0](service/commander/CHANGELOG.md#v100-2021-05-14) - * **Release**: New AWS service client module - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.5.0](service/configservice/CHANGELOG.md#v150-2021-05-14) - * **Feature**: Updated to latest service API model. 
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.4.0](service/connect/CHANGELOG.md#v140-2021-05-14) - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.7.0](service/ec2/CHANGELOG.md#v170-2021-05-14) - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.4.0](service/ecs/CHANGELOG.md#v140-2021-05-14) - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.4.0](service/eks/CHANGELOG.md#v140-2021-05-14) - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.0.0](service/finspace/CHANGELOG.md#v100-2021-05-14) - * **Release**: New AWS service client module - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.0.0](service/finspacedata/CHANGELOG.md#v100-2021-05-14) - * **Release**: New AWS service client module - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.4.0](service/iot/CHANGELOG.md#v140-2021-05-14) - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.5.0](service/iotwireless/CHANGELOG.md#v150-2021-05-14) - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.3.0](service/kinesis/CHANGELOG.md#v130-2021-05-14) - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.2.0](service/kinesisanalytics/CHANGELOG.md#v120-2021-05-14) - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.3.0](service/kinesisanalyticsv2/CHANGELOG.md#v130-2021-05-14) - * **Feature**: Updated to latest service API model. 
-* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.3.0](service/lakeformation/CHANGELOG.md#v130-2021-05-14) - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.1.0](service/lookoutmetrics/CHANGELOG.md#v110-2021-05-14) - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.5.0](service/mediaconvert/CHANGELOG.md#v150-2021-05-14) - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.6.0](service/route53/CHANGELOG.md#v160-2021-05-14) - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.7.0](service/s3/CHANGELOG.md#v170-2021-05-14) - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.6.0](service/s3control/CHANGELOG.md#v160-2021-05-14) - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.6.0](service/ssm/CHANGELOG.md#v160-2021-05-14) - * **Feature**: Updated to latest service API model. -* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.0.0](service/ssmcontacts/CHANGELOG.md#v100-2021-05-14) - * **Release**: New AWS service client module - * **Feature**: Updated to latest service API model. - -# Release 2021-05-06 - -## Breaking change -* `service/ec2` - v1.6.0 - * This release contains a breaking change to the Amazon EC2 API client. API number(int/int64/etc) and boolean members were changed from value, to pointer type. Your applications using the EC2 API client will fail to compile after upgrading for all members that were updated. To migrate to this module you'll need to update your application to use pointers for all number and boolean members in the API client module. The SDK provides helper utilities to convert between value and pointer types. 
For example the [aws.Bool](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Bool) function to get the address from a bool literal. Similar utilities are available for all other primitive types in the [aws](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws) package. - -## Service Client Highlights -* `service/acmpca` - v1.3.0 - * Feature: API client updated -* `service/apigateway` - v1.3.0 - * Feature: API client updated -* `service/auditmanager` - v1.4.0 - * Feature: API client updated -* `service/chime` - v1.3.0 - * Feature: API client updated -* `service/cloudformation` - v1.4.0 - * Feature: API client updated -* `service/cloudfront` - v1.4.0 - * Feature: API client updated -* `service/codegurureviewer` - v1.3.0 - * Feature: API client updated -* `service/connect` - v1.3.0 - * Feature: API client updated -* `service/customerprofiles` - v1.5.0 - * Feature: API client updated -* `service/devopsguru` - v1.3.0 - * Feature: API client updated -* `service/docdb` - v1.4.0 - * Feature: API client updated -* `service/ec2` - v1.6.0 - * Bug Fix: Fix incorrectly modeled Amazon EC2 number and boolean members in structures. The Amazon EC2 API client has been updated with a breaking change to fix all structure number and boolean members to be pointer types instead of value types. Fixes [#1107](https://github.com/aws/aws-sdk-go-v2/issues/1107), [#1178](https://github.com/aws/aws-sdk-go-v2/issues/1178), and [#1190](https://github.com/aws/aws-sdk-go-v2/issues/1190). This breaking change is made within the major version of the client's module, because the client operations failed and were unusable with value type number and boolean members with the EC2 API. 
- * Feature: API client updated -* `service/ecs` - v1.3.0 - * Feature: API client updated -* `service/eks` - v1.3.0 - * Feature: API client updated -* `service/forecast` - v1.4.0 - * Feature: API client updated -* `service/glue` - v1.4.0 - * Feature: API client updated -* `service/health` - v1.3.0 - * Feature: API client updated -* `service/iotsitewise` - v1.3.0 - * Feature: API client updated -* `service/iotwireless` - v1.4.0 - * Feature: API client updated -* `service/kafka` - v1.3.0 - * Feature: API client updated -* `service/kinesisanalyticsv2` - v1.2.0 - * Feature: API client updated -* `service/macie2` - v1.4.0 - * Feature: API client updated -* `service/marketplacecatalog` - v1.2.0 - * Feature: API client updated -* `service/mediaconvert` - v1.4.0 - * Feature: API client updated -* `service/mediapackage` - v1.4.0 - * Feature: API client updated -* `service/mediapackagevod` - v1.3.0 - * Feature: API client updated -* `service/mturk` - v1.2.0 - * Feature: API client updated -* `service/nimble` - v1.0.0 - * Feature: API client updated -* `service/organizations` - v1.3.0 - * Feature: API client updated -* `service/personalize` - v1.3.0 - * Feature: API client updated -* `service/robomaker` - v1.4.0 - * Feature: API client updated -* `service/route53` - v1.5.0 - * Feature: API client updated -* `service/s3` - v1.6.0 - * Bug Fix: Fix PutObject and UploadPart unseekable stream documentation link to point to the correct location. 
- * Feature: API client updated -* `service/sagemaker` - v1.4.0 - * Feature: API client updated -* `service/securityhub` - v1.4.0 - * Feature: API client updated -* `service/servicediscovery` - v1.3.0 - * Feature: API client updated -* `service/snowball` - v1.3.0 - * Feature: API client updated -* `service/sns` - v1.3.0 - * Feature: API client updated -* `service/ssm` - v1.5.0 - * Feature: API client updated -## Core SDK Highlights -* Dependency Update: Update smithy-go dependency to v1.4.0 -* Dependency Update: Updated SDK dependencies to their latest versions. -* `aws` - v1.4.0 - * Feature: Add support for FIPS global partition endpoints ([#1242](https://github.com/aws/aws-sdk-go-v2/pull/1242)) - -# Release 2021-04-23 -## Service Client Highlights -* `service/cloudformation` - v1.3.2 - * Documentation: Service Documentation Updates -* `service/cognitoidentityprovider` - v1.2.3 - * Documentation: Service Documentation Updates -* `service/costexplorer` - v1.4.0 - * Feature: Service API Updates -* `service/databasemigrationservice` - v1.3.0 - * Feature: Service API Updates -* `service/detective` - v1.4.0 - * Feature: Service API Updates -* `service/elasticache` - v1.4.0 - * Feature: Service API Updates -* `service/forecast` - v1.3.0 - * Feature: Service API Updates -* `service/groundstation` - v1.3.0 - * Feature: Service API Updates -* `service/kendra` - v1.3.0 - * Feature: Service API Updates -* `service/redshift` - v1.5.0 - * Feature: Service API Updates -* `service/savingsplans` - v1.2.0 - * Feature: Service API Updates -* `service/securityhub` - v1.3.0 - * Feature: Service API Updates -## Core SDK Highlights -* Dependency Update: Updated SDK dependencies to their latest versions. 
-* `feature/rds/auth` - v1.0.0 - * Feature: Add Support for Amazon RDS IAM Authentication - -# Release 2021-04-14 -## Service Client Highlights -* `service/codebuild` - v1.3.0 - * Feature: API client updated -* `service/codestarconnections` - v1.2.0 - * Feature: API client updated -* `service/comprehendmedical` - v1.2.0 - * Feature: API client updated -* `service/configservice` - v1.4.0 - * Feature: API client updated -* `service/ec2` - v1.5.0 - * Feature: API client updated -* `service/fsx` - v1.3.0 - * Feature: API client updated -* `service/lightsail` - v1.4.0 - * Feature: API client updated -* `service/mediaconnect` - v1.3.0 - * Feature: API client updated -* `service/rds` - v1.3.0 - * Feature: API client updated -* `service/redshift` - v1.4.0 - * Feature: API client updated -* `service/shield` - v1.3.0 - * Feature: API client updated -* `service/sts` - v1.3.0 - * Feature: API client updated -## Core SDK Highlights -* Dependency Update: Updated SDK dependencies to their latest versions. - -# Release 2021-04-08 -## Service Client Highlights -* Feature: API model sync -* `service/lookoutequipment` - v1.0.0 - * v1 Release: new service client -* `service/mgn` - v1.0.0 - * v1 Release: new service client -## Core SDK Highlights -* Dependency Update: smithy-go version bump -* Dependency Update: Updated SDK dependencies to their latest versions. - -# Release 2021-04-01 -## Service Client Highlights -* Bug Fix: Fix URL Path and RawQuery of resolved endpoint being ignored by the API client's request serialization. - * Fixes [issue#1191](https://github.com/aws/aws-sdk-go-v2/issues/1191) -* Refactored internal endpoints model for accessors -* Feature: updated to latest models -* New services - * `service/location` - v1.0.0 - * `service/lookoutmetrics` - v1.0.0 -## Core SDK Highlights -* Dependency Update: update smithy-go module -* Dependency Update: Updated SDK dependencies to their latest versions. 
- -# Release 2021-03-18 -## Service Client Highlights -* Bug Fix: Updated presign URLs to no longer include the X-Amz-User-Agent header -* Feature: Update API model -* Add New supported API -* `service/internal/s3shared` - v1.2.0 - * Feature: Support for S3 Object Lambda -* `service/s3` - v1.3.0 - * Bug Fix: Adds documentation to the PutObject and UploadPart operations Body member how to upload unseekable objects to an Amazon S3 Bucket. - * Feature: S3 Object Lambda is a new S3 feature that enables users to apply their own custom code to process the output of a standard S3 GET request by automatically invoking a Lambda function with a GET request -* `service/s3control` - v1.3.0 - * Feature: S3 Object Lambda is a new S3 feature that enables users to apply their own custom code to process the output of a standard S3 GET request by automatically invoking a Lambda function with a GET request -## Core SDK Highlights -* Dependency Update: Updated SDK dependencies to their latest versions. -* `aws` - v1.3.0 - * Feature: Add helper to V4 signer package to swap compute payload hash middleware with unsigned payload middleware -* `feature/s3/manager` - v1.1.0 - * Bug Fix: Add support for Amazon S3 Object Lambda feature. - * Feature: Updates for S3 Object Lambda feature - -# Release 2021-03-12 -## Service Client Highlights -* Bug Fix: Fixed a bug that could union shape types to be deserialized incorrectly -* Bug Fix: Fixed a bug where unboxed shapes that were marked as required were not serialized and sent over the wire, causing an API error from the service. -* Bug Fix: Fixed a bug with generated API Paginators' handling of nil input parameters causing a panic. -* Dependency Update: update smithy-go dependency -* `service/detective` - v1.1.2 - * Bug Fix: Fix deserialization of API response timestamp member. 
-* `service/docdb` - v1.2.0 - * Feature: Client now support presigned URL generation for CopyDBClusterSnapshot and CreateDBCluster operations by specifying the target SourceRegion -* `service/neptune` - v1.2.0 - * Feature: Client now support presigned URL generation for CopyDBClusterSnapshot and CreateDBCluster operations by specifying the target SourceRegion -* `service/s3` - v1.2.1 - * Bug Fix: Fixed an issue where ListObjectsV2 and ListParts paginators could loop infinitely - * Bug Fix: Fixed key encoding when addressing S3 Access Points -## Core SDK Highlights -* Dependency Update: Updated SDK dependencies to their latest versions. -* `config` - v1.1.2 - * Bug Fix: Fixed a panic when using WithEC2IMDSRegion without a specified IMDS client - -# Release 2021-02-09 -## Service Client Highlights -* `service/s3` - v1.2.0 - * Feature: adds support for s3 vpc endpoint interface [#1113](https://github.com/aws/aws-sdk-go-v2/pull/1113) -* `service/s3control` - v1.2.0 - * Feature: adds support for s3 vpc endpoint interface [#1113](https://github.com/aws/aws-sdk-go-v2/pull/1113) -## Core SDK Highlights -* Dependency Update: Updated SDK dependencies to their latest versions. -* `aws` - v1.2.0 - * Feature: support to add endpoint source on context. 
Adds getter/setter for the endpoint source [#1113](https://github.com/aws/aws-sdk-go-v2/pull/1113) -* `config` - v1.1.1 - * Bug Fix: Only Validate SSO profile configuration when attempting to use SSO credentials [#1103](https://github.com/aws/aws-sdk-go-v2/pull/1103) - * Bug Fix: Environment credentials were not taking precedence over AWS_PROFILE [#1103](https://github.com/aws/aws-sdk-go-v2/pull/1103) - -# Release 2021-01-29 -## Service Client Highlights -* Bug Fix: A serialization bug has been fixed that caused some service operations with empty inputs to not be serialized correctly ([#1071](https://github.com/aws/aws-sdk-go-v2/pull/1071)) -* Bug Fix: Fixes a bug that could cause a waiter to fail when comparing types ([#1083](https://github.com/aws/aws-sdk-go-v2/pull/1083)) -## Core SDK Highlights -* Feature: EndpointResolverFromURL helpers have been added for constructing a service EndpointResolver type ([#1066](https://github.com/aws/aws-sdk-go-v2/pull/1066)) -* Dependency Update: Updated SDK dependencies to their latest versions. -* `aws` - v1.1.0 - * Feature: Add support for specifying the EndpointSource on aws.Endpoint types ([#1070](https://github.com/aws/aws-sdk-go-v2/pull/1070/)) -* `config` - v1.1.0 - * Feature: Add Support for AWS Single Sign-On (SSO) credential provider ([#1072](https://github.com/aws/aws-sdk-go-v2/pull/1072)) -* `credentials` - v1.1.0 - * Feature: Add AWS Single Sign-On (SSO) credential provider ([#1072](https://github.com/aws/aws-sdk-go-v2/pull/1072)) - -# Release 2021-01-19 - -We are excited to announce the [General Availability](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-version-2-general-availability/) -(GA) release of the [AWS SDK for Go version 2 (v2)](https://github.com/aws/aws-sdk-go-v2). -This release follows the [Release candidate](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-version-2-v2-release-candidate) -of the AWS SDK for Go v2. 
Version 2 incorporates customer feedback from version 1 and takes advantage of modern Go language features. - -## Breaking Changes -* `aws`: Updated Config.Retryer member to be a func that returns aws.Retryer ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033)) - * Updates the SDK's references to Config.Retryer to be a function that returns aws.Retryer value. This ensures that custom retry options specified in the `aws.Config` are scoped to individual client instances. - * All API clients created with the config will call the `Config.Retryer` function to get an aws.Retryer. - * Removes duplicate `Retryer` interface from `retry` package. Single definition is `aws.Retryer` now. -* `aws/middleware`: Updates `AddAttemptClockSkewMiddleware` to use appropriate `AddRecordResponseTiming` naming ([#1031](https://github.com/aws/aws-sdk-go-v2/pull/1031)) - * Removes `ResponseMetadata` struct type, and adds its members to middleware metadata directly, to improve discoverability. -* `config`: Updated the `WithRetryer` helper to take a function that returns an aws.Retryer ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033)) - * All API clients created with the config will call the `Config.Retryer` function to get an aws.Retryer. -* `API Clients`: Fix SDK's API client enum constant name generation to have expected casing ([#1020](https://github.com/aws/aws-sdk-go-v2/pull/1020)) - * This updates of the generated enum const value names in API client's `types` package to have the expected casing. Prior to this, enum names were being generated with lowercase names instead of camel case. -* `API Clients`: Updates SDK's API client request middleware stack values to be scoped to individual operation call ([#1019](https://github.com/aws/aws-sdk-go-v2/pull/1019)) - * The API client request middleware stack values were mistakenly allowed to escape to nested API operation calls. This broke the SDK's presigners. 
Stack values that should not escape are now scoped to the individual operation call.
- * Fixes [#1016](https://github.com/aws/aws-sdk-go-v2/issues/1016) -* `service/s3`: Fix Unmarshaling `GetObjectAcl` operation's Grantee type response ([#1034](https://github.com/aws/aws-sdk-go-v2/pull/1034)) - * Updates the SDK's codegen for correctly deserializing XML attributes in tags with XML namespaces. - * Fixes [#1013](https://github.com/aws/aws-sdk-go-v2/issues/1013) -* `service/s3`: Fix Unmarshaling `GetBucketLocation` operation's response ([#1027](https://github.com/aws/aws-sdk-go-v2/pull/1027)) - * Fixes [#908](https://github.com/aws/aws-sdk-go-v2/issues/908) - -## Migrating from v2 preview SDK's v0.31.0 to v1.0.0 - -### aws.Config Retryer member - -If your application sets the `Config.Retryer` member the application will need -to be updated to set a function that returns an `aws.Retryer`. In addition, if -your application used the `config.WithRetryer` helper a function that returns -an `aws.Retryer` needs to be used. - -If your application used the `retry.Retryer` type, update to using the -`aws.Retryer` type instead. - -### API Client enum value names - -If your application used the enum values in the API Client's `types` package between v0.31.0 and the latest version of the client module you may need to update the naming of the enum value. The enum value name casing were updated to camel case instead lowercased. - -# Release 2020-12-23 - -We’re happy to announce the Release Candidate (RC) of the AWS SDK for Go v2. -This RC follows the developer preview release of the AWS SDK for Go v2. The SDK -has undergone a major rewrite from the v1 code base to incorporate your -feedback and to take advantage of modern Go language features. 
- -## Documentation -* Developer Guide: https://aws.github.io/aws-sdk-go-v2/docs/ -* API Reference docs: https://pkg.go.dev/github.com/aws/aws-sdk-go-v2 -* Migration Guide: https://aws.github.io/aws-sdk-go-v2/docs/migrating/ - -## Breaking Changes -* Dependency `github.com/awslabs/smithy-go` has been relocated to `github.com/aws/smithy-go` - * The `smithy-go` repository was moved from the `awslabs` GitHub organization to `aws`. - * `xml`, `httpbinding`, and `json` package relocated under `encoding` package. -* The module `ec2imds` moved to `feature/ec2/imds` path ([#984](https://github.com/aws/aws-sdk-go-v2/pull/984)) - * Moves the `ec2imds` feature module to be in common location as other SDK features. -* `aws/signer/v4`: Refactor AWS Sigv4 Signer and options types to allow function options ([#955](https://github.com/aws/aws-sdk-go-v2/pull/955)) - * Fixes [#917](https://github.com/aws/aws-sdk-go-v2/issues/917), [#960](https://github.com/aws/aws-sdk-go-v2/issues/960), [#958](https://github.com/aws/aws-sdk-go-v2/issues/958) -* `aws`: CredentialCache type updated to require constructor function ([#946](https://github.com/aws/aws-sdk-go-v2/pull/946)) - * Fixes [#940](https://github.com/aws/aws-sdk-go-v2/issues/940) -* `credentials`: ExpiryWindow and Jitter moved from credential provider to `CredentialCache` ([#946](https://github.com/aws/aws-sdk-go-v2/pull/946)) - * Moves ExpiryWindow and Jitter options to common option of the `CredentialCache` instead of duplicated across providers. - * Fixes [#940](https://github.com/aws/aws-sdk-go-v2/issues/940) -* `config`: Ensure shared credentials file has precedence over shared config file ([#990](https://github.com/aws/aws-sdk-go-v2/pull/990)) - * The shared config file was incorrectly overriding the shared credentials file when merging values. 
-* `config`: Add `context.Context` to `LoadDefaultConfig` ([#951](https://github.com/aws/aws-sdk-go-v2/pull/951)) - * Updates `config#LoadDefaultConfig` function to take `context.Context` as well as functional options for the `config#LoadOptions` type. - * Fixes [#926](https://github.com/aws/aws-sdk-go-v2/issues/926), [#819](https://github.com/aws/aws-sdk-go-v2/issues/819) -* `aws`: Rename `NoOpRetryer` to `NopRetryer` to have consistent naming with rest of SDK ([#987](https://github.com/aws/aws-sdk-go-v2/pull/987)) - * Fixes [#878](https://github.com/aws/aws-sdk-go-v2/issues/878) -* `service/s3control`: Change `S3InitiateRestoreObjectOperation.ExpirationInDays` from value to pointer type ([#988](https://github.com/aws/aws-sdk-go-v2/pull/988)) -* `aws`: `ReaderSeekerCloser` and `WriteAtBuffer` have been relocated to `feature/s3/manager`. - -## New Features -* *Waiters*: Add Waiter utilities for API clients ([aws/smithy-go#237](https://github.com/aws/smithy-go/pull/237)) - * Your application can now use Waiter utilities to wait for AWS resources. -* `feature/dynamodb/attributevalue`: Add Amazon DynamoDB Attribute value marshaler utility ([#948](https://github.com/aws/aws-sdk-go-v2/pull/948)) - * Adds a utility for marshaling Go types too and from Amazon DynamoDB AttributeValues. - * Also includes utility for converting from Amazon DynamoDB Streams AttributeValues to Amazon DynamoDB AttributeValues. -* `feature/dynamodbstreams/attributevalue`: Add Amazon DynamoDB Streams Attribute value marshaler utility ([#948](https://github.com/aws/aws-sdk-go-v2/pull/948)) - * Adds a utility for marshaling Go types too and from Amazon DynamoDB Streams AttributeValues. - * Also includes utility for converting from Amazon DynamoDB AttributeValues to Amazon DynamoDB Streams AttributeValues. 
-* `feature/dynamodb/expression`: Add Amazon DynamoDB expression utility ([#981](https://github.com/aws/aws-sdk-go-v2/pull/981)) - * Adds the expression utility to the SDK for easily building Amazon DynamoDB operation expressions in code. - -## Bug Fixes -* `service/s3`: Fix Presigner to configure client correctly for Amazon S3 ([#969](https://github.com/aws/aws-sdk-go-v2/pull/969)) -* service/s3: Fix deserialization of CompleteMultipartUpload ([#965](https://github.com/aws/aws-sdk-go-v2/pull/965) - * Fixes [#927](https://github.com/aws/aws-sdk-go-v2/issues/927) -* `codegen`: Fix API client union serialization ([#979](https://github.com/aws/aws-sdk-go-v2/pull/979)) - * Fixes [#978](https://github.com/aws/aws-sdk-go-v2/issues/978) - -## Service Client Highlights -* API Clients have been bumped to version `v0.31.0` -* Regenerate API Clients from updated API models adding waiter utilities, and union parameters. -* `codegen`: - * Add documentation to union API parameters describing valid member types, and usage example ([aws/smithy-go#239](https://github.com/aws/smithy-go/pull/239)) - * Normalize Metadata header map keys to be lower case ([aws/smithy-go#241](https://github.com/aws/smithy-go/pull/241)), ([#982](https://github.com/aws/aws-sdk-go-v2/pull/982)) - * Fixes [#376](https://github.com/aws/aws-sdk-go-v2/issues/376) Amazon S3 Metadata parameters keys are always returned as lower case. 
- * Fix API client deserialization of XML based responses ([aws/smithy-go#245](https://github.com/aws/smithy-go/pull/245)), ([#992](https://github.com/aws/aws-sdk-go-v2/pull/992)) - * Fixes [#910](https://github.com/aws/aws-sdk-go-v2/issues/910) -* `service/s3`, `service/s3control`: - * Add support for reading `s3_use_arn_region` from shared config file ([#991](https://github.com/aws/aws-sdk-go-v2/pull/991)) - * Add Utility for getting RequestID and HostID of response ([#983](https://github.com/aws/aws-sdk-go-v2/pull/983)) - -## Other changes -* Updates branch `HEAD` points from `master` to `main`. - * This should not impact your application, but if you have pull requests or forks of the SDK you may need to update the upstream branch your fork is based off of. - -## Migrating from v2 preview SDK's v0.30.0 to v0.31.0 release candidate - -### smithy-go module relocation - -If your application uses `smithy-go` utilities for request pipeline your application will need to be updated to refer to the new import path of `github.com/aws/smithy-go`. If you application did *not* use `smithy-go` utilities directly, your application will update automatically. - -### EC2 IMDS module relocation - -If your application used the `ec2imds` module, it has been relocated to `feature/ec2/imds`. Your application will need to update to the new import path, `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`. - -### CredentialsCache Constructor and ExpiryWindow Options - -The `aws#CredentialsCache` type was updated, and a new constructor function, `NewCredentialsCache` was added. This function needs to be used to initialize the `CredentialCache`. The constructor also has function options to specify additional configuration, e.g. ExpiryWindow and Jitter. 
- -If your application was specifying the `ExpiryWindow` with the `credentials/stscreds#AssumeRoleOptions`, `credentials/stscreds#WebIdentityRoleOptions`, `credentials/processcreds#Options`, or `credentials/ec2rolecrds#Options` types the `ExpiryWindow` option will need to specified on the `CredentialsCache` constructor instead. - -### AWS Sigv4 Signer Refactor - -The `aws/signer/v4` package's `Signer.SignHTTP` and `Signer.PresignHTTP` methods were updated to take functional options. If your application provided a custom implementation for API client's `HTTPSignerV4` or `HTTPPresignerV4` interfaces, that implementation will need to be updated for the new function signature. - -### Configuration Loading - -The `config#LoadDefaultConfig` function has been updated to require a `context.Context` as the first parameter, with additional optional function options as variadic additional arguments. Your application will need to update its usage of `LoadDefaultConfig` to pass in `context.Context` as the first parameter. If your application used the `With...` helpers those should continue to work without issue. - -The v2 SDK corrects its behavior to be inline with the AWS CLI and other AWS SDKs. Refer to https://docs.aws.amazon.com/credref/latest/refdocs/overview.html for more information how to use the shared config and credentials files. - -# Release 2020-11-30 - -## Breaking Change -* `codegen`: Add support for slice and maps generated with value members instead of pointer ([#887](https://github.com/aws/aws-sdk-go-v2/pull/887)) - * This update allow the SDK's code generation to be aware of API shapes and members that are not nullable, and can be rendered as value types by the code generation instead of pointer types. - * Several API client parameter types will change from pointer members to value members for slice, map, number and bool member types. - * See Migration notes for migrating to v0.30.0 with this change. 
-* `aws/transport/http`: Move aws.BuildableHTTPClient to HTTP transport package ([#898](https://github.com/aws/aws-sdk-go-v2/pull/898)) - * Moves the `BuildableHTTPClient` from the SDK's `aws` package to the `aws/transport/http` package as `BuildableClient` to with other HTTP specific utilities. -* `feature/cloudfront/sign`: Add CloudFront sign feature as module ([#884](https://github.com/aws/aws-sdk-go-v2/pull/884)) - * Moves `service/cloudfront/sign` package out of the `cloudfront` module, and into its own module as `github.com/aws/aws-sdk-go-v2/feature/cloudfront/sign`. - -## New Features -* `config`: Add a WithRetryer provider helper to the config loader ([#897](https://github.com/aws/aws-sdk-go-v2/pull/897)) - * Adds a `WithRetryer` configuration provider to the config loader as a convenience helper to set the `Retryer` on the `aws.Config` when its being loaded. -* `config`: Default to TLS 1.2 for HTTPS requests ([#892](https://github.com/aws/aws-sdk-go-v2/pull/892)) - * Updates the SDK's default HTTP client to use TLS 1.2 as the minimum TLS version for all HTTPS requests by default. - -## Bug Fixes -* `config`: Fix AWS_CA_BUNDLE usage while loading default config ([#912](https://github.com/aws/aws-sdk-go-v2/pull/)) - * Fixes the `LoadDefaultConfig`'s configuration provider order to correctly load a custom HTTP client prior to configuring the client for `AWS_CA_BUNDLE` environment variable. 
-* `service/s3`: Fix signature mismatch error for s3 ([#913](https://github.com/aws/aws-sdk-go-v2/pull/913)) - * Fixes ([#883](https://github.com/aws/aws-sdk-go-v2/issues/883)) -* `service/s3control`: - * Fix HostPrefix addition behavior for s3control ([#882](https://github.com/aws/aws-sdk-go-v2/pull/882)) - * Fixes ([#863](https://github.com/aws/aws-sdk-go-v2/issues/863)) - * Fix s3control error deserializer ([#875](https://github.com/aws/aws-sdk-go-v2/pull/875)) - * Fixes ([#864](https://github.com/aws/aws-sdk-go-v2/issues/864)) - -## Service Client Highlights -* Pagination support has been added to supported APIs. See [Using Operation Paginators](https://aws.github.io/aws-sdk-go-v2/docs/making-requests/#using-operation-paginators) in the Developer Guide. ([#885](https://github.com/aws/aws-sdk-go-v2/pull/885)) -* Logging support has been added to service clients. See [Logging](https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/logging/) in the Developer Guide. ([#872](https://github.com/aws/aws-sdk-go-v2/pull/872)) -* `service`: Add support for pre-signed URL clients for S3, RDS, EC2 service ([#888](https://github.com/aws/aws-sdk-go-v2/pull/888)) - * `service/s3`: operations `PutObject` and `GetObject` are now supported with s3 pre-signed url client. - * `service/ec2`: operation `CopySnapshot` is now supported with ec2 pre-signed url client. - * `service/rds`: operations `CopyDBSnapshot`, `CreateDBInstanceReadReplica`, `CopyDBClusterSnapshot`, `CreateDBCluster` are now supported with rds pre-signed url client. 
-* `service/s3`: Add support for S3 access point and S3 on outposts access point ARNs ([#870](https://github.com/aws/aws-sdk-go-v2/pull/870)) -* `service/s3control`: Adds support for S3 on outposts access point and S3 on outposts bucket ARNs ([#870](https://github.com/aws/aws-sdk-go-v2/pull/870)) - -## Migrating from v2 preview SDK's v0.29.0 to v0.30.0 - -### aws.BuildableHTTPClient move -The `aws`'s `BuildableHTTPClient` HTTP client implementation was moved to `aws/transport/http` as `BuildableClient`. If your application used the `aws.BuildableHTTPClient` type, update it to use the `BuildableClient` in the `aws/transport/http` package. - -### Slice and Map API member types -This release includes several code generation updates for API client's slice map members. Using API modeling metadata the Slice and map members are now generated as value types instead of pointer types. For your application this means that for these types, the SDK no longer will have pointer member types, and have value member types. - -To migrate to this change you'll need to remove the pointer handling for slice and map members, and instead use value type handling of the member values. - -### Boolean and Number API member types -Similar to the slice and map API member types being generated as value, the SDK's code generation now has metadata where the SDK can generate boolean and number members as value type instead of pointer types. - -To migrate to this change you'll need to remove the pointer handling for numbers and boolean member types, and instead use value handling. 
- -# Release 2020-10-30 - -## New Features -* Adds HostnameImmutable flag on aws.Endpoint to direct SDK if the associated endpoint is modifiable.([#848](https://github.com/aws/aws-sdk-go-v2/pull/848)) - -## Bug Fixes -* Fix SDK handling of xml based services - xml namespaces ([#858](https://github.com/aws/aws-sdk-go-v2/pull/858)) - * Fixes ([#850](https://github.com/aws/aws-sdk-go-v2/issues/850)) - -## Service Client Highlights -* API Clients have been bumped to version `v0.29.0` - * Regenerate API Clients from update API models. -* Improve client doc generation. - -## Core SDK Highlights -* Dependency Update: Updated SDK dependencies to their latest versions. - -## Migrating from v2 preview SDK's v0.28.0 to v0.29.0 -* API Clients ResolverOptions type renamed to EndpointResolverOptions - -# Release 2020-10-26 - -## New Features -* `service/s3`: Add support for Accelerate, and Dualstack ([#836](https://github.com/aws/aws-sdk-go-v2/pull/836)) -* `service/s3control`: Add support for Dualstack ([#836](https://github.com/aws/aws-sdk-go-v2/pull/836)) - -## Service Client Highlights -* API Clients have been bumped to version `v0.28.0` - * Regenerate API Clients from update API models. 
-* `service/s3`: Add support for Accelerate, and Dualstack ([#836](https://github.com/aws/aws-sdk-go-v2/pull/836)) -* `service/s3control`: Add support for Dualstack ([#836](https://github.com/aws/aws-sdk-go-v2/pull/836)) -* `service/route53`: Fix sanitizeURL customization to handle leading slash(`/`) [#846](https://github.com/aws/aws-sdk-go-v2/pull/846) - * Fixes [#843](https://github.com/aws/aws-sdk-go-v2/issues/843) -* `service/route53`: Fix codegen to correctly look for operations that need sanitize url ([#851](https://github.com/aws/aws-sdk-go-v2/pull/851)) - -## Core SDK Highlights -* `aws/protocol/restjson`: Fix unexpected JSON error response deserialization ([#837](https://github.com/aws/aws-sdk-go-v2/pull/837)) - * Fixes [#832](https://github.com/aws/aws-sdk-go-v2/issues/832) -* `example/service/s3/listobjects`: Add example for Amazon S3 ListObjectsV2 ([#838](https://github.com/aws/aws-sdk-go-v2/pull/838)) - -# Release 2020-10-16 - -## New Features -* `feature/s3/manager`: - * Initial `v0.1.0` release - * Add the Amazon S3 Upload and Download transfer manager ([#802](https://github.com/aws/aws-sdk-go-v2/pull/802)) - -## Service Client Highlights -* Clients have been bumped to version `v0.27.0` -* `service/machinelearning`: Add customization for setting client endpoint with PredictEndpoint value if set ([#782](https://github.com/aws/aws-sdk-go-v2/pull/782)) -* `service/s3`: Fix empty response body deserialization in case of error response ([#801](https://github.com/aws/aws-sdk-go-v2/pull/801)) - * Fixes xml deserialization util to correctly handle empty response body in case of an error response. 
-* `service/s3`: Add customization to auto fill Content-Md5 request header for Amazon S3 operations ([#812](https://github.com/aws/aws-sdk-go-v2/pull/812)) -* `service/s3`: Add fallback to using HTTP status code for error code ([#818](https://github.com/aws/aws-sdk-go-v2/pull/818)) - * Adds falling back to using the HTTP status code to create a API Error code when not error code is received from the service, such as HeadObject. -* `service/route53`: Add support for deserialzing `InvalidChangeBatch` API error ([#792](https://github.com/aws/aws-sdk-go-v2/pull/792)) -* `codegen`: Remove API client `Options` getter methods ([#788](https://github.com/aws/aws-sdk-go-v2/pull/788)) -* `codegen`: Regenerate API Client modeled endpoints ([#791](https://github.com/aws/aws-sdk-go-v2/pull/791)) -* `codegen`: Sort API Client struct member paramaters by required and alphabetical ([#787](https://github.com/aws/aws-sdk-go-v2/pull/787)) -* `codegen`: Add package docs to API client modules ([#821](https://github.com/aws/aws-sdk-go-v2/pull/821)) -* `codegen`: Rename `smithy-go`'s `smithy.OperationError` to `smithy.OperationInvokeError`. - -## Core SDK Highlights -* `config`: - * Bumped to `v0.2.0` - * Refactor Config Module, Add Config Package Documentation and Examples, Improve Overall SDK Readme ([#822](https://github.com/aws/aws-sdk-go-v2/pull/822)) -* `credentials`: - * Bumped to `v0.1.2` - * Strip Monotonic Clock Readings when Comparing Credential Expiry Time ([#789](https://github.com/aws/aws-sdk-go-v2/pull/789)) -* `ec2imds`: - * Bumped to `v0.1.2` - * Fix refreshing API token if expired ([#789](https://github.com/aws/aws-sdk-go-v2/pull/789)) - -## Migrating from v0.26.0 to v0.27.0 - -#### Configuration - -The `config` module's exported types were trimmed down to add clarity and reduce confusion. Additional changes to the `config` module' helpers. - -* Refactored `WithCredentialsProvider`, `WithHTTPClient`, and `WithEndpointResolver` to functions instead of structs. 
-* Removed `MFATokenFuncProvider`, use `AssumeRoleCredentialOptionsProvider` for setting options for `stscreds.AssumeRoleOptions`. -* Renamed `WithWebIdentityCredentialProviderOptions` to `WithWebIdentityRoleCredentialOptions` -* Renamed `AssumeRoleCredentialProviderOptions` to `AssumeRoleCredentialOptionsProvider` -* Renamed `EndpointResolverFuncProvider` to `EndpointResolverProvider` - -#### API Client -* API Client `Options` type getter methods have been removed. Use the struct members instead. -* The error returned by API Client operations was renamed from `smithy.OperationError` to `smithy.OperationInvokeError`. - -# Release 2020-09-30 - -## Service Client Highlights -* Service clients have been bumped to `v0.26.0` simplify the documentation experience when using [pkg.go.dev](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2). -* `service/s3`: Disable automatic decompression of getting Amazon S3 objects with the `Content-Encoding: gzip` metadata header. ([#748](https://github.com/aws/aws-sdk-go-v2/pull/748)) - * This changes the SDK's default behavior with regard to making S3 API calls. The client will no longer automatically set the `Accept-Encoding` HTTP request header, nor will it automatically decompress the gzipped response when the `Content-Encoding: gzip` response header was received. - * If you'd like the client to sent the `Accept-Encoding: gzip` request header, you can add this header to the API operation method call with the [SetHeaderValue](https://pkg.go.dev/github.com/awslabs/smithy-go/transport/http#SetHeaderValue). middleware helper. -* `service/cloudfront/sign`: Fix cloudfront example usage of SignWithPolicy ([#673](https://github.com/aws/aws-sdk-go-v2/pull/673)) - * Fixes [#671](https://github.com/aws/aws-sdk-go-v2/issues/671) documentation typo by correcting the usage of `SignWithPolicy`. 
- -## Core SDK Highlights -* SDK core module released at `v0.26.0` -* `config` module released at `v0.1.1` -* `credentials` module released at `v0.1.1` -* `ec2imds` module released at `v0.1.1` - -# Release 2020-09-28 -## Announcements -We’re happy to share the updated clients for the v0.25.0 preview version of the AWS SDK for Go V2. - -The updated clients leverage new developments and advancements within AWS and the Go software ecosystem at large since -our original preview announcement. Using the new clients will be a bit different than before. The key differences are: -simplified API operation invocation, performance improvements, support for error wrapping, and a new middleware architecture. -So below we have a guided walkthrough to help try it out and share your feedback in order to better influence the features -you’d like to see in the GA version. - -See [Announcement Blog Post](https://aws.amazon.com/blogs/developer/client-updates-in-the-preview-version-of-the-aws-sdk-for-go-v2/) for more details. - -## Service Client Highlights -* Initial service clients released at version `v0.1.0` -## Core SDK Highlights -* SDK core module released at `v0.25.0` -* `config` module released at `v0.1.0` -* `credentials` module released at `v0.1.0` -* `ec2imds` module released at `v0.1.0` - -## Migrating from v2 preview SDK's v0.24.0 to v0.25.0 - -#### Design changes - -The v2 preview SDK `v0.25.0` release represents a significant stepping stone bringing the v2 SDK closer to its target design and usability. This release includes significant breaking changes to the v2 preview SDK. 
The updates in the `v0.25.0` release focus on refactoring and modularization of the SDK’s API clients to use the new [client design](https://github.com/aws/aws-sdk-go-v2/issues/438), updated request pipeline (aka [middleware](https://pkg.go.dev/github.com/awslabs/smithy-go/middleware)), refactored [credential providers](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials), and [configuration loading](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) packages. - -We've also bumped the minimum supported Go version with this release. Starting with v0.25.0 the SDK requires a minimum version of Go `v1.15`. - -As a part of the refactoring done to v2 preview SDK some components have not been included in this update. The following is a non exhaustive list of features that are not available. - -* API Paginators - [#439](https://github.com/aws/aws-sdk-go-v2/issues/439) -* API Waiters - [#442](https://github.com/aws/aws-sdk-go-v2/issues/442) -* Presign URL - [#794](https://github.com/aws/aws-sdk-go-v2/issues/794) -* Amazon S3 Upload and Download manager - [#802](https://github.com/aws/aws-sdk-go-v2/pull/802) -* Amazon DynamoDB's AttributeValue marshaler, and Expression package - [#790](https://github.com/aws/aws-sdk-go-v2/issues/790) -* Debug Logging - [#594](https://github.com/aws/aws-sdk-go-v2/issues/594) - -We expect additional breaking changes to the v2 preview SDK in the coming releases. We expect these changes to focus on organizational, naming, and hardening the SDK's design for future feature capabilities after it is released for general availability. - -#### Relocated Packages - -In this release packages within the SDK were relocated, and in some cases those packages were converted to Go modules. The following is a list of packages have were relocated. 
- -* `github.com/aws/aws-sdk-go-v2/aws/external` => `github.com/aws/aws-sdk-go-v2/config` module -* `github.com/aws/aws-sdk-go-v2/aws/ec2metadata` => `github.com/aws/aws-sdk-go-v2/ec2imds` module - -The `github.com/aws/aws-sdk-go-v2/credentials` module contains refactored credentials providers. - -* `github.com/aws/aws-sdk-go-v2/ec2rolecreds` => `github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds` -* `github.com/aws/aws-sdk-go-v2/endpointcreds` => `github.com/aws/aws-sdk-go-v2/credentials/endpointcreds` -* `github.com/aws/aws-sdk-go-v2/processcreds` => `github.com/aws/aws-sdk-go-v2/credentials/processcreds` -* `github.com/aws/aws-sdk-go-v2/stscreds` => `github.com/aws/aws-sdk-go-v2/credentials/stscreds` - -#### Modularization - -New modules were added to the v2 preview SDK to allow the components to be versioned independently from each other. This allows your application to depend on specific versions of an API client module, and take discrete updates from the SDK core and other API client modules as desired. - -* [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) -* [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) -* Module for each API client, e.g. [github.com/aws/aws-sdk-go-v2/service/s3](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3) - -#### API Clients - -The following is a list of the major changes to the API client modules - -* Removed paginators: we plan to add these back once they are implemented to integrate with the SDK's new API client design. -* Removed waiters: we need to further investigate how the V2 SDK should expose waiters, and how their behavior should be modeled. -* API Clients are now Go modules. When migrating to the v2 preview SDK `v0.25.0`, you'll need to add the API client's module to your application's go.mod file. -* API parameter nested types have been moved to a `types` package within the API client's module, e.g. 
`github.com/aws/aws-sdk-go-v2/service/s3/types` These types were moved to improve documentation and discovery of the API client, operation, and input/output types. For example Amazon S3's ListObject's operation [ListObjectOutput.Contents](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3/#ListObjectsOutput) input parameter is a slice of [types.Object](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3/types#Object). -* The client operation method has been renamed, removing the `Request` suffix. The method now invokes the operation instead of constructing a request, which needed to be invoked separately. The operation methods were also expanded to include functional options for providing operation specific configuration, such as modifying the request pipeline. - -```go -result, err := client.Scan(context.TODO(), &dynamodb.ScanInput{ - TableName: aws.String("exampleTable"), -}, func(o *Options) { - // Limit operation calls to only 1 attempt. - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, 1) -}) -``` - -#### Configuration - -In addition to the `github.com/aws/aws-sdk-go-v2/aws/external` package being made a module at `github.com/aws/aws-sdk-go-v2/config`, the `LoadDefaultAWSConfig` function was renamed to `LoadDefaultConfig`. - -The `github.com/aws/aws-sdk-go-v2/aws/defaults` package has been removed. Its components have been migrated to the `github.com/aws/aws-sdk-go-v2/aws` package, and `github.com/aws/aws-sdk-go-v2/config` module. - -#### Error Handling - -The `github.com/aws/aws-sdk-go-v2/aws/awserr` package was removed as a part of the SDK error handling refactor. The SDK now uses typed errors built around [Go v1.13](https://golang.org/doc/go1.13#error_wrapping)'s [errors.As](https://pkg.go.dev/errors#As) and [errors.Unwrap](https://pkg.go.dev/errors#Unwrap) features. All SDK error types that wrap other errors implement the `Unwrap` method. Generic v2 preview SDK errors created with `fmt.Errorf` use `%w` to wrap the underlying error. 
- -The SDK API clients now include generated public error types for errors modeled for an API. The SDK will automatically deserialize the error response from the API into the appropriate error type. Your application should use `errors.As` to check if the returned error matches one it is interested in. Your application can also use the generic interface [smithy.APIError](https://pkg.go.dev/github.com/awslabs/smithy-go/#APIError) to test if the API client's operation method returned an API error, but not check against a specific error. - -API client errors returned to the caller will use error wrapping to layer the error values. This allows underlying error types to be specific to their use case, and the SDK's more generic error types to wrap the underlying error. - -For example, if an [Amazon DynamoDB](https://aws.amazon.com/dynamodb/) [Scan](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/dynamodb#Scan) operation call cannot find the `TableName` requested, the error returned will contain [dynamodb.ResourceNotFoundException](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/dynamodb/types#ResourceNotFoundException). The SDK will return this error value wrapped in a couple layers, with each layer adding additional contextual information such as [ResponseError](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/transport/http#ResponseError) for AWS HTTP response error metadata , and [smithy.OperationError](https://pkg.go.dev/github.com/awslabs/smithy-go/#OperationError) for API operation call metadata. 
- -```go -result, err := client.Scan(context.TODO(), params) -if err != nil { - // To get a specific API error - var notFoundErr *types.ResourceNotFoundException - if errors.As(err, ¬FoundErr) { - log.Printf("scan failed because the table was not found, %v", - notFoundErr.ErrorMessage()) - } - - // To get any API error - var apiErr smithy.APIError - if errors.As(err, &apiErr) { - log.Printf("scan failed because of an API error, Code: %v, Message: %v", - apiErr.ErrorCode(), apiErr.ErrorMessage()) - } - - // To get the AWS response metadata, such as RequestID - var respErr *awshttp.ResponseError // Using import alias "awshttp" for package github.com/aws/aws-sdk-go-v2/aws/transport/http - if errors.As(err, &respErr) { - log.Printf("scan failed with HTTP status code %v, Request ID %v and error %v", - respErr.HTTPStatusCode(), respErr.ServiceRequestID(), respErr) - } - - return err -} -``` - -Logging an error value will include information from each wrapped error. For example, the following is a mock error logged for a Scan operation call that failed because the table was not found. - -> 2020/10/15 16:03:37 operation error DynamoDB: Scan, https response error StatusCode: 400, RequestID: ABCREQUESTID123, ResourceNotFoundException: Requested resource not found - -#### Endpoints - -The `github.com/aws/aws-sdk-go-v2/aws/endpoints` has been removed from the SDK, along with all exported endpoint definitions and iteration behavior. Each generated API client now includes its own endpoint definition internally to the module. - -API clients can optionally be configured with a generic [aws.EndpointResolver](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#EndpointResolver) via the [aws.Config.EndpointResolver](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config.EndpointResolver). If the API client is not configured with a custom endpoint resolver it will defer to the endpoint resolver the client module was generated with. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md b/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md deleted file mode 100644 index 5b627cfa6..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,4 +0,0 @@ -## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). -For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact -opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md b/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md deleted file mode 100644 index 5e59bba7b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md +++ /dev/null @@ -1,177 +0,0 @@ -# Contributing to the AWS SDK for Go - -Thank you for your interest in contributing to the AWS SDK for Go! -We work hard to provide a high-quality and useful SDK, and we greatly value -feedback and contributions from our community. Whether it's a bug report, -new feature, correction, or additional documentation, we welcome your issues -and pull requests. Please read through this document before submitting any -[issues] or [pull requests][pr] to ensure we have all the necessary information to -effectively respond to your bug report or contribution. - -Jump To: - -* [Bug Reports](#bug-reports) -* [Feature Requests](#feature-requests) -* [Code Contributions](#code-contributions) - -## How to contribute - -*Before you send us a pull request, please be sure that:* - -1. You're working from the latest source on the `main` branch. -2. You check existing open, and recently closed, pull requests to be sure - that someone else hasn't already addressed the problem. -3. You create an issue before working on a contribution that will take a - significant amount of your time. - -*Creating a Pull Request* - -1. Fork the repository. -2. 
In your fork, make your change in a branch that's based on this repo's `main` branch. -3. Commit the change to your fork, using a clear and descriptive commit message. -4. Create a pull request, answering any questions in the pull request form. - -For contributions that will take a significant amount of time, open a new -issue to pitch your idea before you get started. Explain the problem and -describe the content you want to see added to the documentation. Let us know -if you'll write it yourself or if you'd like us to help. We'll discuss your -proposal with you and let you know whether we're likely to accept it. - -## Bug Reports - -You can file bug reports against the SDK on the [GitHub issues][issues] page. - -If you are filing a report for a bug or regression in the SDK, it's extremely -helpful to provide as much information as possible when opening the original -issue. This helps us reproduce and investigate the possible bug without having -to wait for this extra information to be provided. Please read the following -guidelines prior to filing a bug report. - -1. Search through existing [issues][] to ensure that your specific issue has - not yet been reported. If it is a common issue, it is likely there is - already a bug report for your problem. - -2. Ensure that you have tested the latest version of the SDK. Although you - may have an issue against an older version of the SDK, we cannot provide - bug fixes for old versions. It's also possible that the bug may have been - fixed in the latest release. - -3. Provide as much information about your environment, SDK version, and - relevant dependencies as possible. For example, let us know what version - of Go you are using, which and version of the operating system, and the - the environment your code is running in. e.g Container. - -4. Provide a minimal test case that reproduces your issue or any error - information you related to your problem. 
We can provide feedback much - more quickly if we know what operations you are calling in the SDK. If - you cannot provide a full test case, provide as much code as you can - to help us diagnose the problem. Any relevant information should be provided - as well, like whether this is a persistent issue, or if it only occurs - some of the time. - -## Feature Requests - -Open an [issue][issues] with the following: - -* A short, descriptive title. Ideally, other community members should be able - to get a good idea of the feature just from reading the title. -* A detailed description of the the proposed feature. - * Why it should be added to the SDK. - * If possible, example code to illustrate how it should work. -* Use Markdown to make the request easier to read; -* If you intend to implement this feature, indicate that you'd like to the issue to be assigned to you. - -## Code Contributions - -We are always happy to receive code and documentation contributions to the SDK. -Please be aware of the following notes prior to opening a pull request: - -1. The SDK is released under the [Apache license][license]. Any code you submit - will be released under that license. For substantial contributions, we may - ask you to sign a [Contributor License Agreement (CLA)][cla]. - -2. If you would like to implement support for a significant feature that is not - yet available in the SDK, please talk to us beforehand to avoid any - duplication of effort. - -3. Wherever possible, pull requests should contain tests as appropriate. - Bugfixes should contain tests that exercise the corrected behavior (i.e., the - test should fail without the bugfix and pass with it), and new features - should be accompanied by tests exercising the feature. - -4. Pull requests that contain failing tests will not be merged until the test - failures are addressed. Pull requests that cause a significant drop in the - SDK's test coverage percentage are unlikely to be merged until tests have - been added. - -5. 
The JSON files under the SDK's `models` folder are sourced from outside the SDK. - Such as `models/apis/ec2/2016-11-15/api.json`. We will not accept pull requests - directly on these models. If you discover an issue with the models please - create a [GitHub issue][issues] describing the issue. - -### Testing - -To run the tests locally, running the `make unit` command will `go get` the -SDK's testing dependencies, and run vet, link and unit tests for the SDK. - -``` -make unit -``` - -Standard go testing functionality is supported as well. To test SDK code that -is tagged with `codegen` you'll need to set the build tag in the go test -command. The `make unit` command will do this automatically. - -``` -go test -tags codegen ./private/... -``` - -See the `Makefile` for additional testing tags that can be used in testing. - -To test on multiple platform the SDK includes several DockerFiles under the -`awstesting/sandbox` folder, and associated make recipes to to execute -unit testing within environments configured for specific Go versions. - -``` -make sandbox-test-go18 -``` - -To run all sandbox environments use the following make recipe - -``` -# Optionally update the Go tip that will be used during the batch testing -make update-aws-golang-tip - -# Run all SDK tests for supported Go versions in sandboxes -make sandbox-test -``` - -In addition the sandbox environment include make recipes for interactive modes -so you can run command within the Docker container and context of the SDK. - -``` -make sandbox-go18 -``` - -### Changelog Documents - -You can see all release changes in the `CHANGELOG.md` file at the root of the -repository. The release notes added to this file will contain service client -updates, and major SDK changes. When submitting a pull request please include an entry in `CHANGELOG_PENDING.md` under the appropriate changelog type so your changelog entry is included on the following release. 
- -#### Changelog Types - -* `SDK Features` - For major additive features, internal changes that have -outward impact, or updates to the SDK foundations. This will result in a minor -version change. -* `SDK Enhancements` - For minor additive features or incremental sized changes. -This will result in a patch version change. -* `SDK Bugs` - For minor changes that resolve an issue. This will result in a -patch version change. - -[issues]: https://github.com/aws/aws-sdk-go-v2/issues -[pr]: https://github.com/aws/aws-sdk-go-v2/pulls -[license]: http://aws.amazon.com/apache2.0/ -[cla]: http://en.wikipedia.org/wiki/Contributor_License_Agreement -[releasenotes]: https://github.com/aws/aws-sdk-go-v2/releases - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md b/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md deleted file mode 100644 index 4c9be94a2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md +++ /dev/null @@ -1,15 +0,0 @@ -Open Discussions ---- -The following issues are currently open for community feedback. -All discourse must adhere to the [Code of Conduct] policy. - -* [Refactoring API Client Paginators](https://github.com/aws/aws-sdk-go-v2/issues/439) -* [Refactoring API Client Waiters](https://github.com/aws/aws-sdk-go-v2/issues/442) -* [Refactoring API Client Enums and Types to Discrete Packages](https://github.com/aws/aws-sdk-go-v2/issues/445) -* [SDK Modularization](https://github.com/aws/aws-sdk-go-v2/issues/444) - -Past Discussions ---- -The issues listed here are for documentation purposes, and is used to capture issues and their associated discussions. 
- -[Code of Conduct]: https://github.com/aws/aws-sdk-go-v2/blob/main/CODE_OF_CONDUCT.md diff --git a/vendor/github.com/aws/aws-sdk-go-v2/Makefile b/vendor/github.com/aws/aws-sdk-go-v2/Makefile deleted file mode 100644 index e089ad351..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/Makefile +++ /dev/null @@ -1,538 +0,0 @@ -# Lint rules to ignore -LINTIGNORESINGLEFIGHT='internal/sync/singleflight/singleflight.go:.+error should be the last type' -LINT_IGNORE_S3MANAGER_INPUT='feature/s3/manager/upload.go:.+struct field SSEKMSKeyId should be SSEKMSKeyID' -# Names of these are tied to endpoint rules and they're internal so ignore them -LINT_IGNORE_AWSRULESFN_ARN='internal/endpoints/awsrulesfn/arn.go' -LINT_IGNORE_AWSRULESFN_PARTITION='internal/endpoints/awsrulesfn/partition.go' - -UNIT_TEST_TAGS= -BUILD_TAGS=-tags "example,codegen,integration,ec2env,perftest" - -SMITHY_GO_SRC ?= $(shell pwd)/../smithy-go - -SDK_MIN_GO_VERSION ?= 1.19 - -EACHMODULE_FAILFAST ?= true -EACHMODULE_FAILFAST_FLAG=-fail-fast=${EACHMODULE_FAILFAST} - -EACHMODULE_CONCURRENCY ?= 1 -EACHMODULE_CONCURRENCY_FLAG=-c ${EACHMODULE_CONCURRENCY} - -EACHMODULE_SKIP ?= -EACHMODULE_SKIP_FLAG=-skip="${EACHMODULE_SKIP}" - -EACHMODULE_FLAGS=${EACHMODULE_CONCURRENCY_FLAG} ${EACHMODULE_FAILFAST_FLAG} ${EACHMODULE_SKIP_FLAG} - -# SDK's Core and client packages that are compatible with Go 1.9+. -SDK_CORE_PKGS=./aws/... ./internal/... -SDK_CLIENT_PKGS=./service/... -SDK_COMPA_PKGS=${SDK_CORE_PKGS} ${SDK_CLIENT_PKGS} - -# SDK additional packages that are used for development of the SDK. 
-SDK_EXAMPLES_PKGS= -SDK_ALL_PKGS=${SDK_COMPA_PKGS} ${SDK_EXAMPLES_PKGS} - -RUN_NONE=-run NONE -RUN_INTEG=-run '^TestInteg_' - -CODEGEN_RESOURCES_PATH=$(shell pwd)/codegen/smithy-aws-go-codegen/src/main/resources/software/amazon/smithy/aws/go/codegen -CODEGEN_API_MODELS_PATH=$(shell pwd)/codegen/sdk-codegen/aws-models -ENDPOINTS_JSON=${CODEGEN_RESOURCES_PATH}/endpoints.json -ENDPOINT_PREFIX_JSON=${CODEGEN_RESOURCES_PATH}/endpoint-prefix.json - -LICENSE_FILE=$(shell pwd)/LICENSE.txt - -SMITHY_GO_VERSION ?= -PRE_RELEASE_VERSION ?= -RELEASE_MANIFEST_FILE ?= -RELEASE_CHGLOG_DESC_FILE ?= - -REPOTOOLS_VERSION ?= latest -REPOTOOLS_MODULE = github.com/awslabs/aws-go-multi-module-repository-tools -REPOTOOLS_CMD_ANNOTATE_STABLE_GEN = ${REPOTOOLS_MODULE}/cmd/annotatestablegen@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_MAKE_RELATIVE = ${REPOTOOLS_MODULE}/cmd/makerelative@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_CALCULATE_RELEASE = ${REPOTOOLS_MODULE}/cmd/calculaterelease@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_UPDATE_REQUIRES = ${REPOTOOLS_MODULE}/cmd/updaterequires@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_UPDATE_MODULE_METADATA = ${REPOTOOLS_MODULE}/cmd/updatemodulemeta@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_EDIT_MODULE_DEPENDENCY = ${REPOTOOLS_MODULE}/cmd/editmoduledependency@${REPOTOOLS_VERSION} - -REPOTOOLS_CALCULATE_RELEASE_VERBOSE ?= false -REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG=-v=${REPOTOOLS_CALCULATE_RELEASE_VERBOSE} - -REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS ?= - -ifneq ($(PRE_RELEASE_VERSION),) - REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION} -endif - -.PHONY: all -all: generate unit - -################### -# Code Generation # -################### -.PHONY: generate smithy-generate 
smithy-build smithy-build-% smithy-clean smithy-go-publish-local format \ -gen-config-asserts gen-repo-mod-replace gen-mod-replace-smithy gen-mod-dropreplace-smithy-% gen-aws-ptrs tidy-modules-% \ -add-module-license-files sync-models sync-endpoints-model sync-endpoints.json clone-v1-models gen-internal-codegen \ -sync-api-models copy-attributevalue-feature min-go-version-% update-requires smithy-annotate-stable \ -update-module-metadata download-modules-% - -generate: smithy-generate update-requires gen-repo-mod-replace update-module-metadata smithy-annotate-stable \ -gen-config-asserts gen-internal-codegen copy-attributevalue-feature gen-mod-dropreplace-smithy-. min-go-version-. \ -tidy-modules-. add-module-license-files gen-aws-ptrs format - -generate-tmpreplace-smithy: smithy-generate update-requires gen-repo-mod-replace update-module-metadata smithy-annotate-stable \ -gen-config-asserts gen-internal-codegen copy-attributevalue-feature gen-mod-replace-smithy-. min-go-version-. \ -tidy-modules-. add-module-license-files gen-aws-ptrs format gen-mod-dropreplace-smithy-. reset-sum - -reset-sum: - find . -name go.sum -exec git checkout -- {} \; - -smithy-generate: - cd codegen && ./gradlew clean build -Plog-tests && ./gradlew clean - -smithy-build: - cd codegen && ./gradlew clean build -Plog-tests - -smithy-build-%: - @# smithy-build- command that uses the pattern to define build filter that - @# the smithy API model service id starts with. Strips off the - @# "smithy-build-". - @# - @# e.g. smithy-build-com.amazonaws.rds - @# e.g. 
smithy-build-com.amazonaws.rds#AmazonRDSv19 - cd codegen && \ - SMITHY_GO_BUILD_API="$(subst smithy-build-,,$@)" ./gradlew clean build -Plog-tests - -smithy-annotate-stable: - go run ${REPOTOOLS_CMD_ANNOTATE_STABLE_GEN} - -smithy-clean: - cd codegen && ./gradlew clean - -smithy-go-publish-local: - rm -rf /tmp/smithy-go-local - git clone https://github.com/aws/smithy-go /tmp/smithy-go-local - make -C /tmp/smithy-go-local smithy-clean smithy-publish-local - -format: - gofmt -w -s . - -gen-config-asserts: - @echo "Generating SDK config package implementor assertions" - cd config \ - && go mod tidy \ - && go generate - -gen-internal-codegen: - @echo "Generating internal/codegen" - cd internal/codegen \ - && go mod tidy \ - && go generate - -gen-repo-mod-replace: - @echo "Generating go.mod replace for repo modules" - go run ${REPOTOOLS_CMD_MAKE_RELATIVE} - -gen-mod-replace-smithy-%: - @# gen-mod-replace-smithy- command that uses the pattern to define build filter that - @# for modules to add replace to. Strips off the "gen-mod-replace-smithy-". - @# - @# SMITHY_GO_SRC environment variable is the path to add replace to - @# - @# e.g. gen-mod-replace-smithy-service_ssooidc - cd ./internal/repotools/cmd/eachmodule \ - && go run . -p $(subst _,/,$(subst gen-mod-replace-smithy-,,$@)) ${EACHMODULE_FLAGS} \ - "go mod edit -replace github.com/aws/smithy-go=${SMITHY_GO_SRC}" - -gen-mod-dropreplace-smithy-%: - @# gen-mod-dropreplace-smithy- command that uses the pattern to define build filter that - @# for modules to add replace to. Strips off the "gen-mod-dropreplace-smithy-". - @# - @# e.g. gen-mod-dropreplace-smithy-service_ssooidc - cd ./internal/repotools/cmd/eachmodule \ - && go run . 
-p $(subst _,/,$(subst gen-mod-dropreplace-smithy-,,$@)) ${EACHMODULE_FLAGS} \ - "go mod edit -dropreplace github.com/aws/smithy-go" - -gen-aws-ptrs: - cd aws && go generate - -tidy-modules-%: - @# tidy command that uses the pattern to define the root path that the - @# module testing will start from. Strips off the "tidy-modules-" and - @# replaces all "_" with "/". - @# - @# e.g. tidy-modules-internal_protocoltest - cd ./internal/repotools/cmd/eachmodule \ - && go run . -p $(subst _,/,$(subst tidy-modules-,,$@)) ${EACHMODULE_FLAGS} \ - "go mod tidy" - -download-modules-%: - @# download command that uses the pattern to define the root path that the - @# module testing will start from. Strips off the "download-modules-" and - @# replaces all "_" with "/". - @# - @# e.g. download-modules-internal_protocoltest - cd ./internal/repotools/cmd/eachmodule \ - && go run . -p $(subst _,/,$(subst download-modules-,,$@)) ${EACHMODULE_FLAGS} \ - "go mod download all" - -add-module-license-files: - cd internal/repotools/cmd/eachmodule && \ - go run . -skip-root \ - "cp $(LICENSE_FILE) ." - -sync-models: sync-endpoints-model sync-api-models - -sync-endpoints-model: sync-endpoints.json - -sync-endpoints.json: - [[ ! -z "${ENDPOINTS_MODEL}" ]] && cp ${ENDPOINTS_MODEL} ${ENDPOINTS_JSON} || echo "ENDPOINTS_MODEL not set, must not be empty" - -clone-v1-models: - rm -rf /tmp/aws-sdk-go-model-sync - git clone https://github.com/aws/aws-sdk-go.git --depth 1 /tmp/aws-sdk-go-model-sync - -sync-api-models: - cd internal/repotools/cmd/syncAPIModels && \ - go run . \ - -m ${API_MODELS} \ - -o ${CODEGEN_API_MODELS_PATH} - -copy-attributevalue-feature: - cd ./feature/dynamodbstreams/attributevalue && \ - find . -name "*.go" | grep -v "doc.go" | xargs -I % rm % && \ - find ../../dynamodb/attributevalue -name "*.go" | grep -v "doc.go" | xargs -I % cp % . 
&& \ - ls *.go | grep -v "convert.go" | grep -v "doc.go" | \ - xargs -I % sed -i.bk -E 's:github.com/aws/aws-sdk-go-v2/(service|feature)/dynamodb:github.com/aws/aws-sdk-go-v2/\1/dynamodbstreams:g' % && \ - ls *.go | grep -v "convert.go" | grep -v "doc.go" | \ - xargs -I % sed -i.bk 's:DynamoDB:DynamoDBStreams:g' % && \ - ls *.go | grep -v "doc.go" | \ - xargs -I % sed -i.bk 's:dynamodb\.:dynamodbstreams.:g' % && \ - sed -i.bk 's:streams\.:ddbtypes.:g' "convert.go" && \ - sed -i.bk 's:ddb\.:streams.:g' "convert.go" && \ - sed -i.bk 's:ddbtypes\.:ddb.:g' "convert.go" &&\ - sed -i.bk 's:Streams::g' "convert.go" && \ - rm -rf ./*.bk && \ - go mod tidy && \ - gofmt -w -s . && \ - go test . - -min-go-version-%: - cd ./internal/repotools/cmd/eachmodule \ - && go run . -p $(subst _,/,$(subst min-go-version-,,$@)) ${EACHMODULE_FLAGS} \ - "go mod edit -go=${SDK_MIN_GO_VERSION}" - -update-requires: - go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} - -update-module-metadata: - go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} - -################ -# Unit Testing # -################ -.PHONY: unit unit-race unit-test unit-race-test unit-race-modules-% unit-modules-% build build-modules-% \ -go-build-modules-% test test-race-modules-% test-modules-% cachedep cachedep-modules-% api-diff-modules-% - -unit: lint unit-modules-. -unit-race: lint unit-race-modules-. - -unit-test: test-modules-. -unit-race-test: test-race-modules-. - -unit-race-modules-%: - @# unit command that uses the pattern to define the root path that the - @# module testing will start from. Strips off the "unit-race-modules-" and - @# replaces all "_" with "/". - @# - @# e.g. unit-race-modules-internal_protocoltest - cd ./internal/repotools/cmd/eachmodule \ - && go run . -p $(subst _,/,$(subst unit-race-modules-,,$@)) ${EACHMODULE_FLAGS} \ - "go vet ${BUILD_TAGS} --all ./..." \ - "go test ${BUILD_TAGS} ${RUN_NONE} ./..." \ - "go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..." 
- -unit-modules-%: - @# unit command that uses the pattern to define the root path that the - @# module testing will start from. Strips off the "unit-modules-" and - @# replaces all "_" with "/". - @# - @# e.g. unit-modules-internal_protocoltest - cd ./internal/repotools/cmd/eachmodule \ - && go run . -p $(subst _,/,$(subst unit-modules-,,$@)) ${EACHMODULE_FLAGS} \ - "go vet ${BUILD_TAGS} --all ./..." \ - "go test ${BUILD_TAGS} ${RUN_NONE} ./..." \ - "go test -timeout=1m ${UNIT_TEST_TAGS} ./..." - -build: build-modules-. - -build-modules-%: - @# build command that uses the pattern to define the root path that the - @# module testing will start from. Strips off the "build-modules-" and - @# replaces all "_" with "/". - @# - @# e.g. build-modules-internal_protocoltest - cd ./internal/repotools/cmd/eachmodule \ - && go run . -p $(subst _,/,$(subst build-modules-,,$@)) ${EACHMODULE_FLAGS} \ - "go test ${BUILD_TAGS} ${RUN_NONE} ./..." - -go-build-modules-%: - @# build command that uses the pattern to define the root path that the - @# module testing will start from. Strips off the "build-modules-" and - @# replaces all "_" with "/". - @# - @# Validates that all modules in the repo have buildable Go files. - @# - @# e.g. go-build-modules-internal_protocoltest - cd ./internal/repotools/cmd/eachmodule \ - && go run . -p $(subst _,/,$(subst go-build-modules-,,$@)) ${EACHMODULE_FLAGS} \ - "go build ${BUILD_TAGS} ./..." - -test: test-modules-. - -test-race-modules-%: - @# Test command that uses the pattern to define the root path that the - @# module testing will start from. Strips off the "test-race-modules-" and - @# replaces all "_" with "/". - @# - @# e.g. test-race-modules-internal_protocoltest - cd ./internal/repotools/cmd/eachmodule \ - && go run . -p $(subst _,/,$(subst test-race-modules-,,$@)) ${EACHMODULE_FLAGS} \ - "go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..." 
- -test-modules-%: - @# Test command that uses the pattern to define the root path that the - @# module testing will start from. Strips off the "test-modules-" and - @# replaces all "_" with "/". - @# - @# e.g. test-modules-internal_protocoltest - cd ./internal/repotools/cmd/eachmodule \ - && go run . -p $(subst _,/,$(subst test-modules-,,$@)) ${EACHMODULE_FLAGS} \ - "go test -timeout=1m ${UNIT_TEST_TAGS} ./..." - -cachedep: cachedep-modules-. - -cachedep-modules-%: - @# build command that uses the pattern to define the root path that the - @# module caching will start from. Strips off the "cachedep-modules-" and - @# replaces all "_" with "/". - @# - @# e.g. cachedep-modules-internal_protocoltest - cd ./internal/repotools/cmd/eachmodule \ - && go run . -p $(subst _,/,$(subst cachedep-modules-,,$@)) ${EACHMODULE_FLAGS} \ - "go mod download" - -api-diff-modules-%: - @# Command that uses the pattern to define the root path that the - @# module testing will start from. Strips off the "api-diff-modules-" and - @# replaces all "_" with "/". - @# - @# Requires golang.org/x/exp/cmd/gorelease to be available in the GOPATH. - @# - @# e.g. api-diff-modules-internal_protocoltest - cd ./internal/repotools/cmd/eachmodule \ - && go run . -p $(subst _,/,$(subst api-diff-modules-,,$@)) \ - -fail-fast=true \ - -c 1 \ - -skip="internal/repotools" \ - "$$(go env GOPATH)/bin/gorelease" - -############## -# CI Testing # -############## -.PHONY: ci-test ci-test-no-generate ci-test-generate-validate - -ci-test: generate unit-race ci-test-generate-validate -ci-test-no-generate: unit-race - -ci-test-generate-validate: - @echo "CI test validate no generated code changes" - git update-index --assume-unchanged go.mod go.sum - git add . 
-A - gitstatus=`git diff --cached --ignore-space-change`; \ - echo "$$gitstatus"; \ - if [ "$$gitstatus" != "" ] && [ "$$gitstatus" != "skipping validation" ]; then echo "$$gitstatus"; exit 1; fi - git update-index --no-assume-unchanged go.mod go.sum - -ci-lint: ci-lint-. - -ci-lint-%: - @# Run golangci-lint command that uses the pattern to define the root path that the - @# module check will start from. Strips off the "ci-lint-" and - @# replaces all "_" with "/". - @# - @# e.g. ci-lint-internal_protocoltest - cd ./internal/repotools/cmd/eachmodule \ - && go run . -p $(subst _,/,$(subst ci-lint-,,$@)) \ - -fail-fast=false \ - -c 1 \ - -skip="internal/repotools" \ - "golangci-lint run" - -ci-lint-install: - @# Installs golangci-lint at GoPATH. - @# This should be used to run golangci-lint locally. - @# - go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest - -####################### -# Integration Testing # -####################### -.PHONY: integration integ-modules-% cleanup-integ-buckets - -integration: integ-modules-service - -integ-modules-%: - @# integration command that uses the pattern to define the root path that - @# the module testing will start from. Strips off the "integ-modules-" and - @# replaces all "_" with "/". - @# - @# e.g. test-modules-service_dynamodb - cd ./internal/repotools/cmd/eachmodule \ - && go run . -p $(subst _,/,$(subst integ-modules-,,$@)) ${EACHMODULE_FLAGS} \ - "go test -timeout=10m -tags "integration" -v ${RUN_INTEG} -count 1 ./..." - -cleanup-integ-buckets: - @echo "Cleaning up SDK integration resources" - go run -tags "integration" ./internal/awstesting/cmd/bucket_cleanup/main.go "aws-sdk-go-integration" - -############## -# Benchmarks # -############## -.PHONY: bench bench-modules-% - -bench: bench-modules-. - -bench-modules-%: - @# benchmark command that uses the pattern to define the root path that - @# the module testing will start from. Strips off the "bench-modules-" and - @# replaces all "_" with "/". 
- @# - @# e.g. bench-modules-service_dynamodb - cd ./internal/repotools/cmd/eachmodule \ - && go run . -p $(subst _,/,$(subst bench-modules-,,$@)) ${EACHMODULE_FLAGS} \ - "go test -timeout=10m -bench . --benchmem ${BUILD_TAGS} ${RUN_NONE} ./..." - -##################### -# Release Process # -##################### -.PHONY: preview-release pre-release-validation release - -ls-changes: - go run ${REPOTOOLS_CMD_CHANGELOG} ls - -preview-release: - go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG} ${REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS} - -pre-release-validation: - @if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \ - echo "RELEASE_MANIFEST_FILE is required to specify the file to write the release manifest" && false; \ - fi - @if [[ -z "${RELEASE_CHGLOG_DESC_FILE}" ]]; then \ - echo "RELEASE_CHGLOG_DESC_FILE is required to specify the file to write the release notes" && false; \ - fi - -release: pre-release-validation - go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG} ${REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS} - go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE} - go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE} - go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE} - go run ${REPOTOOLS_CMD_CHANGELOG} rm -all - go run ${REPOTOOLS_CMD_TAG_RELEASE} -release ${RELEASE_MANIFEST_FILE} - -############## -# Repo Tools # -############## -.PHONY: install-repotools - -install-repotools: - go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} - -set-smithy-go-version: - @if [[ -z "${SMITHY_GO_VERSION}" ]]; then \ - echo "SMITHY_GO_VERSION is required to update SDK's smithy-go module dependency version" && false; \ - fi - go run ${REPOTOOLS_CMD_EDIT_MODULE_DEPENDENCY} -s "github.com/aws/smithy-go" -v "${SMITHY_GO_VERSION}" - -################## -# 
Linting/Verify # -################## -.PHONY: verify lint vet vet-modules-% sdkv1check - -verify: lint vet sdkv1check - -lint: - @echo "go lint SDK and vendor packages" - @lint=`golint ./...`; \ - dolint=`echo "$$lint" | grep -E -v \ - -e ${LINT_IGNORE_S3MANAGER_INPUT} \ - -e ${LINTIGNORESINGLEFIGHT} \ - -e ${LINT_IGNORE_AWSRULESFN_ARN} \ - -e ${LINT_IGNORE_AWSRULESFN_PARTITION}`; \ - echo "$$dolint"; \ - if [ "$$dolint" != "" ]; then exit 1; fi - -vet: vet-modules-. - -vet-modules-%: - cd ./internal/repotools/cmd/eachmodule \ - && go run . -p $(subst _,/,$(subst vet-modules-,,$@)) ${EACHMODULE_FLAGS} \ - "go vet ${BUILD_TAGS} --all ./..." - -sdkv1check: - @echo "Checking for usage of AWS SDK for Go v1" - @sdkv1usage=`go list -test -f '''{{ if not .Standard }}{{ range $$_, $$name := .Imports }} * {{ $$.ImportPath }} -> {{ $$name }}{{ print "\n" }}{{ end }}{{ range $$_, $$name := .TestImports }} *: {{ $$.ImportPath }} -> {{ $$name }}{{ print "\n" }}{{ end }}{{ end}}''' ./... | sort -u | grep '''/aws-sdk-go/'''`; \ - echo "$$sdkv1usage"; \ - if [ "$$sdkv1usage" != "" ]; then exit 1; fi - -list-deps: list-deps-. - -list-deps-%: - @# command that uses the pattern to define the root path that the - @# module testing will start from. Strips off the "list-deps-" and - @# replaces all "_" with "/". - @# - @# Trim output to only include stdout for list of dependencies only. - @# make list-deps 2>&- - @# - @# e.g. list-deps-internal_protocoltest - @cd ./internal/repotools/cmd/eachmodule \ - && go run . 
-p $(subst _,/,$(subst list-deps-,,$@)) ${EACHMODULE_FLAGS} \ - "go list -m all | grep -v 'github.com/aws/aws-sdk-go-v2'" | sort -u - -################### -# Sandbox Testing # -################### -.PHONY: sandbox-tests sandbox-build-% sandbox-run-% sandbox-test-% update-aws-golang-tip - -sandbox-tests: sandbox-test-go1.15 sandbox-test-go1.16 sandbox-test-go1.17 sandbox-test-go1.18 sandbox-test-go1.19 sandbox-test-go1.20 sandbox-test-gotip - -sandbox-build-%: - @# sandbox-build-go1.17 - @# sandbox-build-gotip - @if [ $@ == sandbox-build-gotip ]; then\ - docker build \ - -f ./internal/awstesting/sandbox/Dockerfile.test.gotip \ - -t "aws-sdk-go-$(subst sandbox-build-,,$@)" . ;\ - else\ - docker build \ - --build-arg GO_VERSION=$(subst sandbox-build-go,,$@) \ - -f ./internal/awstesting/sandbox/Dockerfile.test.goversion \ - -t "aws-sdk-go-$(subst sandbox-build-,,$@)" . ;\ - fi - -sandbox-run-%: sandbox-build-% - @# sandbox-run-go1.17 - @# sandbox-run-gotip - docker run -i -t "aws-sdk-go-$(subst sandbox-run-,,$@)" bash -sandbox-test-%: sandbox-build-% - @# sandbox-test-go1.17 - @# sandbox-test-gotip - docker run -t "aws-sdk-go-$(subst sandbox-test-,,$@)" - -update-aws-golang-tip: - docker build --no-cache=true -f ./internal/awstesting/sandbox/Dockerfile.golang-tip -t "aws-golang:tip" . 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/README.md b/vendor/github.com/aws/aws-sdk-go-v2/README.md deleted file mode 100644 index 9a6d0f4fd..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/README.md +++ /dev/null @@ -1,165 +0,0 @@ -# AWS SDK for Go v2 - -[![Go Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml)[![Codegen Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml) [![SDK Documentation](https://img.shields.io/badge/SDK-Documentation-blue)](https://aws.github.io/aws-sdk-go-v2/docs/) [![Migration Guide](https://img.shields.io/badge/Migration-Guide-blue)](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) [![API Reference](https://img.shields.io/badge/api-reference-blue.svg)](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) - -`aws-sdk-go-v2` is the v2 AWS SDK for the Go programming language. - -The v2 SDK requires a minimum version of `Go 1.19`. - -Check out the [release notes](https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md) for information about the latest bug -fixes, updates, and features added to the SDK. 
- -Jump To: -* [Getting Started](#getting-started) -* [Getting Help](#getting-help) -* [Contributing](#feedback-and-contributing) -* [More Resources](#resources) - -## Maintenance and support for SDK major versions - -For information about maintenance and support for SDK major versions and their underlying dependencies, see the -following in the AWS SDKs and Tools Shared Configuration and Credentials Reference Guide: - -* [AWS SDKs and Tools Maintenance Policy](https://docs.aws.amazon.com/credref/latest/refdocs/maint-policy.html) -* [AWS SDKs and Tools Version Support Matrix](https://docs.aws.amazon.com/credref/latest/refdocs/version-support-matrix.html) - -### Go version support policy - -The v2 SDK follows the upstream [release policy](https://go.dev/doc/devel/release#policy) -with an additional six months of support for the most recently deprecated -language version. - -**AWS reserves the right to drop support for unsupported Go versions earlier to -address critical security issues.** - -## Getting started -To get started working with the SDK setup your project for Go modules, and retrieve the SDK dependencies with `go get`. -This example shows how you can use the v2 SDK to make an API request using the SDK's [Amazon DynamoDB] client. 
- -###### Initialize Project -```sh -$ mkdir ~/helloaws -$ cd ~/helloaws -$ go mod init helloaws -``` -###### Add SDK Dependencies -```sh -$ go get github.com/aws/aws-sdk-go-v2/aws -$ go get github.com/aws/aws-sdk-go-v2/config -$ go get github.com/aws/aws-sdk-go-v2/service/dynamodb -``` - -###### Write Code -In your preferred editor add the following content to `main.go` - -```go -package main - -import ( - "context" - "fmt" - "log" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/service/dynamodb" -) - -func main() { - // Using the SDK's default configuration, loading additional config - // and credentials values from the environment variables, shared - // credentials, and shared configuration files - cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-west-2")) - if err != nil { - log.Fatalf("unable to load SDK config, %v", err) - } - - // Using the Config value, create the DynamoDB client - svc := dynamodb.NewFromConfig(cfg) - - // Build the request with its input parameters - resp, err := svc.ListTables(context.TODO(), &dynamodb.ListTablesInput{ - Limit: aws.Int32(5), - }) - if err != nil { - log.Fatalf("failed to list tables, %v", err) - } - - fmt.Println("Tables:") - for _, tableName := range resp.TableNames { - fmt.Println(tableName) - } -} -``` - -###### Compile and Execute -```sh -$ go run . -Tables: -tableOne -tableTwo -``` - -## Getting Help - -Please use these community resources for getting help. We use the GitHub issues -for tracking bugs and feature requests. - -* Ask us a [question](https://github.com/aws/aws-sdk-go-v2/discussions/new?category=q-a) or open a [discussion](https://github.com/aws/aws-sdk-go-v2/discussions/new?category=general). -* If you think you may have found a bug, please open an [issue](https://github.com/aws/aws-sdk-go-v2/issues/new/choose). 
-* Open a support ticket with [AWS Support](http://docs.aws.amazon.com/awssupport/latest/user/getting-started.html). - -This SDK implements AWS service APIs. For general issues regarding the AWS services and their limitations, you may also take a look at the [Amazon Web Services Discussion Forums](https://forums.aws.amazon.com/). - -### Opening Issues - -If you encounter a bug with the AWS SDK for Go we would like to hear about it. -Search the [existing issues][Issues] and see -if others are also experiencing the same issue before opening a new issue. Please -include the version of AWS SDK for Go, Go language, and OS you’re using. Please -also include reproduction case when appropriate. - -The GitHub issues are intended for bug reports and feature requests. For help -and questions with using AWS SDK for Go please make use of the resources listed -in the [Getting Help](#getting-help) section. -Keeping the list of open issues lean will help us respond in a timely manner. - -## Feedback and contributing - -The v2 SDK will use GitHub [Issues] to track feature requests and issues with the SDK. In addition, we'll use GitHub [Projects] to track large tasks spanning multiple pull requests, such as refactoring the SDK's internal request lifecycle. You can provide feedback to us in several ways. - -**GitHub issues**. To provide feedback or report bugs, file GitHub [Issues] on the SDK. This is the preferred mechanism to give feedback so that other users can engage in the conversation, +1 issues, etc. Issues you open will be evaluated, and included in our roadmap for the GA launch. - -**Contributing**. You can open pull requests for fixes or additions to the AWS SDK for Go 2.0. All pull requests must be submitted under the Apache 2.0 license and will be reviewed by an SDK team member before being merged in. Accompanying unit tests, where possible, are appreciated. 
- -## Resources - -[SDK Developer Guide](https://aws.github.io/aws-sdk-go-v2/docs/) - Use this document to learn how to get started and -use the AWS SDK for Go V2. - -[SDK Migration Guide](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) - Use this document to learn how to migrate to V2 from the AWS SDK for Go. - -[SDK API Reference Documentation](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) - Use this -document to look up all API operation input and output parameters for AWS -services supported by the SDK. The API reference also includes documentation of -the SDK, and examples how to using the SDK, service client API operations, and -API operation require parameters. - -[Service Documentation](https://aws.amazon.com/documentation/) - Use this -documentation to learn how to interface with AWS services. These guides are -great for getting started with a service, or when looking for more -information about a service. While this document is not required for coding, -services may supply helpful samples to look out for. 
- -[Forum](https://forums.aws.amazon.com/forum.jspa?forumID=293) - Ask questions, get help, and give feedback - -[Issues] - Report issues, submit pull requests, and get involved - (see [Apache 2.0 License][license]) - -[Dep]: https://github.com/golang/dep -[Issues]: https://github.com/aws/aws-sdk-go-v2/issues -[Projects]: https://github.com/aws/aws-sdk-go-v2/projects -[CHANGELOG]: https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md -[Amazon DynamoDB]: https://aws.amazon.com/dynamodb/ -[design]: https://github.com/aws/aws-sdk-go-v2/blob/main/DESIGN.md -[license]: http://aws.amazon.com/apache2.0/ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 84b19df69..d4384d4e6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.22.1" +const goModuleVersion = "1.23.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go index 2de15528c..d66f0960a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go @@ -65,6 +65,9 @@ func GetServiceID(ctx context.Context) (v string) { // // Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues // to clear all stack values. +// +// Deprecated: This value is unstable. The resolved signing name is available +// in the signer properties object passed to the signer. func GetSigningName(ctx context.Context) (v string) { v, _ = middleware.GetStackValue(ctx, signingNameKey{}).(string) return v @@ -74,6 +77,9 @@ func GetSigningName(ctx context.Context) (v string) { // // Scoped to stack values. 
Use github.com/aws/smithy-go/middleware#ClearStackValues // to clear all stack values. +// +// Deprecated: This value is unstable. The resolved signing region is available +// in the signer properties object passed to the signer. func GetSigningRegion(ctx context.Context) (v string) { v, _ = middleware.GetStackValue(ctx, signingRegionKey{}).(string) return v @@ -125,10 +131,13 @@ func SetRequiresLegacyEndpoints(ctx context.Context, value bool) context.Context return middleware.WithStackValue(ctx, requiresLegacyEndpointsKey{}, value) } -// SetSigningName set or modifies the signing name on the context. +// SetSigningName set or modifies the sigv4 or sigv4a signing name on the context. // // Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues // to clear all stack values. +// +// Deprecated: This value is unstable. Use WithSigV4SigningName client option +// funcs instead. func SetSigningName(ctx context.Context, value string) context.Context { return middleware.WithStackValue(ctx, signingNameKey{}, value) } @@ -137,6 +146,9 @@ func SetSigningName(ctx context.Context, value string) context.Context { // // Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues // to clear all stack values. +// +// Deprecated: This value is unstable. Use WithSigV4SigningRegion client option +// funcs instead. 
func SetSigningRegion(ctx context.Context, value string) context.Context { return middleware.WithStackValue(ctx, signingRegionKey{}, value) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go index 71b1a3521..ca738f234 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go @@ -38,6 +38,7 @@ var RequiredSignedHeaders = Rules{ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Expected-Bucket-Owner": struct{}{}, "X-Amz-Grant-Full-control": struct{}{}, "X-Amz-Grant-Read": struct{}{}, "X-Amz-Grant-Read-Acp": struct{}{}, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go index 0fb9b24e4..4b19444b0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go @@ -58,7 +58,7 @@ func (e *SigningError) Unwrap() error { // S3 PutObject API allows unsigned payload signing auth usage when TLS is enabled, and uses this middleware to // dynamically switch between unsigned and signed payload based on TLS state for request. func UseDynamicPayloadSigningMiddleware(stack *middleware.Stack) error { - _, err := stack.Build.Swap(computePayloadHashMiddlewareID, &dynamicPayloadSigningMiddleware{}) + _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &dynamicPayloadSigningMiddleware{}) return err } @@ -71,24 +71,22 @@ func (m *dynamicPayloadSigningMiddleware) ID() string { return computePayloadHashMiddlewareID } -// HandleBuild sets a resolver that directs to the payload sha256 compute handler. 
-func (m *dynamicPayloadSigningMiddleware) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +// HandleFinalize delegates SHA256 computation according to whether the request +// is TLS-enabled. +func (m *dynamicPayloadSigningMiddleware) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) } - // if TLS is enabled, use unsigned payload when supported if req.IsHTTPS() { - return (&unsignedPayload{}).HandleBuild(ctx, in, next) + return (&unsignedPayload{}).HandleFinalize(ctx, in, next) } - - // else fall back to signed payload - return (&computePayloadSHA256{}).HandleBuild(ctx, in, next) + return (&computePayloadSHA256{}).HandleFinalize(ctx, in, next) } // unsignedPayload sets the SigV4 request payload hash to unsigned. @@ -104,7 +102,7 @@ type unsignedPayload struct{} // AddUnsignedPayloadMiddleware adds unsignedPayload to the operation // middleware stack func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error { - return stack.Build.Add(&unsignedPayload{}, middleware.After) + return stack.Finalize.Insert(&unsignedPayload{}, "ResolveEndpointV2", middleware.After) } // ID returns the unsignedPayload identifier @@ -112,23 +110,16 @@ func (m *unsignedPayload) ID() string { return computePayloadHashMiddlewareID } -// HandleBuild sets the payload hash to be an unsigned payload -func (m *unsignedPayload) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +// HandleFinalize sets the payload hash magic value to the unsigned sentinel. 
+func (m *unsignedPayload) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - // This should not compute the content SHA256 if the value is already - // known. (e.g. application pre-computed SHA256 before making API call). - // Does not have any tight coupling to the X-Amz-Content-Sha256 header, if - // that header is provided a middleware must translate it into the context. - contentSHA := GetPayloadHash(ctx) - if len(contentSHA) == 0 { - contentSHA = v4Internal.UnsignedPayload + if GetPayloadHash(ctx) == "" { + ctx = SetPayloadHash(ctx, v4Internal.UnsignedPayload) } - - ctx = SetPayloadHash(ctx, contentSHA) - return next.HandleBuild(ctx, in) + return next.HandleFinalize(ctx, in) } // computePayloadSHA256 computes SHA256 payload hash to sign. @@ -144,13 +135,13 @@ type computePayloadSHA256 struct{} // AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the // operation middleware stack func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error { - return stack.Build.Add(&computePayloadSHA256{}, middleware.After) + return stack.Finalize.Insert(&computePayloadSHA256{}, "ResolveEndpointV2", middleware.After) } // RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the // operation middleware stack func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error { - _, err := stack.Build.Remove(computePayloadHashMiddlewareID) + _, err := stack.Finalize.Remove(computePayloadHashMiddlewareID) return err } @@ -159,12 +150,17 @@ func (m *computePayloadSHA256) ID() string { return computePayloadHashMiddlewareID } -// HandleBuild compute the payload hash for the request payload -func (m *computePayloadSHA256) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +// HandleFinalize 
computes the payload hash for the request, storing it to the +// context. This is a no-op if a caller has previously set that value. +func (m *computePayloadSHA256) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + if GetPayloadHash(ctx) != "" { + return next.HandleFinalize(ctx, in) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &HashComputationError{ @@ -172,14 +168,6 @@ func (m *computePayloadSHA256) HandleBuild( } } - // This should not compute the content SHA256 if the value is already - // known. (e.g. application pre-computed SHA256 before making API call) - // Does not have any tight coupling to the X-Amz-Content-Sha256 header, if - // that header is provided a middleware must translate it into the context. - if contentSHA := GetPayloadHash(ctx); len(contentSHA) != 0 { - return next.HandleBuild(ctx, in) - } - hash := sha256.New() if stream := req.GetStream(); stream != nil { _, err = io.Copy(hash, stream) @@ -198,7 +186,7 @@ func (m *computePayloadSHA256) HandleBuild( ctx = SetPayloadHash(ctx, hex.EncodeToString(hash.Sum(nil))) - return next.HandleBuild(ctx, in) + return next.HandleFinalize(ctx, in) } // SwapComputePayloadSHA256ForUnsignedPayloadMiddleware replaces the @@ -207,7 +195,7 @@ func (m *computePayloadSHA256) HandleBuild( // Use this to disable computing the Payload SHA256 checksum and instead use // UNSIGNED-PAYLOAD for the SHA256 value. 
func SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack *middleware.Stack) error { - _, err := stack.Build.Swap(computePayloadHashMiddlewareID, &unsignedPayload{}) + _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &unsignedPayload{}) return err } @@ -218,13 +206,13 @@ type contentSHA256Header struct{} // AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the // operation middleware stack func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error { - return stack.Build.Insert(&contentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After) + return stack.Finalize.Insert(&contentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After) } // RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware // from the operation middleware stack func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error { - _, err := stack.Build.Remove((*contentSHA256Header)(nil).ID()) + _, err := stack.Finalize.Remove((*contentSHA256Header)(nil).ID()) return err } @@ -233,12 +221,12 @@ func (m *contentSHA256Header) ID() string { return "SigV4ContentSHA256Header" } -// HandleBuild sets the X-Amz-Content-Sha256 header value to the Payload hash +// HandleFinalize sets the X-Amz-Content-Sha256 header value to the Payload hash // stored in the context. 
-func (m *contentSHA256Header) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +func (m *contentSHA256Header) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { req, ok := in.Request.(*smithyhttp.Request) if !ok { @@ -246,8 +234,7 @@ func (m *contentSHA256Header) HandleBuild( } req.Header.Set(v4Internal.ContentSHAKey, GetPayloadHash(ctx)) - - return next.HandleBuild(ctx, in) + return next.HandleFinalize(ctx, in) } // SignHTTPRequestMiddlewareOptions is the configuration options for the SignHTTPRequestMiddleware middleware. @@ -332,17 +319,17 @@ type streamingEventsPayload struct{} // AddStreamingEventsPayload adds the streamingEventsPayload middleware to the stack. func AddStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Build.Add(&streamingEventsPayload{}, middleware.After) + return stack.Finalize.Add(&streamingEventsPayload{}, middleware.Before) } func (s *streamingEventsPayload) ID() string { return computePayloadHashMiddlewareID } -func (s *streamingEventsPayload) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +func (s *streamingEventsPayload) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { contentSHA := GetPayloadHash(ctx) if len(contentSHA) == 0 { @@ -351,7 +338,7 @@ func (s *streamingEventsPayload) HandleBuild( ctx = SetPayloadHash(ctx, contentSHA) - return next.HandleBuild(ctx, in) + return next.HandleFinalize(ctx, in) } // GetSignedRequestSignature attempts to extract the signature of the request. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/buildspec.yml b/vendor/github.com/aws/aws-sdk-go-v2/buildspec.yml deleted file mode 100644 index b11df5082..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/buildspec.yml +++ /dev/null @@ -1,12 +0,0 @@ -version: 0.2 - -phases: - build: - commands: - - echo Build started on `date` - - export GOPATH=/go - - export SDK_CODEBUILD_ROOT=`pwd` - - make ci-test-no-generate - post_build: - commands: - - echo Build completed on `date` diff --git a/vendor/github.com/aws/aws-sdk-go-v2/ci-find-smithy-go.sh b/vendor/github.com/aws/aws-sdk-go-v2/ci-find-smithy-go.sh deleted file mode 100644 index 4da5d09cb..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/ci-find-smithy-go.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -# looks for (and modreplaces if existing) a smithy-go branch matching the -# current branch name -# -# the loop will unfurl -*s off of the branch, e.g. sdk branch -# 'feat-foo-bar-baz' will match any of the following (in order): -# - feat-foo-bar-baz -# - feat-foo-bar -# - feat-foo - -if [ -z "$SMITHY_GO_REPOSITORY" ]; then - SMITHY_GO_REPOSITORY=aws/smithy-go -fi - -if [ -z "$RUNNER_TMPDIR" ]; then - echo env RUNNER_TMPDIR is required - exit 1 -fi - -branch=$(git branch --show-current) -if [ "$branch" == main ]; then - echo aws-sdk-go-v2 is on branch main, stop - exit 0 -fi - -# For PR workflows, only the triggering ref is checked out, which in isolation -# is not recognized as a branch by git. Use the specific workflow env instead. -if [ -z "$branch" ]; then - branch=$GITHUB_HEAD_REF -fi - -if [ -n "$GIT_PAT" ]; then - repository=https://$GIT_PAT@github.com/$SMITHY_GO_REPOSITORY -else - repository=https://github.com/$SMITHY_GO_REPOSITORY -fi - -echo on branch \"$branch\" -while [ -n "$branch" ] && [[ "$branch" == *-* ]]; do - echo looking for "$branch"... - git ls-remote --exit-code --heads "$repository" refs/heads/"$branch" - if [ "$?" 
== 0 ]; then - echo found "$branch" - matched_branch=$branch - break - fi - - branch=${branch%-*} -done - -if [ -z "$matched_branch" ]; then - echo found no matching smithy-go branch, stop - exit 0 -fi - -git clone -b "$matched_branch" "$repository" "$RUNNER_TMPDIR"/smithy-go -SMITHY_GO_SRC=$RUNNER_TMPDIR/smithy-go make gen-mod-replace-smithy-. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index 774040216..d75e52119 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,100 @@ +# v1.25.3 (2023-11-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.2 (2023-11-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.1 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.0 (2023-11-14) + +* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.0 (2023-11-13) + +* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.0 (2023-11-09.2) + +* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.3 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.2 (2023-11-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.1 (2023-11-06) + +* No change notes available for this release. 
+ +# v1.22.0 (2023-11-02) + +* **Feature**: Add env and shared config settings for disabling IMDSv1 fallback. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.1 (2023-10-24) + +* No change notes available for this release. + +# v1.19.0 (2023-10-16) + +* **Feature**: Modify logic of retrieving user agent appID from env config + +# v1.18.45 (2023-10-12) + +* **Bug Fix**: Fail to load config if an explicitly provided profile doesn't exist. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.44 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.43 (2023-10-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.42 (2023-09-22) + +* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. +* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.41 (2023-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.40 (2023-09-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.39 (2023-09-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.38 (2023-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.18.37 (2023-08-23) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go index 138f8e76d..dfe629732 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go @@ -2,18 +2,11 @@ package config import ( "context" + "os" "github.com/aws/aws-sdk-go-v2/aws" ) -// defaultLoaders are a slice of functions that will read external configuration -// sources for configuration values. These values are read by the AWSConfigResolvers -// using interfaces to extract specific information from the external configuration. -var defaultLoaders = []loader{ - loadEnvConfig, - loadSharedConfigIgnoreNotExist, -} - // defaultAWSConfigResolvers are a slice of functions that will resolve external // configuration values into AWS configuration values. // @@ -79,6 +72,8 @@ var defaultAWSConfigResolvers = []awsConfigResolver{ // Sets the sdk app ID if present in shared config profile resolveAppID, + + resolveBaseEndpoint, } // A Config represents a generic configuration value or set of values. This type @@ -170,7 +165,7 @@ func (cs configs) ResolveConfig(f func(configs []interface{}) error) error { // or the custom data will be ignored by the resolvers and config loaders. 
// // cfg, err := config.LoadDefaultConfig( context.TODO(), -// WithSharedConfigProfile("test-profile"), +// config.WithSharedConfigProfile("test-profile"), // ) // if err != nil { // panic(fmt.Sprintf("failed loading config, %v", err)) @@ -190,7 +185,7 @@ func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error) // assign Load Options to configs var cfgCpy = configs{options} - cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, defaultLoaders) + cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, resolveConfigLoaders(&options)) if err != nil { return aws.Config{}, err } @@ -202,3 +197,17 @@ func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error) return cfg, nil } + +func resolveConfigLoaders(options *LoadOptions) []loader { + loaders := make([]loader, 2) + loaders[0] = loadEnvConfig + + // specification of a profile should cause a load failure if it doesn't exist + if os.Getenv(awsProfileEnvVar) != "" || options.SharedConfigProfile != "" { + loaders[1] = loadSharedConfig + } else { + loaders[1] = loadSharedConfigIgnoreNotExist + } + + return loaders +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go index 63ecd02b3..78bc14933 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go @@ -57,7 +57,8 @@ const ( awsEc2MetadataServiceEndpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT" - awsEc2MetadataDisabled = "AWS_EC2_METADATA_DISABLED" + awsEc2MetadataDisabled = "AWS_EC2_METADATA_DISABLED" + awsEc2MetadataV1DisabledEnvVar = "AWS_EC2_METADATA_V1_DISABLED" awsS3DisableMultiRegionAccessPointEnvVar = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS" @@ -69,6 +70,10 @@ const ( awsRetryMaxAttempts = "AWS_MAX_ATTEMPTS" awsRetryMode = "AWS_RETRY_MODE" + awsSdkAppID = "AWS_SDK_UA_APP_ID" + + awsIgnoreConfiguredEndpoints = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS" + awsEndpointURL = "AWS_ENDPOINT_URL" ) 
var ( @@ -205,6 +210,11 @@ type EnvConfig struct { // AWS_EC2_METADATA_DISABLED=true EC2IMDSClientEnableState imds.ClientEnableState + // Specifies if EC2 IMDSv1 fallback is disabled. + // + // AWS_EC2_METADATA_V1_DISABLED=true + EC2IMDSv1Disabled *bool + // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) // // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 @@ -248,6 +258,16 @@ type EnvConfig struct { // // aws_retry_mode=standard RetryMode aws.RetryMode + + // aws sdk app ID that can be added to user agent header string + AppID string + + // Flag used to disable configured endpoints. + IgnoreConfiguredEndpoints *bool + + // Value to contain configured endpoints to be propagated to + // corresponding endpoint resolution field. + BaseEndpoint string } // loadEnvConfig reads configuration values from the OS's environment variables. @@ -288,6 +308,8 @@ func NewEnvConfig() (EnvConfig, error) { cfg.RoleARN = os.Getenv(awsRoleARNEnvVar) cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnvVar) + cfg.AppID = os.Getenv(awsSdkAppID) + if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvVar}); err != nil { return cfg, err } @@ -301,6 +323,9 @@ func NewEnvConfig() (EnvConfig, error) { return cfg, err } cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnvVar) + if err := setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, []string{awsEc2MetadataV1DisabledEnvVar}); err != nil { + return cfg, err + } if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointEnvVar}); err != nil { return cfg, err @@ -325,6 +350,12 @@ func NewEnvConfig() (EnvConfig, error) { return cfg, err } + setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURL}) + + if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpoints}); err != nil { + return cfg, err + } + return cfg, nil } @@ -335,6 +366,10 @@ 
func (c EnvConfig) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, return c.DefaultsMode, true, nil } +func (c EnvConfig) getAppID(context.Context) (string, bool, error) { + return c.AppID, len(c.AppID) > 0, nil +} + // GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified, // and not 0. func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { @@ -472,6 +507,34 @@ func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) { return bytes.NewReader(b), true, nil } +// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured +// endpoints feature. +func (c EnvConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) { + if c.IgnoreConfiguredEndpoints == nil { + return false, false, nil + } + + return *c.IgnoreConfiguredEndpoints, true, nil +} + +func (c EnvConfig) getBaseEndpoint(context.Context) (string, bool, error) { + return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil +} + +// GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use +// with configured endpoints. +func (c EnvConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) { + if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURL, normalizeEnv(sdkID))); endpt != "" { + return endpt, true, nil + } + return "", false, nil +} + +func normalizeEnv(sdkID string) string { + upper := strings.ToUpper(sdkID) + return strings.ReplaceAll(upper, " ", "_") +} + // GetS3UseARNRegion returns whether to allow ARNs to direct the region // the S3 client's requests are sent to. func (c EnvConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) { @@ -663,3 +726,13 @@ func (c EnvConfig) GetEC2IMDSEndpoint() (string, bool, error) { return c.EC2IMDSEndpoint, true, nil } + +// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option +// resolver interface. 
+func (c EnvConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) { + if c.EC2IMDSv1Disabled == nil { + return false, false + } + + return *c.EC2IMDSv1Disabled, true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index c973bb552..36186d128 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.37" +const goModuleVersion = "1.25.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go index 69e54b77f..d52358460 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go @@ -122,6 +122,58 @@ func getRegion(ctx context.Context, configs configs) (value string, found bool, return } +// IgnoreConfiguredEndpointsProvider is needed to search for all providers +// that provide a flag to disable configured endpoints. +type IgnoreConfiguredEndpointsProvider interface { + GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error) +} + +// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured +// endpoints feature. 
+func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok { + value, found, err = p.GetIgnoreConfiguredEndpoints(ctx) + if err != nil || found { + break + } + } + } + return +} + +type baseEndpointProvider interface { + getBaseEndpoint(ctx context.Context) (string, bool, error) +} + +func getBaseEndpoint(ctx context.Context, configs configs) (value string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(baseEndpointProvider); ok { + value, found, err = p.getBaseEndpoint(ctx) + if err != nil || found { + break + } + } + } + return +} + +type servicesObjectProvider interface { + getServicesObject(ctx context.Context) (map[string]map[string]string, bool, error) +} + +func getServicesObject(ctx context.Context, configs configs) (value map[string]map[string]string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(servicesObjectProvider); ok { + value, found, err = p.getServicesObject(ctx) + if err != nil || found { + break + } + } + } + return +} + // appIDProvider provides access to the sdk app ID value type appIDProvider interface { getAppID(ctx context.Context) (string, bool, error) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go index b03705350..b3b2c93cd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go @@ -27,7 +27,6 @@ func resolveDefaultAWSConfig(ctx context.Context, cfg *aws.Config, cfgs configs) } *cfg = aws.Config{ - Credentials: aws.AnonymousCredentials{}, Logger: logging.NewStandardLogger(os.Stderr), ConfigSources: sources, } @@ -106,6 +105,29 @@ func resolveRegion(ctx context.Context, cfg *aws.Config, configs configs) error return nil } +func resolveBaseEndpoint(ctx context.Context, cfg *aws.Config, configs 
configs) error { + var downcastCfgSources []interface{} + for _, cs := range configs { + downcastCfgSources = append(downcastCfgSources, interface{}(cs)) + } + + if val, found, err := GetIgnoreConfiguredEndpoints(ctx, downcastCfgSources); found && val && err == nil { + cfg.BaseEndpoint = nil + return nil + } + + v, found, err := getBaseEndpoint(ctx, configs) + if err != nil { + return err + } + + if !found { + return nil + } + cfg.BaseEndpoint = aws.String(v) + return nil +} + // resolveAppID extracts the sdk app ID from the configs slice's SharedConfig or env var func resolveAppID(ctx context.Context, cfg *aws.Config, configs configs) error { ID, _, err := getAppID(ctx, configs) @@ -113,10 +135,6 @@ func resolveAppID(ctx context.Context, cfg *aws.Config, configs configs) error { return err } - // if app ID is set in env var, it should precedence shared config value - if appID := os.Getenv(`AWS_SDK_UA_APP_ID`); len(appID) > 0 { - ID = appID - } cfg.AppID = ID return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go index b21cd3080..89368520f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go @@ -3,7 +3,10 @@ package config import ( "context" "fmt" + "io/ioutil" + "net" "net/url" + "os" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -21,11 +24,33 @@ import ( const ( // valid credential source values - credSourceEc2Metadata = "Ec2InstanceMetadata" - credSourceEnvironment = "Environment" - credSourceECSContainer = "EcsContainer" + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" + httpProviderAuthFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE" ) +// direct representation of the IPv4 address for the ECS container +// "169.254.170.2" +var ecsContainerIPv4 net.IP = []byte{ + 169, 254, 170, 
2, +} + +// direct representation of the IPv4 address for the EKS container +// "169.254.170.23" +var eksContainerIPv4 net.IP = []byte{ + 169, 254, 170, 23, +} + +// direct representation of the IPv6 address for the EKS container +// "fd00:ec2::23" +var eksContainerIPv6 net.IP = []byte{ + 0xFD, 0, 0xE, 0xC2, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0x23, +} + var ( ecsContainerEndpoint = "http://169.254.170.2" // not constant to allow for swapping during unit-testing ) @@ -222,6 +247,36 @@ func processCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *Shar return nil } +// isAllowedHost allows host to be loopback or known ECS/EKS container IPs +// +// host can either be an IP address OR an unresolved hostname - resolution will +// be automatically performed in the latter case +func isAllowedHost(host string) (bool, error) { + if ip := net.ParseIP(host); ip != nil { + return isIPAllowed(ip), nil + } + + addrs, err := lookupHostFn(host) + if err != nil { + return false, err + } + + for _, addr := range addrs { + if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) { + return false, nil + } + } + + return true, nil +} + +func isIPAllowed(ip net.IP) bool { + return ip.IsLoopback() || + ip.Equal(ecsContainerIPv4) || + ip.Equal(eksContainerIPv4) || + ip.Equal(eksContainerIPv6) +} + func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpointURL, authToken string, configs configs) error { var resolveErr error @@ -232,10 +287,12 @@ func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpoint host := parsed.Hostname() if len(host) == 0 { resolveErr = fmt.Errorf("unable to parse host from local HTTP cred provider URL") - } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { - resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, loopbackErr) - } else if !isLoopback { - resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback hosts are allowed", host) + } else if parsed.Scheme 
== "http" { + if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil { + resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, allowHostErr) + } else if !isAllowedHost { + resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed", host) + } } } @@ -252,6 +309,16 @@ func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToke if len(authToken) != 0 { options.AuthorizationToken = authToken } + if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" { + options.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) { + var contents []byte + var err error + if contents, err = ioutil.ReadFile(authFilePath); err != nil { + return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err) + } + return string(contents), nil + }) + } options.APIOptions = cfg.APIOptions if cfg.Retryer != nil { options.Retryer = cfg.Retryer() diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go index e699194d3..20683bf5f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go @@ -28,6 +28,10 @@ const ( // the shared config file, not the credentials file. ssoSectionPrefix = `sso-session ` + // Prefix for services section. It is referenced in profile via the services + // parameter to configure clients for service-specific parameters. 
+ servicesPrefix = `services` + // string equivalent for boolean endpointDiscoveryDisabled = `false` endpointDiscoveryEnabled = `true` @@ -75,6 +79,8 @@ const ( ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" + ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled" + // Use DualStack Endpoint Resolution useDualStackEndpoint = "use_dualstack_endpoint" @@ -97,6 +103,10 @@ const ( caBundleKey = "ca_bundle" sdkAppID = "sdk_ua_app_id" + + ignoreConfiguredEndpoints = "ignore_configured_endpoint_urls" + + endpointURL = "endpoint_url" ) // defaultSharedConfigProfile allows for swapping the default profile for testing @@ -150,6 +160,24 @@ func (s *SSOSession) setFromIniSection(section ini.Section) { updateString(&s.SSOStartURL, section, ssoStartURLKey) } +// Services contains values configured in the services section +// of the AWS configuration file. +type Services struct { + // Services section values + // {"serviceId": {"key": "value"}} + // e.g. {"s3": {"endpoint_url": "example.com"}} + ServiceValues map[string]map[string]string +} + +func (s *Services) setFromIniSection(section ini.Section) { + if s.ServiceValues == nil { + s.ServiceValues = make(map[string]map[string]string) + } + for _, service := range section.List() { + s.ServiceValues[service] = section.Map(service) + } +} + // SharedConfig represents the configuration fields of the SDK config files. type SharedConfig struct { Profile string @@ -220,6 +248,12 @@ type SharedConfig struct { // ec2_metadata_service_endpoint=http://fd00:ec2::254 EC2IMDSEndpoint string + // Specifies that IMDS clients should not fallback to IMDSv1 if token + // requests fail. + // + // ec2_metadata_v1_disabled=true + EC2IMDSv1Disabled *bool + // Specifies if the S3 service should disable support for Multi-Region // access-points // @@ -272,6 +306,16 @@ type SharedConfig struct { // aws sdk app ID that can be added to user agent header string AppID string + + // Flag used to disable configured endpoints. 
+ IgnoreConfiguredEndpoints *bool + + // Value to contain configured endpoints to be propagated to + // corresponding endpoint resolution field. + BaseEndpoint string + + // Value to contain services section content. + Services Services } func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) { @@ -361,6 +405,16 @@ func (c SharedConfig) GetEC2IMDSEndpoint() (string, bool, error) { return c.EC2IMDSEndpoint, true, nil } +// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option +// resolver interface. +func (c SharedConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) { + if c.EC2IMDSv1Disabled == nil { + return false, false + } + + return *c.EC2IMDSv1Disabled, true +} + // GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be // used for requests. func (c SharedConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) { @@ -399,6 +453,40 @@ func (c SharedConfig) getAppID(context.Context) (string, bool, error) { return c.AppID, len(c.AppID) > 0, nil } +// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured +// endpoints feature. +func (c SharedConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) { + if c.IgnoreConfiguredEndpoints == nil { + return false, false, nil + } + + return *c.IgnoreConfiguredEndpoints, true, nil +} + +func (c SharedConfig) getBaseEndpoint(context.Context) (string, bool, error) { + return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil +} + +// GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use +// with configured endpoints. 
+func (c SharedConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) { + if service, ok := c.Services.ServiceValues[normalizeShared(sdkID)]; ok { + if endpt, ok := service[endpointURL]; ok { + return endpt, true, nil + } + } + return "", false, nil +} + +func normalizeShared(sdkID string) string { + lower := strings.ToLower(sdkID) + return strings.ReplaceAll(lower, " ", "_") +} + +func (c SharedConfig) getServicesObject(context.Context) (map[string]map[string]string, bool, error) { + return c.Services.ServiceValues, c.Services.ServiceValues != nil, nil +} + // loadSharedConfigIgnoreNotExist is an alias for loadSharedConfig with the // addition of ignoring when none of the files exist or when the profile // is not found in any of the files. @@ -548,6 +636,7 @@ func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func cfg := SharedConfig{} profiles := map[string]struct{}{} + if err = cfg.setFromIniSections(profiles, profile, configSections, option.Logger); err != nil { return SharedConfig{}, err } @@ -576,6 +665,7 @@ func processConfigSections(ctx context.Context, sections *ini.Sections, logger l skipSections[newName] = struct{}{} case strings.HasPrefix(section, ssoSectionPrefix): + case strings.HasPrefix(section, servicesPrefix): case strings.EqualFold(section, "default"): default: // drop this section, as invalid profile name @@ -735,11 +825,14 @@ func mergeSections(dst *ini.Sections, src ini.Sections) error { s3DisableMultiRegionAccessPointsKey, ec2MetadataServiceEndpointModeKey, ec2MetadataServiceEndpointKey, + ec2MetadataV1DisabledKey, useDualStackEndpoint, useFIPSEndpointKey, defaultsModeKey, retryModeKey, caBundleKey, + roleDurationSecondsKey, + retryMaxAttemptsKey, ssoSessionNameKey, ssoAccountIDKey, @@ -753,16 +846,6 @@ func mergeSections(dst *ini.Sections, src ini.Sections) error { } } - intKeys := []string{ - roleDurationSecondsKey, - retryMaxAttemptsKey, - } - for i := range intKeys { - if err := 
mergeIntKey(&srcSection, &dstSection, sectionName, intKeys[i]); err != nil { - return err - } - } - // set srcSection on dst srcSection *dst = dst.SetSection(sectionName, dstSection) } @@ -789,26 +872,6 @@ func mergeStringKey(srcSection *ini.Section, dstSection *ini.Section, sectionNam return nil } -func mergeIntKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error { - if srcSection.Has(key) { - srcValue := srcSection.Int(key) - v, err := ini.NewIntValue(srcValue) - if err != nil { - return fmt.Errorf("error merging %s, %w", key, err) - } - - if dstSection.Has(key) { - dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key, - dstSection.SourceFile[key], srcSection.SourceFile[key])) - - } - - dstSection.UpdateValue(key, v) - dstSection.UpdateSourceFile(key, srcSection.SourceFile[key]) - } - return nil -} - func newMergeKeyLogMessage(sectionName, key, dstSourceFile, srcSourceFile string) string { return fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+ "with a %v value found in a duplicate profile defined at file %v. 
\n", @@ -912,6 +975,17 @@ func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile c.SSOSession = &ssoSession } + for _, sectionName := range sections.List() { + if strings.HasPrefix(sectionName, servicesPrefix) { + section, ok := sections.GetSection(sectionName) + if ok { + var svcs Services + svcs.setFromIniSection(section) + c.Services = svcs + } + } + } + return nil } @@ -962,9 +1036,16 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er updateString(&c.SSOAccountID, section, ssoAccountIDKey) updateString(&c.SSORoleName, section, ssoRoleNameKey) + // we're retaining a behavioral quirk with this field that existed before + // the removal of literal parsing for #2276: + // - if the key is missing, the config field will not be set + // - if the key is set to a non-numeric, the config field will be set to 0 if section.Has(roleDurationSecondsKey) { - d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second - c.RoleDurationSeconds = &d + if v, ok := section.Int(roleDurationSecondsKey); ok { + c.RoleDurationSeconds = aws.Duration(time.Duration(v) * time.Second) + } else { + c.RoleDurationSeconds = aws.Duration(time.Duration(0)) + } } updateString(&c.CredentialProcess, section, credentialProcessKey) @@ -978,6 +1059,7 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er return fmt.Errorf("failed to load %s from shared config, %v", ec2MetadataServiceEndpointModeKey, err) } updateString(&c.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) + updateBoolPtr(&c.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey) updateUseDualStackEndpoint(&c.UseDualStackEndpoint, section, useDualStackEndpoint) updateUseFIPSEndpoint(&c.UseFIPSEndpoint, section, useFIPSEndpointKey) @@ -998,6 +1080,10 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er // user agent app ID added to request User-Agent header updateString(&c.AppID, section, sdkAppID) + 
updateBoolPtr(&c.IgnoreConfiguredEndpoints, section, ignoreConfiguredEndpoints) + + updateString(&c.BaseEndpoint, section, endpointURL) + // Shared Credentials creds := aws.Credentials{ AccessKeyID: section.String(accessKeyIDKey), @@ -1314,12 +1400,13 @@ func updateInt(dst *int, section ini.Section, key string) error { if !section.Has(key) { return nil } - if vt, _ := section.ValueType(key); vt != ini.IntegerType { - return fmt.Errorf("invalid value %s=%s, expect integer", - key, section.String(key)) + v, ok := section.Int(key) + if !ok { + return fmt.Errorf("invalid value %s=%s, expect integer", key, section.String(key)) } - *dst = int(section.Int(key)) + + *dst = int(v) return nil } @@ -1329,7 +1416,10 @@ func updateBool(dst *bool, section ini.Section, key string) { if !section.Has(key) { return } - *dst = section.Bool(key) + + // retains pre-#2276 behavior where non-bool value would resolve to false + v, _ := section.Bool(key) + *dst = v } // updateBoolPtr will only update the dst with the value in the section key, @@ -1338,8 +1428,11 @@ func updateBoolPtr(dst **bool, section ini.Section, key string) { if !section.Has(key) { return } + + // retains pre-#2276 behavior where non-bool value would resolve to false + v, _ := section.Bool(key) *dst = new(bool) - **dst = section.Bool(key) + **dst = v } // updateEndpointDiscoveryType will only update the dst with the value in the section, if @@ -1371,7 +1464,8 @@ func updateUseDualStackEndpoint(dst *aws.DualStackEndpointState, section ini.Sec return } - if section.Bool(key) { + // retains pre-#2276 behavior where non-bool value would resolve to false + if v, _ := section.Bool(key); v { *dst = aws.DualStackEndpointStateEnabled } else { *dst = aws.DualStackEndpointStateDisabled @@ -1387,7 +1481,8 @@ func updateUseFIPSEndpoint(dst *aws.FIPSEndpointState, section ini.Section, key return } - if section.Bool(key) { + // retains pre-#2276 behavior where non-bool value would resolve to false + if v, _ := section.Bool(key); v { 
*dst = aws.FIPSEndpointStateEnabled } else { *dst = aws.FIPSEndpointStateDisabled diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 6c3ca4670..633b9fa80 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,19 @@ +# v1.16.2 (2023-11-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2023-11-14) + +* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider. + +# v1.15.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.15.1 (2023-11-02) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go index adc7fc6b0..0c3c4d682 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go @@ -36,6 +36,7 @@ import ( "context" "fmt" "net/http" + "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client" @@ -81,7 +82,37 @@ type Options struct { // Optional authorization token value if set will be used as the value of // the Authorization header of the endpoint credential request. 
+ // + // When constructed from environment, the provider will use the value of + // AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token + // + // Will be overridden if AuthorizationTokenProvider is configured AuthorizationToken string + + // Optional auth provider func to dynamically load the auth token from a file + // everytime a credential is retrieved + // + // When constructed from environment, the provider will read and use the content + // of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable + // as the auth token everytime credentials are retrieved + // + // Will override AuthorizationToken if configured + AuthorizationTokenProvider AuthTokenProvider +} + +// AuthTokenProvider defines an interface to dynamically load a value to be passed +// for the Authorization header of a credentials request. +type AuthTokenProvider interface { + GetToken() (string, error) +} + +// TokenProviderFunc is a func type implementing AuthTokenProvider interface +// and enables customizing token provider behavior +type TokenProviderFunc func() (string, error) + +// GetToken func retrieves auth token according to TokenProviderFunc implementation +func (p TokenProviderFunc) GetToken() (string, error) { + return p() } // New returns a credentials Provider for retrieving AWS credentials @@ -132,5 +163,30 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { } func (p *Provider) getCredentials(ctx context.Context) (*client.GetCredentialsOutput, error) { - return p.client.GetCredentials(ctx, &client.GetCredentialsInput{AuthorizationToken: p.options.AuthorizationToken}) + authToken, err := p.resolveAuthToken() + if err != nil { + return nil, fmt.Errorf("resolve auth token: %v", err) + } + + return p.client.GetCredentials(ctx, &client.GetCredentialsInput{ + AuthorizationToken: authToken, + }) +} + +func (p *Provider) resolveAuthToken() (string, error) { + authToken := p.options.AuthorizationToken + + var err 
error + if p.options.AuthorizationTokenProvider != nil { + authToken, err = p.options.AuthorizationTokenProvider.GetToken() + if err != nil { + return "", err + } + } + + if strings.ContainsAny(authToken, "\r\n") { + return "", fmt.Errorf("authorization token contains invalid newline sequence") + } + + return authToken, nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index e6cba9dc3..c51ee07c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.15.1" +const goModuleVersion = "1.16.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/doc.go deleted file mode 100644 index 944feac55..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/doc.go +++ /dev/null @@ -1,58 +0,0 @@ -// Package sdk is the official AWS SDK v2 for the Go programming language. -// -// aws-sdk-go-v2 is the the v2 of the AWS SDK for the Go programming language. -// -// # Getting started -// -// The best way to get started working with the SDK is to use `go get` to add the -// SDK and desired service clients to your Go dependencies explicitly. -// -// go get github.com/aws/aws-sdk-go-v2 -// go get github.com/aws/aws-sdk-go-v2/config -// go get github.com/aws/aws-sdk-go-v2/service/dynamodb -// -// # Hello AWS -// -// This example shows how you can use the v2 SDK to make an API request using the -// SDK's Amazon DynamoDB client. 
-// -// package main -// -// import ( -// "context" -// "fmt" -// "log" -// -// "github.com/aws/aws-sdk-go-v2/aws" -// "github.com/aws/aws-sdk-go-v2/config" -// "github.com/aws/aws-sdk-go-v2/service/dynamodb" -// ) -// -// func main() { -// // Using the SDK's default configuration, loading additional config -// // and credentials values from the environment variables, shared -// // credentials, and shared configuration files -// cfg, err := config.LoadDefaultConfig(context.TODO(), -// config.WithRegion("us-west-2"), -// ) -// if err != nil { -// log.Fatalf("unable to load SDK config, %v", err) -// } -// -// // Using the Config value, create the DynamoDB client -// svc := dynamodb.NewFromConfig(cfg) -// -// // Build the request with its input parameters -// resp, err := svc.ListTables(context.TODO(), &dynamodb.ListTablesInput{ -// Limit: aws.Int32(5), -// }) -// if err != nil { -// log.Fatalf("failed to list tables, %v", err) -// } -// -// fmt.Println("Tables:") -// for _, tableName := range resp.TableNames { -// fmt.Println(tableName) -// } -// } -package sdk diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md index ba06fb199..11ce26fa3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -1,3 +1,11 @@ +# v1.14.4 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.3 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.14.2 (2023-11-02) * No change notes available for this release. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index 36432a4ff..90f2e5d78 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.14.2" +const goModuleVersion = "1.14.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go new file mode 100644 index 000000000..0b81db548 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go @@ -0,0 +1,45 @@ +package auth + +import ( + "github.com/aws/smithy-go/auth" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// HTTPAuthScheme is the SDK's internal implementation of smithyhttp.AuthScheme +// for pre-existing implementations where the signer was added to client +// config. SDK clients will key off of this type and ensure per-operation +// updates to those signers persist on the scheme itself. +type HTTPAuthScheme struct { + schemeID string + signer smithyhttp.Signer +} + +var _ smithyhttp.AuthScheme = (*HTTPAuthScheme)(nil) + +// NewHTTPAuthScheme returns an auth scheme instance with the given config. +func NewHTTPAuthScheme(schemeID string, signer smithyhttp.Signer) *HTTPAuthScheme { + return &HTTPAuthScheme{ + schemeID: schemeID, + signer: signer, + } +} + +// SchemeID identifies the auth scheme. +func (s *HTTPAuthScheme) SchemeID() string { + return s.schemeID +} + +// IdentityResolver gets the identity resolver for the auth scheme. +func (s *HTTPAuthScheme) IdentityResolver(o auth.IdentityResolverOptions) auth.IdentityResolver { + return o.GetIdentityResolver(s.schemeID) +} + +// Signer gets the signer for the auth scheme. 
+func (s *HTTPAuthScheme) Signer() smithyhttp.Signer { + return s.signer +} + +// WithSigner returns a new instance of the auth scheme with the updated signer. +func (s *HTTPAuthScheme) WithSigner(signer smithyhttp.Signer) *HTTPAuthScheme { + return NewHTTPAuthScheme(s.schemeID, signer) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go new file mode 100644 index 000000000..94dda3ed8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go @@ -0,0 +1,43 @@ +package smithy + +import ( + "context" + "fmt" + "time" + + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/auth/bearer" +) + +// BearerTokenAdapter adapts smithy bearer.Token to smithy auth.Identity. +type BearerTokenAdapter struct { + Token bearer.Token +} + +var _ auth.Identity = (*BearerTokenAdapter)(nil) + +// Expiration returns the time of expiration for the token. +func (v *BearerTokenAdapter) Expiration() time.Time { + return v.Token.Expires +} + +// BearerTokenProviderAdapter adapts smithy bearer.TokenProvider to smithy +// auth.IdentityResolver. +type BearerTokenProviderAdapter struct { + Provider bearer.TokenProvider +} + +var _ (auth.IdentityResolver) = (*BearerTokenProviderAdapter)(nil) + +// GetIdentity retrieves a bearer token using the underlying provider. 
+func (v *BearerTokenProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) ( + auth.Identity, error, +) { + token, err := v.Provider.RetrieveBearerToken(ctx) + if err != nil { + return nil, fmt.Errorf("get token: %v", err) + } + + return &BearerTokenAdapter{Token: token}, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go new file mode 100644 index 000000000..081cd8299 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go @@ -0,0 +1,35 @@ +package smithy + +import ( + "context" + "fmt" + + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/auth/bearer" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// BearerTokenSignerAdapter adapts smithy bearer.Signer to smithy http +// auth.Signer. +type BearerTokenSignerAdapter struct { + Signer bearer.Signer +} + +var _ (smithyhttp.Signer) = (*BearerTokenSignerAdapter)(nil) + +// SignRequest signs the request with the provided bearer token. 
+func (v *BearerTokenSignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, _ smithy.Properties) error { + ca, ok := identity.(*BearerTokenAdapter) + if !ok { + return fmt.Errorf("unexpected identity type: %T", identity) + } + + signed, err := v.Signer.SignWithBearerToken(ctx, ca.Token, r) + if err != nil { + return fmt.Errorf("sign request: %v", err) + } + + *r = *signed.(*smithyhttp.Request) + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go new file mode 100644 index 000000000..6017044f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go @@ -0,0 +1,46 @@ +package smithy + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" +) + +// CredentialsAdapter adapts aws.Credentials to auth.Identity. +type CredentialsAdapter struct { + Credentials aws.Credentials +} + +var _ auth.Identity = (*CredentialsAdapter)(nil) + +// Expiration returns the time of expiration for the credentials. +func (v *CredentialsAdapter) Expiration() time.Time { + return v.Credentials.Expires +} + +// CredentialsProviderAdapter adapts aws.CredentialsProvider to auth.IdentityResolver. +type CredentialsProviderAdapter struct { + Provider aws.CredentialsProvider +} + +var _ (auth.IdentityResolver) = (*CredentialsProviderAdapter)(nil) + +// GetIdentity retrieves AWS credentials using the underlying provider. 
+func (v *CredentialsProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) ( + auth.Identity, error, +) { + if v.Provider == nil { + return &CredentialsAdapter{Credentials: aws.Credentials{}}, nil + } + + creds, err := v.Provider.Retrieve(ctx) + if err != nil { + return nil, fmt.Errorf("get credentials: %v", err) + } + + return &CredentialsAdapter{Credentials: creds}, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go new file mode 100644 index 000000000..42b458673 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go @@ -0,0 +1,2 @@ +// Package smithy adapts concrete AWS auth and signing types to the generic smithy versions. +package smithy diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go new file mode 100644 index 000000000..919156edd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go @@ -0,0 +1,53 @@ +package smithy + +import ( + "context" + "fmt" + + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// V4SignerAdapter adapts v4.HTTPSigner to smithy http.Signer. +type V4SignerAdapter struct { + Signer v4.HTTPSigner + Logger logging.Logger + LogSigning bool +} + +var _ (smithyhttp.Signer) = (*V4SignerAdapter)(nil) + +// SignRequest signs the request with the provided identity. 
+func (v *V4SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error { + ca, ok := identity.(*CredentialsAdapter) + if !ok { + return fmt.Errorf("unexpected identity type: %T", identity) + } + + name, ok := smithyhttp.GetSigV4SigningName(&props) + if !ok { + return fmt.Errorf("sigv4 signing name is required") + } + + region, ok := smithyhttp.GetSigV4SigningRegion(&props) + if !ok { + return fmt.Errorf("sigv4 signing region is required") + } + + hash := v4.GetPayloadHash(ctx) + err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, sdk.NowTime(), func(o *v4.SignerOptions) { + o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) + + o.Logger = v.Logger + o.LogSigning = v.LogSigning + }) + if err != nil { + return fmt.Errorf("sign http: %v", err) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index e26f211e8..9159e6a45 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,11 @@ +# v1.2.3 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.2.1 (2023-11-01) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index 7766ee918..7ce6c26e3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release 
for this module -const goModuleVersion = "1.2.1" +const goModuleVersion = "1.2.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go new file mode 100644 index 000000000..67950ca36 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go @@ -0,0 +1,201 @@ +package endpoints + +import ( + "fmt" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4"} +) + +// Options provide configuration needed to direct how endpoints are resolved. +type Options struct { + // Disable usage of HTTPS (TLS / SSL) + DisableHTTPS bool +} + +// Partitions is a slice of partition +type Partitions []Partition + +// ResolveEndpoint resolves a service endpoint for the given region and options. +func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) { + if len(ps) == 0 { + return aws.Endpoint{}, fmt.Errorf("no partitions found") + } + + for i := 0; i < len(ps); i++ { + if !ps[i].canResolveEndpoint(region) { + continue + } + + return ps[i].ResolveEndpoint(region, opts) + } + + // fallback to first partition format to use when resolving the endpoint. + return ps[0].ResolveEndpoint(region, opts) +} + +// Partition is an AWS partition description for a service and its' region endpoints. +type Partition struct { + ID string + RegionRegex *regexp.Regexp + PartitionEndpoint string + IsRegionalized bool + Defaults Endpoint + Endpoints Endpoints +} + +func (p Partition) canResolveEndpoint(region string) bool { + _, ok := p.Endpoints[region] + return ok || p.RegionRegex.MatchString(region) +} + +// ResolveEndpoint resolves and service endpoint for the given region and options. 
+func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) { + if len(region) == 0 && len(p.PartitionEndpoint) != 0 { + region = p.PartitionEndpoint + } + + e, _ := p.endpointForRegion(region) + + return e.resolve(p.ID, region, p.Defaults, options), nil +} + +func (p Partition) endpointForRegion(region string) (Endpoint, bool) { + if e, ok := p.Endpoints[region]; ok { + return e, true + } + + if !p.IsRegionalized { + return p.Endpoints[p.PartitionEndpoint], region == p.PartitionEndpoint + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. + return Endpoint{}, false +} + +// Endpoints is a map of service config regions to endpoints +type Endpoints map[string]Endpoint + +// CredentialScope is the credential scope of a region and service +type CredentialScope struct { + Region string + Service string +} + +// Endpoint is a service endpoint description +type Endpoint struct { + // True if the endpoint cannot be resolved for this partition/region/service + Unresolveable aws.Ternary + + Hostname string + Protocols []string + + CredentialScope CredentialScope + + SignatureVersions []string `json:"signatureVersions"` +} + +func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) aws.Endpoint { + var merged Endpoint + merged.mergeIn(def) + merged.mergeIn(e) + e = merged + + var u string + if e.Unresolveable != aws.TrueTernary { + // Only attempt to resolve the endpoint if it can be resolved. 
+ hostname := strings.Replace(e.Hostname, "{region}", region, 1) + + scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS) + u = scheme + "://" + hostname + } + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + signingName := e.CredentialScope.Service + + return aws.Endpoint{ + URL: u, + PartitionID: partition, + SigningRegion: signingRegion, + SigningName: signingName, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + } +} + +func (e *Endpoint) mergeIn(other Endpoint) { + if other.Unresolveable != aws.UnknownTernary { + e.Unresolveable = other.Unresolveable + } + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } +} + +func getEndpointScheme(protocols []string, disableHTTPS bool) string { + if disableHTTPS { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +// MapFIPSRegion extracts the intrinsic AWS region from one that may have an +// embedded FIPS microformat. 
+func MapFIPSRegion(region string) string { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(region, fipsInfix) || + strings.Contains(region, fipsPrefix) || + strings.Contains(region, fipsSuffix) { + region = strings.ReplaceAll(region, fipsInfix, "-") + region = strings.ReplaceAll(region, fipsPrefix, "") + region = strings.ReplaceAll(region, fipsSuffix, "") + } + + return region +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index e19cd6630..0f71bbe71 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,11 @@ +# v2.5.3 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.5.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.5.1 (2023-11-01) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index 7128c248c..2de64a7cd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.5.1" +const goModuleVersion = "2.5.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md index 74eff6a52..149f7f716 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -1,3 +1,46 @@ +# v1.7.1 (2023-11-16) + +* **Bug Fix**: Fix 
recognition of trailing comments in shared config properties. # or ; separators that aren't preceded by whitespace at the end of a property value should be considered part of it. + +# v1.7.0 (2023-11-13) + +* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section. + +# v1.6.0 (2023-11-09.2) + +* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored + +# v1.5.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2023-11-07) + +* **Bug Fix**: Fix subproperty performance regression + +# v1.5.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.45 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.44 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.43 (2023-09-22) + +* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. +* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. 
+ # v1.3.42 (2023-08-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go deleted file mode 100644 index e83a99886..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go +++ /dev/null @@ -1,120 +0,0 @@ -package ini - -// ASTKind represents different states in the parse table -// and the type of AST that is being constructed -type ASTKind int - -// ASTKind* is used in the parse table to transition between -// the different states -const ( - ASTKindNone = ASTKind(iota) - ASTKindStart - ASTKindExpr - ASTKindEqualExpr - ASTKindStatement - ASTKindSkipStatement - ASTKindExprStatement - ASTKindSectionStatement - ASTKindNestedSectionStatement - ASTKindCompletedNestedSectionStatement - ASTKindCommentStatement - ASTKindCompletedSectionStatement -) - -func (k ASTKind) String() string { - switch k { - case ASTKindNone: - return "none" - case ASTKindStart: - return "start" - case ASTKindExpr: - return "expr" - case ASTKindStatement: - return "stmt" - case ASTKindSectionStatement: - return "section_stmt" - case ASTKindExprStatement: - return "expr_stmt" - case ASTKindCommentStatement: - return "comment" - case ASTKindNestedSectionStatement: - return "nested_section_stmt" - case ASTKindCompletedSectionStatement: - return "completed_stmt" - case ASTKindSkipStatement: - return "skip" - default: - return "" - } -} - -// AST interface allows us to determine what kind of node we -// are on and casting may not need to be necessary. 
-// -// The root is always the first node in Children -type AST struct { - Kind ASTKind - Root Token - RootToken bool - Children []AST -} - -func newAST(kind ASTKind, root AST, children ...AST) AST { - return AST{ - Kind: kind, - Children: append([]AST{root}, children...), - } -} - -func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { - return AST{ - Kind: kind, - Root: root, - RootToken: true, - Children: children, - } -} - -// AppendChild will append to the list of children an AST has. -func (a *AST) AppendChild(child AST) { - a.Children = append(a.Children, child) -} - -// GetRoot will return the root AST which can be the first entry -// in the children list or a token. -func (a *AST) GetRoot() AST { - if a.RootToken { - return *a - } - - if len(a.Children) == 0 { - return AST{} - } - - return a.Children[0] -} - -// GetChildren will return the current AST's list of children -func (a *AST) GetChildren() []AST { - if len(a.Children) == 0 { - return []AST{} - } - - if a.RootToken { - return a.Children - } - - return a.Children[1:] -} - -// SetChildren will set and override all children of the AST. -func (a *AST) SetChildren(children []AST) { - if a.RootToken { - a.Children = children - } else { - a.Children = append(a.Children[:1], children...) - } -} - -// Start is used to indicate the starting state of the parse table. 
-var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go deleted file mode 100644 index 0895d53cb..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go +++ /dev/null @@ -1,11 +0,0 @@ -package ini - -var commaRunes = []rune(",") - -func isComma(b rune) bool { - return b == ',' -} - -func newCommaToken() Token { - return newToken(TokenComma, commaRunes, NoneType) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go deleted file mode 100644 index 0b76999ba..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go +++ /dev/null @@ -1,35 +0,0 @@ -package ini - -// isComment will return whether or not the next byte(s) is a -// comment. -func isComment(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case ';': - return true - case '#': - return true - } - - return false -} - -// newCommentToken will create a comment token and -// return how many bytes were read. 
-func newCommentToken(b []rune) (Token, int, error) { - i := 0 - for ; i < len(b); i++ { - if b[i] == '\n' { - break - } - - if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { - break - } - } - - return newToken(TokenComment, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/dependency.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/dependency.go deleted file mode 100644 index f5ebe52e1..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/dependency.go +++ /dev/null @@ -1,6 +0,0 @@ -package ini - -import ( - // internal/ini module was carved out of this module - _ "github.com/aws/aws-sdk-go-v2" -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go deleted file mode 100644 index fdd5321b4..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go +++ /dev/null @@ -1,43 +0,0 @@ -// Package ini is an LL(1) parser for configuration files. -// -// Example: -// sections, err := ini.OpenFile("/path/to/file") -// if err != nil { -// panic(err) -// } -// -// profile := "foo" -// section, ok := sections.GetSection(profile) -// if !ok { -// fmt.Printf("section %q could not be found", profile) -// } -// -// Below is the BNF that describes this parser -// -// Grammar: -// stmt -> section | stmt' -// stmt' -> epsilon | expr -// expr -> value (stmt)* | equal_expr (stmt)* -// equal_expr -> value ( ':' | '=' ) equal_expr' -// equal_expr' -> number | string | quoted_string -// quoted_string -> " quoted_string' -// quoted_string' -> string quoted_string_end -// quoted_string_end -> " -// -// section -> [ section' -// section' -> section_value section_close -// section_value -> number | string_subset | boolean | quoted_string_subset -// quoted_string_subset -> " quoted_string_subset' -// quoted_string_subset' -> string_subset quoted_string_end -// quoted_string_subset -> " -// section_close -> ] -// -// value -> number | string_subset | 
boolean -// string -> ? UTF-8 Code-Points except '\n' (U+000A) and '\r\n' (U+000D U+000A) ? -// string_subset -> ? Code-points excepted by grammar except ':' (U+003A), '=' (U+003D), '[' (U+005B), and ']' (U+005D) ? -// -// SkipState will skip (NL WS)+ -// -// comment -> # comment' | ; comment' -// comment' -> epsilon | value -package ini diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go deleted file mode 100644 index 04345a54c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go +++ /dev/null @@ -1,4 +0,0 @@ -package ini - -// emptyToken is used to satisfy the Token interface -var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go deleted file mode 100644 index 91ba2a59d..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go +++ /dev/null @@ -1,24 +0,0 @@ -package ini - -// newExpression will return an expression AST. 
-// Expr represents an expression -// -// grammar: -// expr -> string | number -func newExpression(tok Token) AST { - return newASTWithRootToken(ASTKindExpr, tok) -} - -func newEqualExpr(left AST, tok Token) AST { - return newASTWithRootToken(ASTKindEqualExpr, tok, left) -} - -// EqualExprKey will return a LHS value in the equal expr -func EqualExprKey(ast AST) string { - children := ast.GetChildren() - if len(children) == 0 || ast.Kind != ASTKindEqualExpr { - return "" - } - - return string(children[0].Root.Raw()) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go index 709294b9e..5080ebe68 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -3,4 +3,4 @@ package ini // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.42" +const goModuleVersion = "1.7.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go index f74062313..cefcce91e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go @@ -1,13 +1,26 @@ +// Package ini implements parsing of the AWS shared config file. +// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } package ini import ( "fmt" "io" "os" + "strings" ) -// OpenFile takes a path to a given file, and will open and parse -// that file. +// OpenFile parses shared config from the given file path. 
func OpenFile(path string) (sections Sections, err error) { f, oerr := os.Open(path) if oerr != nil { @@ -26,33 +39,18 @@ func OpenFile(path string) (sections Sections, err error) { return Parse(f, path) } -// Parse will parse the given file using the shared config -// visitor. -func Parse(f io.Reader, path string) (Sections, error) { - tree, err := ParseAST(f) +// Parse parses shared config from the given reader. +func Parse(r io.Reader, path string) (Sections, error) { + contents, err := io.ReadAll(r) if err != nil { - return Sections{}, err + return Sections{}, fmt.Errorf("read all: %v", err) } - v := NewDefaultVisitor(path) - if err = Walk(tree, v); err != nil { - return Sections{}, err - } - - return v.Sections, nil -} - -// ParseBytes will parse the given bytes and return the parsed sections. -func ParseBytes(b []byte) (Sections, error) { - tree, err := ParseASTBytes(b) + lines := strings.Split(string(contents), "\n") + tokens, err := tokenize(lines) if err != nil { - return Sections{}, err - } - - v := NewDefaultVisitor("") - if err = Walk(tree, v); err != nil { - return Sections{}, err + return Sections{}, fmt.Errorf("tokenize: %v", err) } - return v.Sections, nil + return parse(tokens, path), nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go deleted file mode 100644 index abf1fb036..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go +++ /dev/null @@ -1,157 +0,0 @@ -package ini - -import ( - "bytes" - "io" - "io/ioutil" -) - -// TokenType represents the various different tokens types -type TokenType int - -func (t TokenType) String() string { - switch t { - case TokenNone: - return "none" - case TokenLit: - return "literal" - case TokenSep: - return "sep" - case TokenOp: - return "op" - case TokenWS: - return "ws" - case TokenNL: - return "newline" - case TokenComment: - return "comment" - case TokenComma: - return "comma" - default: - 
return "" - } -} - -// TokenType enums -const ( - TokenNone = TokenType(iota) - TokenLit - TokenSep - TokenComma - TokenOp - TokenWS - TokenNL - TokenComment -) - -type iniLexer struct{} - -// Tokenize will return a list of tokens during lexical analysis of the -// io.Reader. -func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, &UnableToReadFile{Err: err} - } - - return l.tokenize(b) -} - -func (l *iniLexer) tokenize(b []byte) ([]Token, error) { - runes := bytes.Runes(b) - var err error - n := 0 - tokenAmount := countTokens(runes) - tokens := make([]Token, tokenAmount) - count := 0 - - for len(runes) > 0 && count < tokenAmount { - switch { - case isWhitespace(runes[0]): - tokens[count], n, err = newWSToken(runes) - case isComma(runes[0]): - tokens[count], n = newCommaToken(), 1 - case isComment(runes): - tokens[count], n, err = newCommentToken(runes) - case isNewline(runes): - tokens[count], n, err = newNewlineToken(runes) - case isSep(runes): - tokens[count], n, err = newSepToken(runes) - case isOp(runes): - tokens[count], n, err = newOpToken(runes) - default: - tokens[count], n, err = newLitToken(runes) - } - - if err != nil { - return nil, err - } - - count++ - - runes = runes[n:] - } - - return tokens[:count], nil -} - -func countTokens(runes []rune) int { - count, n := 0, 0 - var err error - - for len(runes) > 0 { - switch { - case isWhitespace(runes[0]): - _, n, err = newWSToken(runes) - case isComma(runes[0]): - _, n = newCommaToken(), 1 - case isComment(runes): - _, n, err = newCommentToken(runes) - case isNewline(runes): - _, n, err = newNewlineToken(runes) - case isSep(runes): - _, n, err = newSepToken(runes) - case isOp(runes): - _, n, err = newOpToken(runes) - default: - _, n, err = newLitToken(runes) - } - - if err != nil { - return 0 - } - - count++ - runes = runes[n:] - } - - return count + 1 -} - -// Token indicates a metadata about a given value. 
-type Token struct { - t TokenType - ValueType ValueType - base int - raw []rune -} - -var emptyValue = Value{} - -func newToken(t TokenType, raw []rune, v ValueType) Token { - return Token{ - t: t, - raw: raw, - ValueType: v, - } -} - -// Raw return the raw runes that were consumed -func (tok Token) Raw() []rune { - return tok.raw -} - -// Type returns the token type -func (tok Token) Type() TokenType { - return tok.t -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go deleted file mode 100644 index 12fc7d5aa..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go +++ /dev/null @@ -1,349 +0,0 @@ -package ini - -import ( - "fmt" - "io" -) - -// ParseState represents the current state of the parser. -type ParseState uint - -// State enums for the parse table -const ( - InvalidState ParseState = iota - // stmt -> value stmt' - StatementState - // stmt' -> MarkComplete | op stmt - StatementPrimeState - // value -> number | string | boolean | quoted_string - ValueState - // section -> [ section' - OpenScopeState - // section' -> value section_close - SectionState - // section_close -> ] - CloseScopeState - // SkipState will skip (NL WS)+ - SkipState - // SkipTokenState will skip any token and push the previous - // state onto the stack. - SkipTokenState - // comment -> # comment' | ; comment' - // comment' -> MarkComplete | value - CommentState - // MarkComplete state will complete statements and move that - // to the completed AST list - MarkCompleteState - // TerminalState signifies that the tokens have been fully parsed - TerminalState -) - -// parseTable is a state machine to dictate the grammar above. 
-var parseTable = map[ASTKind]map[TokenType]ParseState{ - ASTKindStart: { - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: TerminalState, - }, - ASTKindCommentStatement: { - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindExpr: { - TokenOp: StatementPrimeState, - TokenLit: ValueState, - TokenSep: OpenScopeState, - TokenWS: ValueState, - TokenNL: SkipState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindEqualExpr: { - TokenLit: ValueState, - TokenSep: ValueState, - TokenOp: ValueState, - TokenWS: SkipTokenState, - TokenNL: SkipState, - }, - ASTKindStatement: { - TokenLit: SectionState, - TokenSep: CloseScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindExprStatement: { - TokenLit: ValueState, - TokenSep: ValueState, - TokenOp: ValueState, - TokenWS: ValueState, - TokenNL: MarkCompleteState, - TokenComment: CommentState, - TokenNone: TerminalState, - TokenComma: SkipState, - }, - ASTKindSectionStatement: { - TokenLit: SectionState, - TokenOp: SectionState, - TokenSep: CloseScopeState, - TokenWS: SectionState, - TokenNL: SkipTokenState, - }, - ASTKindCompletedSectionStatement: { - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindSkipStatement: { - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: TerminalState, - }, -} - -// ParseAST will parse input from an io.Reader using -// an LL(1) parser. 
-func ParseAST(r io.Reader) ([]AST, error) { - lexer := iniLexer{} - tokens, err := lexer.Tokenize(r) - if err != nil { - return []AST{}, err - } - - return parse(tokens) -} - -// ParseASTBytes will parse input from a byte slice using -// an LL(1) parser. -func ParseASTBytes(b []byte) ([]AST, error) { - lexer := iniLexer{} - tokens, err := lexer.tokenize(b) - if err != nil { - return []AST{}, err - } - - return parse(tokens) -} - -func parse(tokens []Token) ([]AST, error) { - start := Start - stack := newParseStack(3, len(tokens)) - - stack.Push(start) - s := newSkipper() - -loop: - for stack.Len() > 0 { - k := stack.Pop() - - var tok Token - if len(tokens) == 0 { - // this occurs when all the tokens have been processed - // but reduction of what's left on the stack needs to - // occur. - tok = emptyToken - } else { - tok = tokens[0] - } - - step := parseTable[k.Kind][tok.Type()] - if s.ShouldSkip(tok) { - // being in a skip state with no tokens will break out of - // the parse loop since there is nothing left to process. - if len(tokens) == 0 { - break loop - } - // if should skip is true, we skip the tokens until should skip is set to false. - step = SkipTokenState - } - - switch step { - case TerminalState: - // Finished parsing. Push what should be the last - // statement to the stack. If there is anything left - // on the stack, an error in parsing has occurred. - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - break loop - case SkipTokenState: - // When skipping a token, the previous state was popped off the stack. - // To maintain the correct state, the previous state will be pushed - // onto the stack. 
- stack.Push(k) - case StatementState: - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - expr := newExpression(tok) - stack.Push(expr) - case StatementPrimeState: - if tok.Type() != TokenOp { - stack.MarkComplete(k) - continue - } - - if k.Kind != ASTKindExpr { - return nil, NewParseError( - fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), - ) - } - - k = trimSpaces(k) - expr := newEqualExpr(k, tok) - stack.Push(expr) - case ValueState: - // ValueState requires the previous state to either be an equal expression - // or an expression statement. - switch k.Kind { - case ASTKindEqualExpr: - // assigning a value to some key - k.AppendChild(newExpression(tok)) - stack.Push(newExprStatement(k)) - case ASTKindExpr: - k.Root.raw = append(k.Root.raw, tok.Raw()...) - stack.Push(k) - case ASTKindExprStatement: - root := k.GetRoot() - children := root.GetChildren() - if len(children) == 0 { - return nil, NewParseError( - fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), - ) - } - - rhs := children[len(children)-1] - - if rhs.Root.ValueType != QuotedStringType { - rhs.Root.ValueType = StringType - rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) 
- - } - - children[len(children)-1] = rhs - root.SetChildren(children) - - stack.Push(k) - } - case OpenScopeState: - if !runeCompare(tok.Raw(), openBrace) { - return nil, NewParseError("expected '['") - } - // If OpenScopeState is not at the start, we must mark the previous ast as complete - // - // for example: if previous ast was a skip statement; - // we should mark it as complete before we create a new statement - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - - stmt := newStatement() - stack.Push(stmt) - case CloseScopeState: - if !runeCompare(tok.Raw(), closeBrace) { - return nil, NewParseError("expected ']'") - } - - k = trimSpaces(k) - stack.Push(newCompletedSectionStatement(k)) - case SectionState: - var stmt AST - - switch k.Kind { - case ASTKindStatement: - // If there are multiple literals inside of a scope declaration, - // then the current token's raw value will be appended to the Name. - // - // This handles cases like [ profile default ] - // - // k will represent a SectionStatement with the children representing - // the label of the section - stmt = newSectionStatement(tok) - case ASTKindSectionStatement: - k.Root.raw = append(k.Root.raw, tok.Raw()...) 
- stmt = k - default: - return nil, NewParseError( - fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), - ) - } - - stack.Push(stmt) - case MarkCompleteState: - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - - if stack.Len() == 0 { - stack.Push(start) - } - case SkipState: - stack.Push(newSkipStatement(k)) - s.Skip() - case CommentState: - if k.Kind == ASTKindStart { - stack.Push(k) - } else { - stack.MarkComplete(k) - } - - stmt := newCommentStatement(tok) - stack.Push(stmt) - default: - return nil, NewParseError( - fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", - k.Kind, tok.Type())) - } - - if len(tokens) > 0 { - tokens = tokens[1:] - } - } - - // this occurs when a statement has not been completed - if stack.top > 1 { - return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) - } - - // returns a sublist which exludes the start symbol - return stack.List(), nil -} - -// trimSpaces will trim spaces on the left and right hand side of -// the literal. 
-func trimSpaces(k AST) AST { - // trim left hand side of spaces - for i := 0; i < len(k.Root.raw); i++ { - if !isWhitespace(k.Root.raw[i]) { - break - } - - k.Root.raw = k.Root.raw[1:] - i-- - } - - // trim right hand side of spaces - for i := len(k.Root.raw) - 1; i >= 0; i-- { - if !isWhitespace(k.Root.raw[i]) { - break - } - - k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] - } - - return k -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go deleted file mode 100644 index eca42d1b2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go +++ /dev/null @@ -1,336 +0,0 @@ -package ini - -import ( - "fmt" - "strconv" - "strings" - "unicode" -) - -var ( - runesTrue = []rune("true") - runesFalse = []rune("false") -) - -var literalValues = [][]rune{ - runesTrue, - runesFalse, -} - -func isBoolValue(b []rune) bool { - for _, lv := range literalValues { - if isCaselessLitValue(lv, b) { - return true - } - } - return false -} - -func isLitValue(want, have []rune) bool { - if len(have) < len(want) { - return false - } - - for i := 0; i < len(want); i++ { - if want[i] != have[i] { - return false - } - } - - return true -} - -// isCaselessLitValue is a caseless value comparison, assumes want is already lower-cased for efficiency. -func isCaselessLitValue(want, have []rune) bool { - if len(have) < len(want) { - return false - } - - for i := 0; i < len(want); i++ { - if want[i] != unicode.ToLower(have[i]) { - return false - } - } - - return true -} - -// isNumberValue will return whether not the leading characters in -// a byte slice is a number. A number is delimited by whitespace or -// the newline token. -// -// A number is defined to be in a binary, octal, decimal (int | float), hex format, -// or in scientific notation. 
-func isNumberValue(b []rune) bool { - negativeIndex := 0 - helper := numberHelper{} - needDigit := false - - for i := 0; i < len(b); i++ { - negativeIndex++ - - switch b[i] { - case '-': - if helper.IsNegative() || negativeIndex != 1 { - return false - } - helper.Determine(b[i]) - needDigit = true - continue - case 'e', 'E': - if err := helper.Determine(b[i]); err != nil { - return false - } - negativeIndex = 0 - needDigit = true - continue - case 'b': - if helper.numberFormat == hex { - break - } - fallthrough - case 'o', 'x': - needDigit = true - if i == 0 { - return false - } - - fallthrough - case '.': - if err := helper.Determine(b[i]); err != nil { - return false - } - needDigit = true - continue - } - - if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { - return !needDigit - } - - if !helper.CorrectByte(b[i]) { - return false - } - needDigit = false - } - - return !needDigit -} - -func isValid(b []rune) (bool, int, error) { - if len(b) == 0 { - // TODO: should probably return an error - return false, 0, nil - } - - return isValidRune(b[0]), 1, nil -} - -func isValidRune(r rune) bool { - return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' -} - -// ValueType is an enum that will signify what type -// the Value is -type ValueType int - -func (v ValueType) String() string { - switch v { - case NoneType: - return "NONE" - case DecimalType: - return "FLOAT" - case IntegerType: - return "INT" - case StringType: - return "STRING" - case BoolType: - return "BOOL" - } - - return "" -} - -// ValueType enums -const ( - NoneType = ValueType(iota) - DecimalType - IntegerType - StringType - QuotedStringType - BoolType -) - -// Value is a union container -type Value struct { - Type ValueType - raw []rune - - integer int64 - decimal float64 - boolean bool - str string -} - -func newValue(t ValueType, base int, raw []rune) (Value, error) { - v := Value{ - Type: t, - raw: raw, - } - var err error - - switch t { - case DecimalType: - v.decimal, err 
= strconv.ParseFloat(string(raw), 64) - case IntegerType: - if base != 10 { - raw = raw[2:] - } - - v.integer, err = strconv.ParseInt(string(raw), base, 64) - case StringType: - v.str = string(raw) - case QuotedStringType: - v.str = string(raw[1 : len(raw)-1]) - case BoolType: - v.boolean = isCaselessLitValue(runesTrue, v.raw) - } - - // issue 2253 - // - // if the value trying to be parsed is too large, then we will use - // the 'StringType' and raw value instead. - if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { - v.Type = StringType - v.str = string(raw) - err = nil - } - - return v, err -} - -// NewStringValue returns a Value type generated using a string input. -func NewStringValue(str string) (Value, error) { - return newValue(StringType, 10, []rune(str)) -} - -// NewIntValue returns a Value type generated using an int64 input. -func NewIntValue(i int64) (Value, error) { - v := strconv.FormatInt(i, 10) - return newValue(IntegerType, 10, []rune(v)) -} - -func (v Value) String() string { - switch v.Type { - case DecimalType: - return fmt.Sprintf("decimal: %f", v.decimal) - case IntegerType: - return fmt.Sprintf("integer: %d", v.integer) - case StringType: - return fmt.Sprintf("string: %s", string(v.raw)) - case QuotedStringType: - return fmt.Sprintf("quoted string: %s", string(v.raw)) - case BoolType: - return fmt.Sprintf("bool: %t", v.boolean) - default: - return "union not set" - } -} - -func newLitToken(b []rune) (Token, int, error) { - n := 0 - var err error - - token := Token{} - if b[0] == '"' { - n, err = getStringValue(b) - if err != nil { - return token, n, err - } - - token = newToken(TokenLit, b[:n], QuotedStringType) - } else if isNumberValue(b) { - var base int - base, n, err = getNumericalValue(b) - if err != nil { - return token, 0, err - } - - value := b[:n] - vType := IntegerType - if contains(value, '.') || hasExponent(value) { - vType = DecimalType - } - token = newToken(TokenLit, value, vType) - token.base = base 
- } else if isBoolValue(b) { - n, err = getBoolValue(b) - - token = newToken(TokenLit, b[:n], BoolType) - } else { - n, err = getValue(b) - token = newToken(TokenLit, b[:n], StringType) - } - - return token, n, err -} - -// IntValue returns an integer value -func (v Value) IntValue() int64 { - return v.integer -} - -// FloatValue returns a float value -func (v Value) FloatValue() float64 { - return v.decimal -} - -// BoolValue returns a bool value -func (v Value) BoolValue() bool { - return v.boolean -} - -func isTrimmable(r rune) bool { - switch r { - case '\n', ' ': - return true - } - return false -} - -// StringValue returns the string value -func (v Value) StringValue() string { - switch v.Type { - case StringType: - return strings.TrimFunc(string(v.raw), isTrimmable) - case QuotedStringType: - // preserve all characters in the quotes - return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) - default: - return strings.TrimFunc(string(v.raw), isTrimmable) - } -} - -func contains(runes []rune, c rune) bool { - for i := 0; i < len(runes); i++ { - if runes[i] == c { - return true - } - } - - return false -} - -func runeCompare(v1 []rune, v2 []rune) bool { - if len(v1) != len(v2) { - return false - } - - for i := 0; i < len(v1); i++ { - if v1[i] != v2[i] { - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go deleted file mode 100644 index e52ac399f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go +++ /dev/null @@ -1,30 +0,0 @@ -package ini - -func isNewline(b []rune) bool { - if len(b) == 0 { - return false - } - - if b[0] == '\n' { - return true - } - - if len(b) < 2 { - return false - } - - return b[0] == '\r' && b[1] == '\n' -} - -func newNewlineToken(b []rune) (Token, int, error) { - i := 1 - if b[0] == '\r' && isNewline(b[1:]) { - i++ - } - - if !isNewline([]rune(b[:i])) { - return 
emptyToken, 0, NewParseError("invalid new line token") - } - - return newToken(TokenNL, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go deleted file mode 100644 index a45c0bc56..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go +++ /dev/null @@ -1,152 +0,0 @@ -package ini - -import ( - "bytes" - "fmt" - "strconv" -) - -const ( - none = numberFormat(iota) - binary - octal - decimal - hex - exponent -) - -type numberFormat int - -// numberHelper is used to dictate what format a number is in -// and what to do for negative values. Since -1e-4 is a valid -// number, we cannot just simply check for duplicate negatives. -type numberHelper struct { - numberFormat numberFormat - - negative bool - negativeExponent bool -} - -func (b numberHelper) Exists() bool { - return b.numberFormat != none -} - -func (b numberHelper) IsNegative() bool { - return b.negative || b.negativeExponent -} - -func (b *numberHelper) Determine(c rune) error { - if b.Exists() { - return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) - } - - switch c { - case 'b': - b.numberFormat = binary - case 'o': - b.numberFormat = octal - case 'x': - b.numberFormat = hex - case 'e', 'E': - b.numberFormat = exponent - case '-': - if b.numberFormat != exponent { - b.negative = true - } else { - b.negativeExponent = true - } - case '.': - b.numberFormat = decimal - default: - return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) - } - - return nil -} - -func (b numberHelper) CorrectByte(c rune) bool { - switch { - case b.numberFormat == binary: - if !isBinaryByte(c) { - return false - } - case b.numberFormat == octal: - if !isOctalByte(c) { - return false - } - case b.numberFormat == hex: - if !isHexByte(c) { - return false - } - case b.numberFormat == decimal: - if !isDigit(c) { - return false - } - case 
b.numberFormat == exponent: - if !isDigit(c) { - return false - } - case b.negativeExponent: - if !isDigit(c) { - return false - } - case b.negative: - if !isDigit(c) { - return false - } - default: - if !isDigit(c) { - return false - } - } - - return true -} - -func (b numberHelper) Base() int { - switch b.numberFormat { - case binary: - return 2 - case octal: - return 8 - case hex: - return 16 - default: - return 10 - } -} - -func (b numberHelper) String() string { - buf := bytes.Buffer{} - i := 0 - - switch b.numberFormat { - case binary: - i++ - buf.WriteString(strconv.Itoa(i) + ": binary format\n") - case octal: - i++ - buf.WriteString(strconv.Itoa(i) + ": octal format\n") - case hex: - i++ - buf.WriteString(strconv.Itoa(i) + ": hex format\n") - case exponent: - i++ - buf.WriteString(strconv.Itoa(i) + ": exponent format\n") - default: - i++ - buf.WriteString(strconv.Itoa(i) + ": integer format\n") - } - - if b.negative { - i++ - buf.WriteString(strconv.Itoa(i) + ": negative format\n") - } - - if b.negativeExponent { - i++ - buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") - } - - return buf.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go deleted file mode 100644 index 8a84c7cbe..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go +++ /dev/null @@ -1,39 +0,0 @@ -package ini - -import ( - "fmt" -) - -var ( - equalOp = []rune("=") - equalColonOp = []rune(":") -) - -func isOp(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case '=': - return true - case ':': - return true - default: - return false - } -} - -func newOpToken(b []rune) (Token, int, error) { - tok := Token{} - - switch b[0] { - case '=': - tok = newToken(TokenOp, equalOp, NoneType) - case ':': - tok = newToken(TokenOp, equalColonOp, NoneType) - default: - return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) - } 
- return tok, 1, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go new file mode 100644 index 000000000..2422d9046 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go @@ -0,0 +1,109 @@ +package ini + +import ( + "fmt" + "strings" +) + +func parse(tokens []lineToken, path string) Sections { + parser := &parser{ + path: path, + sections: NewSections(), + } + parser.parse(tokens) + return parser.sections +} + +type parser struct { + csection, ckey string // current state + path string // source file path + sections Sections // parse result +} + +func (p *parser) parse(tokens []lineToken) { + for _, otok := range tokens { + switch tok := otok.(type) { + case *lineTokenProfile: + p.handleProfile(tok) + case *lineTokenProperty: + p.handleProperty(tok) + case *lineTokenSubProperty: + p.handleSubProperty(tok) + case *lineTokenContinuation: + p.handleContinuation(tok) + } + } +} + +func (p *parser) handleProfile(tok *lineTokenProfile) { + name := tok.Name + if tok.Type != "" { + name = fmt.Sprintf("%s %s", tok.Type, tok.Name) + } + p.ckey = "" + p.csection = name + if _, ok := p.sections.container[name]; !ok { + p.sections.container[name] = NewSection(name) + } +} + +func (p *parser) handleProperty(tok *lineTokenProperty) { + if p.csection == "" { + return // LEGACY: don't error on "global" properties + } + + p.ckey = tok.Key + if _, ok := p.sections.container[p.csection].values[tok.Key]; ok { + section := p.sections.container[p.csection] + section.Logs = append(p.sections.container[p.csection].Logs, + fmt.Sprintf( + "For profile: %v, overriding %v value, with a %v value found in a duplicate profile defined later in the same file %v. 
\n", + p.csection, tok.Key, tok.Key, p.path, + ), + ) + p.sections.container[p.csection] = section + } + + p.sections.container[p.csection].values[tok.Key] = Value{ + str: tok.Value, + } + p.sections.container[p.csection].SourceFile[tok.Key] = p.path +} + +func (p *parser) handleSubProperty(tok *lineTokenSubProperty) { + if p.csection == "" { + return // LEGACY: don't error on "global" properties + } + + if p.ckey == "" || p.sections.container[p.csection].values[p.ckey].str != "" { + // This is an "orphaned" subproperty, either because it's at + // the beginning of a section or because the last property's + // value isn't empty. Either way we're lenient here and + // "promote" this to a normal property. + p.handleProperty(&lineTokenProperty{ + Key: tok.Key, + Value: strings.TrimSpace(trimPropertyComment(tok.Value)), + }) + return + } + + if p.sections.container[p.csection].values[p.ckey].mp == nil { + p.sections.container[p.csection].values[p.ckey] = Value{ + mp: map[string]string{}, + } + } + p.sections.container[p.csection].values[p.ckey].mp[tok.Key] = tok.Value +} + +func (p *parser) handleContinuation(tok *lineTokenContinuation) { + if p.ckey == "" { + return + } + + value, _ := p.sections.container[p.csection].values[p.ckey] + if value.str != "" && value.mp == nil { + value.str = fmt.Sprintf("%s\n%s", value.str, tok.Value) + } + + p.sections.container[p.csection].values[p.ckey] = value +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go deleted file mode 100644 index 30ae0b8f2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go +++ /dev/null @@ -1,19 +0,0 @@ -package ini - -// ParseError is an error which is returned during any part of -// the parsing process. -type ParseError struct { - msg string -} - -// NewParseError will return a new ParseError where message -// is the description of the error. 
-func NewParseError(message string) *ParseError { - return &ParseError{ - msg: message, - } -} - -func (err *ParseError) Error() string { - return err.msg -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go deleted file mode 100644 index 7f01cf7c7..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go +++ /dev/null @@ -1,60 +0,0 @@ -package ini - -import ( - "bytes" - "fmt" -) - -// ParseStack is a stack that contains a container, the stack portion, -// and the list which is the list of ASTs that have been successfully -// parsed. -type ParseStack struct { - top int - container []AST - list []AST - index int -} - -func newParseStack(sizeContainer, sizeList int) ParseStack { - return ParseStack{ - container: make([]AST, sizeContainer), - list: make([]AST, sizeList), - } -} - -// Pop will return and truncate the last container element. -func (s *ParseStack) Pop() AST { - s.top-- - return s.container[s.top] -} - -// Push will add the new AST to the container -func (s *ParseStack) Push(ast AST) { - s.container[s.top] = ast - s.top++ -} - -// MarkComplete will append the AST to the list of completed statements -func (s *ParseStack) MarkComplete(ast AST) { - s.list[s.index] = ast - s.index++ -} - -// List will return the completed statements -func (s ParseStack) List() []AST { - return s.list[:s.index] -} - -// Len will return the length of the container -func (s *ParseStack) Len() int { - return s.top -} - -func (s ParseStack) String() string { - buf := bytes.Buffer{} - for i, node := range s.list { - buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) - } - - return buf.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go new file mode 100644 index 000000000..dd89848e6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go @@ -0,0 
+1,157 @@ +package ini + +import ( + "sort" +) + +// Sections is a map of Section structures that represent +// a configuration. +type Sections struct { + container map[string]Section +} + +// NewSections returns empty ini Sections +func NewSections() Sections { + return Sections{ + container: make(map[string]Section, 0), + } +} + +// GetSection will return section p. If section p does not exist, +// false will be returned in the second parameter. +func (t Sections) GetSection(p string) (Section, bool) { + v, ok := t.container[p] + return v, ok +} + +// HasSection denotes if Sections consist of a section with +// provided name. +func (t Sections) HasSection(p string) bool { + _, ok := t.container[p] + return ok +} + +// SetSection sets a section value for provided section name. +func (t Sections) SetSection(p string, v Section) Sections { + t.container[p] = v + return t +} + +// DeleteSection deletes a section entry/value for provided section name./ +func (t Sections) DeleteSection(p string) { + delete(t.container, p) +} + +// values represents a map of union values. +type values map[string]Value + +// List will return a list of all sections that were successfully +// parsed. +func (t Sections) List() []string { + keys := make([]string, len(t.container)) + i := 0 + for k := range t.container { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// Section contains a name and values. This represent +// a sectioned entry in a configuration file. +type Section struct { + // Name is the Section profile name + Name string + + // values are the values within parsed profile + values values + + // Errors is the list of errors + Errors []error + + // Logs is the list of logs + Logs []string + + // SourceFile is the INI Source file from where this section + // was retrieved. They key is the property, value is the + // source file the property was retrieved from. 
+ SourceFile map[string]string +} + +// NewSection returns an initialize section for the name +func NewSection(name string) Section { + return Section{ + Name: name, + values: values{}, + SourceFile: map[string]string{}, + } +} + +// List will return a list of all +// services in values +func (t Section) List() []string { + keys := make([]string, len(t.values)) + i := 0 + for k := range t.values { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// UpdateSourceFile updates source file for a property to provided filepath. +func (t Section) UpdateSourceFile(property string, filepath string) { + t.SourceFile[property] = filepath +} + +// UpdateValue updates value for a provided key with provided value +func (t Section) UpdateValue(k string, v Value) error { + t.values[k] = v + return nil +} + +// Has will return whether or not an entry exists in a given section +func (t Section) Has(k string) bool { + _, ok := t.values[k] + return ok +} + +// ValueType will returned what type the union is set to. If +// k was not found, the NoneType will be returned. 
+func (t Section) ValueType(k string) (ValueType, bool) { + v, ok := t.values[k] + return v.Type, ok +} + +// Bool returns a bool value at k +func (t Section) Bool(k string) (bool, bool) { + return t.values[k].BoolValue() +} + +// Int returns an integer value at k +func (t Section) Int(k string) (int64, bool) { + return t.values[k].IntValue() +} + +// Map returns a map value at k +func (t Section) Map(k string) map[string]string { + return t.values[k].MapValue() +} + +// Float64 returns a float value at k +func (t Section) Float64(k string) (float64, bool) { + return t.values[k].FloatValue() +} + +// String returns the string value at k +func (t Section) String(k string) string { + _, ok := t.values[k] + if !ok { + return "" + } + return t.values[k].StringValue() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go deleted file mode 100644 index f82095ba2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go +++ /dev/null @@ -1,41 +0,0 @@ -package ini - -import ( - "fmt" -) - -var ( - emptyRunes = []rune{} -) - -func isSep(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case '[', ']': - return true - default: - return false - } -} - -var ( - openBrace = []rune("[") - closeBrace = []rune("]") -) - -func newSepToken(b []rune) (Token, int, error) { - tok := Token{} - - switch b[0] { - case '[': - tok = newToken(TokenSep, openBrace, NoneType) - case ']': - tok = newToken(TokenSep, closeBrace, NoneType) - default: - return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) - } - return tok, 1, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go deleted file mode 100644 index 07e90876a..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go +++ /dev/null @@ -1,45 +0,0 @@ -package ini - -// skipper is 
used to skip certain blocks of an ini file. -// Currently skipper is used to skip nested blocks of ini -// files. See example below -// -// [ foo ] -// nested = ; this section will be skipped -// a=b -// c=d -// bar=baz ; this will be included -type skipper struct { - shouldSkip bool - TokenSet bool - prevTok Token -} - -func newSkipper() skipper { - return skipper{ - prevTok: emptyToken, - } -} - -func (s *skipper) ShouldSkip(tok Token) bool { - // should skip state will be modified only if previous token was new line (NL); - // and the current token is not WhiteSpace (WS). - if s.shouldSkip && - s.prevTok.Type() == TokenNL && - tok.Type() != TokenWS { - s.Continue() - return false - } - - s.prevTok = tok - return s.shouldSkip -} - -func (s *skipper) Skip() { - s.shouldSkip = true -} - -func (s *skipper) Continue() { - s.shouldSkip = false - s.prevTok = emptyToken -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go deleted file mode 100644 index ba0af01b5..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go +++ /dev/null @@ -1,35 +0,0 @@ -package ini - -// Statement is an empty AST mostly used for transitioning states. -func newStatement() AST { - return newAST(ASTKindStatement, AST{}) -} - -// SectionStatement represents a section AST -func newSectionStatement(tok Token) AST { - return newASTWithRootToken(ASTKindSectionStatement, tok) -} - -// ExprStatement represents a completed expression AST -func newExprStatement(ast AST) AST { - return newAST(ASTKindExprStatement, ast) -} - -// CommentStatement represents a comment in the ini defintion. 
-// -// grammar: -// comment -> #comment' | ;comment' -// comment' -> epsilon | value -func newCommentStatement(tok Token) AST { - return newAST(ASTKindCommentStatement, newExpression(tok)) -} - -// CompletedSectionStatement represents a completed section -func newCompletedSectionStatement(ast AST) AST { - return newAST(ASTKindCompletedSectionStatement, ast) -} - -// SkipStatement is used to skip whole statements -func newSkipStatement(ast AST) AST { - return newAST(ASTKindSkipStatement, ast) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go new file mode 100644 index 000000000..661588c22 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go @@ -0,0 +1,93 @@ +package ini + +import ( + "strings" +) + +func trimProfileComment(s string) string { + r, _, _ := strings.Cut(s, "#") + r, _, _ = strings.Cut(r, ";") + return r +} + +func trimPropertyComment(s string) string { + r, _, _ := strings.Cut(s, " #") + r, _, _ = strings.Cut(r, " ;") + r, _, _ = strings.Cut(r, "\t#") + r, _, _ = strings.Cut(r, "\t;") + return r +} + +// assumes no surrounding comment +func splitProperty(s string) (string, string, bool) { + equalsi := strings.Index(s, "=") + coloni := strings.Index(s, ":") // LEGACY: also supported for property assignment + sep := "=" + if equalsi == -1 || coloni != -1 && coloni < equalsi { + sep = ":" + } + + k, v, ok := strings.Cut(s, sep) + if !ok { + return "", "", false + } + return strings.TrimSpace(k), strings.TrimSpace(v), true +} + +// assumes no surrounding comment, whitespace, or profile brackets +func splitProfile(s string) (string, string) { + var first int + for i, r := range s { + if isLineSpace(r) { + if first == 0 { + first = i + } + } else { + if first != 0 { + return s[:first], s[i:] + } + } + } + if first == 0 { + return "", s // type component is effectively blank + } + return "", "" +} + +func isLineSpace(r rune) bool { + return r == 
' ' || r == '\t' +} + +func unquote(s string) string { + if isSingleQuoted(s) || isDoubleQuoted(s) { + return s[1 : len(s)-1] + } + return s +} + +// applies various legacy conversions to property values: +// - remote wrapping single/doublequotes +// - expand escaped quote and newline sequences +func legacyStrconv(s string) string { + s = unquote(s) + s = strings.ReplaceAll(s, `\"`, `"`) + s = strings.ReplaceAll(s, `\'`, `'`) + s = strings.ReplaceAll(s, `\n`, "\n") + return s +} + +func isSingleQuoted(s string) bool { + return hasAffixes(s, "'", "'") +} + +func isDoubleQuoted(s string) bool { + return hasAffixes(s, `"`, `"`) +} + +func isBracketed(s string) bool { + return hasAffixes(s, "[", "]") +} + +func hasAffixes(s, left, right string) bool { + return strings.HasPrefix(s, left) && strings.HasSuffix(s, right) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go new file mode 100644 index 000000000..6e9a03744 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go @@ -0,0 +1,32 @@ +package ini + +type lineToken interface { + isLineToken() +} + +type lineTokenProfile struct { + Type string + Name string +} + +func (*lineTokenProfile) isLineToken() {} + +type lineTokenProperty struct { + Key string + Value string +} + +func (*lineTokenProperty) isLineToken() {} + +type lineTokenContinuation struct { + Value string +} + +func (*lineTokenContinuation) isLineToken() {} + +type lineTokenSubProperty struct { + Key string + Value string +} + +func (*lineTokenSubProperty) isLineToken() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go new file mode 100644 index 000000000..89a773684 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go @@ -0,0 +1,92 @@ +package ini + +import ( + "strings" +) + +func tokenize(lines []string) ([]lineToken, error) { + tokens 
:= make([]lineToken, 0, len(lines)) + for _, line := range lines { + if len(strings.TrimSpace(line)) == 0 || isLineComment(line) { + continue + } + + if tok := asProfile(line); tok != nil { + tokens = append(tokens, tok) + } else if tok := asProperty(line); tok != nil { + tokens = append(tokens, tok) + } else if tok := asSubProperty(line); tok != nil { + tokens = append(tokens, tok) + } else if tok := asContinuation(line); tok != nil { + tokens = append(tokens, tok) + } // unrecognized tokens are effectively ignored + } + return tokens, nil +} + +func isLineComment(line string) bool { + trimmed := strings.TrimLeft(line, " \t") + return strings.HasPrefix(trimmed, "#") || strings.HasPrefix(trimmed, ";") +} + +func asProfile(line string) *lineTokenProfile { // " [ type name ] ; comment" + trimmed := strings.TrimSpace(trimProfileComment(line)) // "[ type name ]" + if !isBracketed(trimmed) { + return nil + } + trimmed = trimmed[1 : len(trimmed)-1] // " type name " (or just " name ") + trimmed = strings.TrimSpace(trimmed) // "type name" / "name" + typ, name := splitProfile(trimmed) + return &lineTokenProfile{ + Type: typ, + Name: name, + } +} + +func asProperty(line string) *lineTokenProperty { + if isLineSpace(rune(line[0])) { + return nil + } + + trimmed := trimPropertyComment(line) + trimmed = strings.TrimRight(trimmed, " \t") + k, v, ok := splitProperty(trimmed) + if !ok { + return nil + } + + return &lineTokenProperty{ + Key: strings.ToLower(k), // LEGACY: normalize key case + Value: legacyStrconv(v), // LEGACY: see func docs + } +} + +func asSubProperty(line string) *lineTokenSubProperty { + if !isLineSpace(rune(line[0])) { + return nil + } + + // comments on sub-properties are included in the value + trimmed := strings.TrimLeft(line, " \t") + k, v, ok := splitProperty(trimmed) + if !ok { + return nil + } + + return &lineTokenSubProperty{ // same LEGACY constraints as in normal property + Key: strings.ToLower(k), + Value: legacyStrconv(v), + } +} + +func 
asContinuation(line string) *lineTokenContinuation { + if !isLineSpace(rune(line[0])) { + return nil + } + + // includes comments like sub-properties + trimmed := strings.TrimLeft(line, " \t") + return &lineTokenContinuation{ + Value: trimmed, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go new file mode 100644 index 000000000..ade75bf34 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go @@ -0,0 +1,104 @@ +package ini + +import ( + "fmt" + "strconv" + "strings" +) + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case StringType: + return "STRING" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + StringType + QuotedStringType +) + +// Value is a union container +type Value struct { + Type ValueType + + str string + mp map[string]string +} + +// NewStringValue returns a Value type generated using a string input. 
+func NewStringValue(str string) (Value, error) { + return Value{str: str}, nil +} + +func (v Value) String() string { + switch v.Type { + case StringType: + return fmt.Sprintf("string: %s", string(v.str)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.str)) + default: + return "union not set" + } +} + +// MapValue returns a map value for sub properties +func (v Value) MapValue() map[string]string { + newlineParts := strings.Split(string(v.str), "\n") + mp := make(map[string]string) + for _, part := range newlineParts { + operandParts := strings.Split(part, "=") + if len(operandParts) < 2 { + continue + } + key := strings.TrimSpace(operandParts[0]) + val := strings.TrimSpace(operandParts[1]) + mp[key] = val + } + return mp +} + +// IntValue returns an integer value +func (v Value) IntValue() (int64, bool) { + i, err := strconv.ParseInt(string(v.str), 0, 64) + if err != nil { + return 0, false + } + return i, true +} + +// FloatValue returns a float value +func (v Value) FloatValue() (float64, bool) { + f, err := strconv.ParseFloat(string(v.str), 64) + if err != nil { + return 0, false + } + return f, true +} + +// BoolValue returns a bool value +func (v Value) BoolValue() (bool, bool) { + // we don't use ParseBool as it recognizes more than what we've + // historically supported + if strings.EqualFold(v.str, "true") { + return true, true + } else if strings.EqualFold(v.str, "false") { + return false, true + } + return false, false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + return v.str +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go deleted file mode 100644 index b5480fdeb..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go +++ /dev/null @@ -1,284 +0,0 @@ -package ini - -import ( - "fmt" -) - -// getStringValue will return a quoted string and the amount -// of bytes read 
-// -// an error will be returned if the string is not properly formatted -func getStringValue(b []rune) (int, error) { - if b[0] != '"' { - return 0, NewParseError("strings must start with '\"'") - } - - endQuote := false - i := 1 - - for ; i < len(b) && !endQuote; i++ { - if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { - endQuote = true - break - } else if escaped { - /*c, err := getEscapedByte(b[i]) - if err != nil { - return 0, err - } - - b[i-1] = c - b = append(b[:i], b[i+1:]...) - i--*/ - - continue - } - } - - if !endQuote { - return 0, NewParseError("missing '\"' in string value") - } - - return i + 1, nil -} - -// getBoolValue will return a boolean and the amount -// of bytes read -// -// an error will be returned if the boolean is not of a correct -// value -func getBoolValue(b []rune) (int, error) { - if len(b) < 4 { - return 0, NewParseError("invalid boolean value") - } - - n := 0 - for _, lv := range literalValues { - if len(lv) > len(b) { - continue - } - - if isCaselessLitValue(lv, b) { - n = len(lv) - } - } - - if n == 0 { - return 0, NewParseError("invalid boolean value") - } - - return n, nil -} - -// getNumericalValue will return a numerical string, the amount -// of bytes read, and the base of the number -// -// an error will be returned if the number is not of a correct -// value -func getNumericalValue(b []rune) (int, int, error) { - if !isDigit(b[0]) { - return 0, 0, NewParseError("invalid digit value") - } - - i := 0 - helper := numberHelper{} - -loop: - for negativeIndex := 0; i < len(b); i++ { - negativeIndex++ - - if !isDigit(b[i]) { - switch b[i] { - case '-': - if helper.IsNegative() || negativeIndex != 1 { - return 0, 0, NewParseError("parse error '-'") - } - - n := getNegativeNumber(b[i:]) - i += (n - 1) - helper.Determine(b[i]) - continue - case '.': - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - case 'e', 'E': - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - - 
negativeIndex = 0 - case 'b': - if helper.numberFormat == hex { - break - } - fallthrough - case 'o', 'x': - if i == 0 && b[i] != '0' { - return 0, 0, NewParseError("incorrect base format, expected leading '0'") - } - - if i != 1 { - return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) - } - - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - default: - if isWhitespace(b[i]) { - break loop - } - - if isNewline(b[i:]) { - break loop - } - - if !(helper.numberFormat == hex && isHexByte(b[i])) { - if i+2 < len(b) && !isNewline(b[i:i+2]) { - return 0, 0, NewParseError("invalid numerical character") - } else if !isNewline([]rune{b[i]}) { - return 0, 0, NewParseError("invalid numerical character") - } - - break loop - } - } - } - } - - return helper.Base(), i, nil -} - -// isDigit will return whether or not something is an integer -func isDigit(b rune) bool { - return b >= '0' && b <= '9' -} - -func hasExponent(v []rune) bool { - return contains(v, 'e') || contains(v, 'E') -} - -func isBinaryByte(b rune) bool { - switch b { - case '0', '1': - return true - default: - return false - } -} - -func isOctalByte(b rune) bool { - switch b { - case '0', '1', '2', '3', '4', '5', '6', '7': - return true - default: - return false - } -} - -func isHexByte(b rune) bool { - if isDigit(b) { - return true - } - return (b >= 'A' && b <= 'F') || - (b >= 'a' && b <= 'f') -} - -func getValue(b []rune) (int, error) { - i := 0 - - for i < len(b) { - if isNewline(b[i:]) { - break - } - - if isOp(b[i:]) { - break - } - - valid, n, err := isValid(b[i:]) - if err != nil { - return 0, err - } - - if !valid { - break - } - - i += n - } - - return i, nil -} - -// getNegativeNumber will return a negative number from a -// byte slice. This will iterate through all characters until -// a non-digit has been found. 
-func getNegativeNumber(b []rune) int { - if b[0] != '-' { - return 0 - } - - i := 1 - for ; i < len(b); i++ { - if !isDigit(b[i]) { - return i - } - } - - return i -} - -// isEscaped will return whether or not the character is an escaped -// character. -func isEscaped(value []rune, b rune) bool { - if len(value) == 0 { - return false - } - - switch b { - case '\'': // single quote - case '"': // quote - case 'n': // newline - case 't': // tab - case '\\': // backslash - default: - return false - } - - return value[len(value)-1] == '\\' -} - -func getEscapedByte(b rune) (rune, error) { - switch b { - case '\'': // single quote - return '\'', nil - case '"': // quote - return '"', nil - case 'n': // newline - return '\n', nil - case 't': // table - return '\t', nil - case '\\': // backslash - return '\\', nil - default: - return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) - } -} - -func removeEscapedCharacters(b []rune) []rune { - for i := 0; i < len(b); i++ { - if isEscaped(b[:i], b[i]) { - c, err := getEscapedByte(b[i]) - if err != nil { - return b - } - - b[i-1] = c - b = append(b[:i], b[i+1:]...) - i-- - } - } - - return b -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go deleted file mode 100644 index a07a63738..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go +++ /dev/null @@ -1,269 +0,0 @@ -package ini - -import ( - "fmt" - "sort" - "strings" -) - -// Visitor is an interface used by walkers that will -// traverse an array of ASTs. -type Visitor interface { - VisitExpr(AST) error - VisitStatement(AST) error -} - -// DefaultVisitor is used to visit statements and expressions -// and ensure that they are both of the correct format. -// In addition, upon visiting this will build sections and populate -// the Sections field which can be used to retrieve profile -// configuration. 
-type DefaultVisitor struct { - - // scope is the profile which is being visited - scope string - - // path is the file path which the visitor is visiting - path string - - // Sections defines list of the profile section - Sections Sections -} - -// NewDefaultVisitor returns a DefaultVisitor. It takes in a filepath -// which points to the file it is visiting. -func NewDefaultVisitor(filepath string) *DefaultVisitor { - return &DefaultVisitor{ - Sections: Sections{ - container: map[string]Section{}, - }, - path: filepath, - } -} - -// VisitExpr visits expressions... -func (v *DefaultVisitor) VisitExpr(expr AST) error { - t := v.Sections.container[v.scope] - if t.values == nil { - t.values = values{} - } - if t.SourceFile == nil { - t.SourceFile = make(map[string]string, 0) - } - - switch expr.Kind { - case ASTKindExprStatement: - opExpr := expr.GetRoot() - switch opExpr.Kind { - case ASTKindEqualExpr: - children := opExpr.GetChildren() - if len(children) <= 1 { - return NewParseError("unexpected token type") - } - - rhs := children[1] - - // The right-hand value side the equality expression is allowed to contain '[', ']', ':', '=' in the values. - // If the token is not either a literal or one of the token types that identifies those four additional - // tokens then error. - if !(rhs.Root.Type() == TokenLit || rhs.Root.Type() == TokenOp || rhs.Root.Type() == TokenSep) { - return NewParseError("unexpected token type") - } - - key := EqualExprKey(opExpr) - val, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) - if err != nil { - return err - } - - // lower case key to standardize - k := strings.ToLower(key) - - // identify if the section already had this key, append log on section - if t.Has(k) { - t.Logs = append(t.Logs, - fmt.Sprintf("For profile: %v, overriding %v value, "+ - "with a %v value found in a duplicate profile defined later in the same file %v. 
\n", - t.Name, k, k, v.path)) - } - - // assign the value - t.values[k] = val - // update the source file path for region - t.SourceFile[k] = v.path - default: - return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) - } - default: - return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) - } - - v.Sections.container[v.scope] = t - return nil -} - -// VisitStatement visits statements... -func (v *DefaultVisitor) VisitStatement(stmt AST) error { - switch stmt.Kind { - case ASTKindCompletedSectionStatement: - child := stmt.GetRoot() - if child.Kind != ASTKindSectionStatement { - return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) - } - - name := string(child.Root.Raw()) - - // trim start and end space - name = strings.TrimSpace(name) - - // if has prefix "profile " + [ws+] + "profile-name", - // we standardize by removing the [ws+] between prefix and profile-name. - if strings.HasPrefix(name, "profile ") { - names := strings.SplitN(name, " ", 2) - name = names[0] + " " + strings.TrimLeft(names[1], " ") - } - - // attach profile name on section - if !v.Sections.HasSection(name) { - v.Sections.container[name] = NewSection(name) - } - v.scope = name - default: - return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) - } - - return nil -} - -// Sections is a map of Section structures that represent -// a configuration. -type Sections struct { - container map[string]Section -} - -// NewSections returns empty ini Sections -func NewSections() Sections { - return Sections{ - container: make(map[string]Section, 0), - } -} - -// GetSection will return section p. If section p does not exist, -// false will be returned in the second parameter. -func (t Sections) GetSection(p string) (Section, bool) { - v, ok := t.container[p] - return v, ok -} - -// HasSection denotes if Sections consist of a section with -// provided name. 
-func (t Sections) HasSection(p string) bool { - _, ok := t.container[p] - return ok -} - -// SetSection sets a section value for provided section name. -func (t Sections) SetSection(p string, v Section) Sections { - t.container[p] = v - return t -} - -// DeleteSection deletes a section entry/value for provided section name./ -func (t Sections) DeleteSection(p string) { - delete(t.container, p) -} - -// values represents a map of union values. -type values map[string]Value - -// List will return a list of all sections that were successfully -// parsed. -func (t Sections) List() []string { - keys := make([]string, len(t.container)) - i := 0 - for k := range t.container { - keys[i] = k - i++ - } - - sort.Strings(keys) - return keys -} - -// Section contains a name and values. This represent -// a sectioned entry in a configuration file. -type Section struct { - // Name is the Section profile name - Name string - - // values are the values within parsed profile - values values - - // Errors is the list of errors - Errors []error - - // Logs is the list of logs - Logs []string - - // SourceFile is the INI Source file from where this section - // was retrieved. They key is the property, value is the - // source file the property was retrieved from. - SourceFile map[string]string -} - -// NewSection returns an initialize section for the name -func NewSection(name string) Section { - return Section{ - Name: name, - values: values{}, - SourceFile: map[string]string{}, - } -} - -// UpdateSourceFile updates source file for a property to provided filepath. 
-func (t Section) UpdateSourceFile(property string, filepath string) { - t.SourceFile[property] = filepath -} - -// UpdateValue updates value for a provided key with provided value -func (t Section) UpdateValue(k string, v Value) error { - t.values[k] = v - return nil -} - -// Has will return whether or not an entry exists in a given section -func (t Section) Has(k string) bool { - _, ok := t.values[k] - return ok -} - -// ValueType will returned what type the union is set to. If -// k was not found, the NoneType will be returned. -func (t Section) ValueType(k string) (ValueType, bool) { - v, ok := t.values[k] - return v.Type, ok -} - -// Bool returns a bool value at k -func (t Section) Bool(k string) bool { - return t.values[k].BoolValue() -} - -// Int returns an integer value at k -func (t Section) Int(k string) int64 { - return t.values[k].IntValue() -} - -// Float64 returns a float value at k -func (t Section) Float64(k string) float64 { - return t.values[k].FloatValue() -} - -// String returns the string value at k -func (t Section) String(k string) string { - _, ok := t.values[k] - if !ok { - return "" - } - return t.values[k].StringValue() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go deleted file mode 100644 index 99915f7f7..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go +++ /dev/null @@ -1,25 +0,0 @@ -package ini - -// Walk will traverse the AST using the v, the Visitor. 
-func Walk(tree []AST, v Visitor) error { - for _, node := range tree { - switch node.Kind { - case ASTKindExpr, - ASTKindExprStatement: - - if err := v.VisitExpr(node); err != nil { - return err - } - case ASTKindStatement, - ASTKindCompletedSectionStatement, - ASTKindNestedSectionStatement, - ASTKindCompletedNestedSectionStatement: - - if err := v.VisitStatement(node); err != nil { - return err - } - } - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go deleted file mode 100644 index 7ffb4ae06..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go +++ /dev/null @@ -1,24 +0,0 @@ -package ini - -import ( - "unicode" -) - -// isWhitespace will return whether or not the character is -// a whitespace character. -// -// Whitespace is defined as a space or tab. -func isWhitespace(c rune) bool { - return unicode.IsSpace(c) && c != '\n' && c != '\r' -} - -func newWSToken(b []rune) (Token, int, error) { - i := 0 - for ; i < len(b); i++ { - if !isWhitespace(b[i]) { - break - } - } - - return newToken(TokenWS, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh b/vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh deleted file mode 100644 index 81a836127..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash - -PROJECT_DIR="" -SDK_SOURCE_DIR=$(cd `dirname $0` && pwd) - -usage() { - echo "Usage: $0 [-s SDK_SOURCE_DIR] [-d PROJECT_DIR]" 1>&2 - exit 1 -} - -while getopts "hs:d:" options; do - case "${options}" in - s) - SDK_SOURCE_DIR=${OPTARG} - if [ "$SDK_SOURCE_DIR" == "" ]; then - echo "path to SDK source directory is required" || exit - usage - fi - ;; - d) - PROJECT_DIR=${OPTARG} - ;; - h) - usage - ;; - *) - usage - ;; - esac -done - -if [ "$PROJECT_DIR" != "" ]; then - cd "$PROJECT_DIR" || exit -fi - -go mod graph | awk 
'{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/aws-sdk-go-v2" | while read x; do - repPath=${x/github.com\/aws\/aws-sdk-go-v2/${SDK_SOURCE_DIR}} - echo -replace $x=$repPath -done | xargs go mod edit diff --git a/vendor/github.com/aws/aws-sdk-go-v2/modman.toml b/vendor/github.com/aws/aws-sdk-go-v2/modman.toml deleted file mode 100644 index 61c8da5c0..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/modman.toml +++ /dev/null @@ -1,78 +0,0 @@ - -[dependencies] - "github.com/aws/aws-sdk-go" = "v1.44.28" - "github.com/aws/smithy-go" = "v1.16.0" - "github.com/google/go-cmp" = "v0.5.8" - "github.com/jmespath/go-jmespath" = "v0.4.0" - "golang.org/x/net" = "v0.1.0" - -[modules] - - [modules."."] - metadata_package = "aws" - - [modules.codegen] - no_tag = true - - [modules."example/service/dynamodb/createTable"] - no_tag = true - - [modules."example/service/dynamodb/scanItems"] - no_tag = true - - [modules."example/service/s3/listObjects"] - no_tag = true - - [modules."example/service/s3/usingPrivateLink"] - no_tag = true - - [modules."feature/ec2/imds/internal/configtesting"] - no_tag = true - - [modules."internal/codegen"] - no_tag = true - - [modules."internal/configsources/configtesting"] - no_tag = true - - [modules."internal/protocoltest/awsrestjson"] - no_tag = true - - [modules."internal/protocoltest/ec2query"] - no_tag = true - - [modules."internal/protocoltest/jsonrpc"] - no_tag = true - - [modules."internal/protocoltest/jsonrpc10"] - no_tag = true - - [modules."internal/protocoltest/query"] - no_tag = true - - [modules."internal/protocoltest/restxml"] - no_tag = true - - [modules."internal/protocoltest/restxmlwithnamespace"] - no_tag = true - - [modules."internal/repotools"] - no_tag = true - - [modules."internal/repotools/changes"] - no_tag = true - - [modules."service/internal/benchmark"] - no_tag = true - - [modules."service/internal/integrationtest"] - no_tag = true - - [modules."service/kinesis/internal/testing"] - no_tag = true - - 
[modules."service/s3/internal/configtesting"] - no_tag = true - - [modules."service/transcribestreaming/internal/testing"] - no_tag = true diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md new file mode 100644 index 000000000..95cf44811 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -0,0 +1,112 @@ +# v1.10.1 (2023-11-15) + +* No change notes available for this release. + +# v1.10.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). + +# v1.9.15 (2023-10-06) + +* No change notes available for this release. + +# v1.9.14 (2023-08-18) + +* No change notes available for this release. + +# v1.9.13 (2023-08-07) + +* No change notes available for this release. + +# v1.9.12 (2023-07-31) + +* No change notes available for this release. + +# v1.9.11 (2022-12-02) + +* No change notes available for this release. + +# v1.9.10 (2022-10-24) + +* No change notes available for this release. + +# v1.9.9 (2022-09-14) + +* No change notes available for this release. + +# v1.9.8 (2022-09-02) + +* No change notes available for this release. + +# v1.9.7 (2022-08-31) + +* No change notes available for this release. + +# v1.9.6 (2022-08-29) + +* No change notes available for this release. + +# v1.9.5 (2022-08-11) + +* No change notes available for this release. + +# v1.9.4 (2022-08-09) + +* No change notes available for this release. + +# v1.9.3 (2022-06-29) + +* No change notes available for this release. + +# v1.9.2 (2022-06-07) + +* No change notes available for this release. + +# v1.9.1 (2022-03-24) + +* No change notes available for this release. 
+ +# v1.9.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.8.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.7.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.6.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.5.0 (2021-11-06) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.4.0 (2021-10-21) + +* **Feature**: Updated to latest version + +# v1.3.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.2.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. + +# v1.2.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version + +# v1.2.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.1.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. 
+ diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt similarity index 100% rename from vendor/google.golang.org/genproto/LICENSE rename to vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go new file mode 100644 index 000000000..3f451fc9b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go @@ -0,0 +1,176 @@ +package acceptencoding + +import ( + "compress/gzip" + "context" + "fmt" + "io" + + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const acceptEncodingHeaderKey = "Accept-Encoding" +const contentEncodingHeaderKey = "Content-Encoding" + +// AddAcceptEncodingGzipOptions provides the options for the +// AddAcceptEncodingGzip middleware setup. +type AddAcceptEncodingGzipOptions struct { + Enable bool +} + +// AddAcceptEncodingGzip explicitly adds handling for accept-encoding GZIP +// middleware to the operation stack. This allows checksums to be correctly +// computed without disabling GZIP support. +func AddAcceptEncodingGzip(stack *middleware.Stack, options AddAcceptEncodingGzipOptions) error { + if options.Enable { + if err := stack.Finalize.Add(&EnableGzip{}, middleware.Before); err != nil { + return err + } + if err := stack.Deserialize.Insert(&DecompressGzip{}, "OperationDeserializer", middleware.After); err != nil { + return err + } + return nil + } + + return stack.Finalize.Add(&DisableGzip{}, middleware.Before) +} + +// DisableGzip provides the middleware that will +// disable the underlying http client automatically enabling for gzip +// decompress content-encoding support. 
+type DisableGzip struct{} + +// ID returns the id for the middleware. +func (*DisableGzip) ID() string { + return "DisableAcceptEncodingGzip" +} + +// HandleFinalize implements the FinalizeMiddleware interface. +func (*DisableGzip) HandleFinalize( + ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + output middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := input.Request.(*smithyhttp.Request) + if !ok { + return output, metadata, &smithy.SerializationError{ + Err: fmt.Errorf("unknown request type %T", input.Request), + } + } + + // Explicitly enable gzip support, this will prevent the http client from + // auto extracting the zipped content. + req.Header.Set(acceptEncodingHeaderKey, "identity") + + return next.HandleFinalize(ctx, input) +} + +// EnableGzip provides a middleware to enable support for +// gzip responses, with manual decompression. This prevents the underlying HTTP +// client from performing the gzip decompression automatically. +type EnableGzip struct{} + +// ID returns the id for the middleware. +func (*EnableGzip) ID() string { + return "AcceptEncodingGzip" +} + +// HandleFinalize implements the FinalizeMiddleware interface. +func (*EnableGzip) HandleFinalize( + ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + output middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := input.Request.(*smithyhttp.Request) + if !ok { + return output, metadata, &smithy.SerializationError{ + Err: fmt.Errorf("unknown request type %T", input.Request), + } + } + + // Explicitly enable gzip support, this will prevent the http client from + // auto extracting the zipped content. + req.Header.Set(acceptEncodingHeaderKey, "gzip") + + return next.HandleFinalize(ctx, input) +} + +// DecompressGzip provides the middleware for decompressing a gzip +// response from the service. 
+type DecompressGzip struct{} + +// ID returns the id for the middleware. +func (*DecompressGzip) ID() string { + return "DecompressGzip" +} + +// HandleDeserialize implements the DeserializeMiddlware interface. +func (*DecompressGzip) HandleDeserialize( + ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + output middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + output, metadata, err = next.HandleDeserialize(ctx, input) + if err != nil { + return output, metadata, err + } + + resp, ok := output.RawResponse.(*smithyhttp.Response) + if !ok { + return output, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("unknown response type %T", output.RawResponse), + } + } + if v := resp.Header.Get(contentEncodingHeaderKey); v != "gzip" { + return output, metadata, err + } + + // Clear content length since it will no longer be valid once the response + // body is decompressed. + resp.Header.Del("Content-Length") + resp.ContentLength = -1 + + resp.Body = wrapGzipReader(resp.Body) + + return output, metadata, err +} + +type gzipReader struct { + reader io.ReadCloser + gzip *gzip.Reader +} + +func wrapGzipReader(reader io.ReadCloser) *gzipReader { + return &gzipReader{ + reader: reader, + } +} + +// Read wraps the gzip reader around the underlying io.Reader to extract the +// response bytes on the fly. +func (g *gzipReader) Read(b []byte) (n int, err error) { + if g.gzip == nil { + g.gzip, err = gzip.NewReader(g.reader) + if err != nil { + g.gzip = nil // ensure uninitialized gzip value isn't used in close. 
+ return 0, fmt.Errorf("failed to decompress gzip response, %w", err) + } + } + + return g.gzip.Read(b) +} + +func (g *gzipReader) Close() error { + if g.gzip == nil { + return nil + } + + if err := g.gzip.Close(); err != nil { + g.reader.Close() + return fmt.Errorf("failed to decompress gzip response, %w", err) + } + + return g.reader.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go new file mode 100644 index 000000000..7056d9bf6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go @@ -0,0 +1,22 @@ +/* +Package acceptencoding provides customizations associated with Accept Encoding Header. + +# Accept encoding gzip + +The Go HTTP client automatically supports accept-encoding and content-encoding +gzip by default. This default behavior is not desired by the SDK, and prevents +validating the response body's checksum. To prevent this the SDK must manually +control usage of content-encoding gzip. + +To control content-encoding, the SDK must always set the `Accept-Encoding` +header to a value. This prevents the HTTP client from using gzip automatically. +When gzip is enabled on the API client, the SDK's customization will control +decompressing the gzip data in order to not break the checksum validation. When +gzip is disabled, the API client will disable gzip, preventing the HTTP +client's default behavior. + +An `EnableAcceptEncodingGzip` option may or may not be present depending on the client using +the below middleware. The option if present can be used to enable auto decompressing +gzip by the SDK. 
+*/ +package acceptencoding diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go new file mode 100644 index 000000000..ee1cccaab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package acceptencoding + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.10.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index b8338d32b..301998292 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,11 @@ +# v1.10.3 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.10.1 (2023-11-01) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index d4d780e21..4750c8c4e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.10.1" +const goModuleVersion = "1.10.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/CHANGELOG.md index 
5d715fe7c..eb0803bf2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/CHANGELOG.md @@ -1,3 +1,21 @@ +# v1.25.2 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + # v1.23.0 (2023-10-26) * **Feature**: Message Archiving and Replay is now supported in Amazon SNS for FIFO topics. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_client.go index 8aff3b63f..7a1c3c45d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_client.go @@ -11,6 +11,8 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" smithy "github.com/aws/smithy-go" smithydocument "github.com/aws/smithy-go/document" @@ -47,10 +49,18 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveHTTPSignerV4(&options) + resolveEndpointResolverV2(&options) + + resolveAuthSchemeResolver(&options) + for _, fn := range optFns { fn(&options) } + ignoreAnonymousAuth(&options) + + resolveAuthSchemes(&options) + client := &Client{ options: options, } @@ -58,140 +68,10 @@ func New(options Options, optFns ...func(*Options)) *Client { return client } -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. - AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. - BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The credentials object to use when signing requests. 
- Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. - EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service. This should be used over - // the deprecated EndpointResolver - EndpointResolverV2 EndpointResolverV2 - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The region to send requests to. (Required) - Region string - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. If specified in an operation call's functional - // options with a value that is different than the constructed client's Options, - // the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. 
- RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. 
-// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - - return to -} func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { ctx = middleware.ClearStackValues(ctx) stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() - resolveEndpointResolverV2(&options) for _, fn := range optFns { fn(&options) @@ -225,6 +105,63 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf return result, metadata, err } +type operationInputKey struct{} + +func setOperationInput(ctx context.Context, input interface{}) context.Context { + return middleware.WithStackValue(ctx, operationInputKey{}, input) +} + +func getOperationInput(ctx context.Context) interface{} { + return middleware.GetStackValue(ctx, operationInputKey{}) +} + +type setOperationInputMiddleware struct { +} + +func (*setOperationInputMiddleware) ID() string { + return "setOperationInput" +} + +func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out 
middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + ctx = setOperationInput(ctx, in.Parameters) + return next.HandleSerialize(ctx, in) +} + +func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { + if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { + return fmt.Errorf("add ResolveAuthScheme: %v", err) + } + if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { + return fmt.Errorf("add GetIdentity: %v", err) + } + if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { + return fmt.Errorf("add ResolveEndpointV2: %v", err) + } + if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + return fmt.Errorf("add Signing: %v", err) + } + return nil +} +func resolveAuthSchemeResolver(options *Options) { + if options.AuthSchemeResolver == nil { + options.AuthSchemeResolver = &defaultAuthSchemeResolver{} + } +} + +func resolveAuthSchemes(options *Options) { + if options.AuthSchemes == nil { + options.AuthSchemes = []smithyhttp.AuthScheme{ + internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ + Signer: options.HTTPSignerV4, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), + } + } +} + type noSmithyDocumentSerde = smithydocument.NoSerde type legacyEndpointContextSetter struct { @@ -296,6 +233,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { resolveAWSEndpointResolver(cfg, &opts) resolveUseDualStackEndpoint(cfg, &opts) resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) return New(opts, optFns...) 
} @@ -414,15 +352,6 @@ func addClientUserAgent(stack *middleware.Stack, options Options) error { return nil } -func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { - mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ - CredentialsProvider: o.Credentials, - Signer: o.HTTPSignerV4, - LogSigning: o.ClientLogMode.IsSigning(), - }) - return stack.Finalize.Add(mw, middleware.After) -} - type HTTPSignerV4 interface { SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error } @@ -496,31 +425,31 @@ func addRequestResponseLogging(stack *middleware.Stack, o Options) error { }, middleware.After) } -type endpointDisableHTTPSMiddleware struct { - EndpointDisableHTTPS bool +type disableHTTPSMiddleware struct { + DisableHTTPS bool } -func (*endpointDisableHTTPSMiddleware) ID() string { - return "endpointDisableHTTPSMiddleware" +func (*disableHTTPSMiddleware) ID() string { + return "disableHTTPS" } -func (m *endpointDisableHTTPSMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, +func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) } - if m.EndpointDisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { req.URL.Scheme = "http" } - return next.HandleSerialize(ctx, in) - + return next.HandleFinalize(ctx, in) } -func addendpointDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return 
stack.Serialize.Insert(&endpointDisableHTTPSMiddleware{ - EndpointDisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "OperationSerializer", middleware.Before) + +func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Finalize.Insert(&disableHTTPSMiddleware{ + DisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "ResolveEndpointV2", middleware.After) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_AddPermission.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_AddPermission.go index ce26c45c9..3f639a759 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_AddPermission.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_AddPermission.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -71,6 +67,9 @@ type AddPermissionOutput struct { } func (c *Client) addOperationAddPermissionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpAddPermission{}, middleware.After) if err != nil { return err @@ -79,6 +78,10 @@ func (c *Client) addOperationAddPermissionMiddlewares(stack *middleware.Stack, o if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "AddPermission"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -100,9 +103,6 @@ func (c *Client) addOperationAddPermissionMiddlewares(stack 
*middleware.Stack, o if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -118,7 +118,7 @@ func (c *Client) addOperationAddPermissionMiddlewares(stack *middleware.Stack, o if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addAddPermissionResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpAddPermissionValidationMiddleware(stack); err != nil { @@ -139,7 +139,7 @@ func (c *Client) addOperationAddPermissionMiddlewares(stack *middleware.Stack, o if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -149,130 +149,6 @@ func newServiceMetadataMiddleware_opAddPermission(region string) *awsmiddleware. 
return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "AddPermission", } } - -type opAddPermissionResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opAddPermissionResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opAddPermissionResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, 
fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addAddPermissionResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opAddPermissionResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CheckIfPhoneNumberIsOptedOut.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CheckIfPhoneNumberIsOptedOut.go index 102c9da1c..d75f9e9e6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CheckIfPhoneNumberIsOptedOut.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CheckIfPhoneNumberIsOptedOut.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -62,6 +58,9 @@ type CheckIfPhoneNumberIsOptedOutOutput struct { } func (c *Client) addOperationCheckIfPhoneNumberIsOptedOutMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpCheckIfPhoneNumberIsOptedOut{}, middleware.After) if err != nil { return err @@ -70,6 +69,10 @@ func (c *Client) addOperationCheckIfPhoneNumberIsOptedOutMiddlewares(stack *midd if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "CheckIfPhoneNumberIsOptedOut"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -91,9 +94,6 @@ func (c *Client) addOperationCheckIfPhoneNumberIsOptedOutMiddlewares(stack *midd if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -109,7 +109,7 @@ func (c *Client) addOperationCheckIfPhoneNumberIsOptedOutMiddlewares(stack *midd if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addCheckIfPhoneNumberIsOptedOutResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpCheckIfPhoneNumberIsOptedOutValidationMiddleware(stack); err != nil { @@ -130,7 +130,7 @@ func (c *Client) addOperationCheckIfPhoneNumberIsOptedOutMiddlewares(stack *midd if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -140,130 +140,6 @@ func newServiceMetadataMiddleware_opCheckIfPhoneNumberIsOptedOut(region string) return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "CheckIfPhoneNumberIsOptedOut", } } - -type opCheckIfPhoneNumberIsOptedOutResolveEndpointMiddleware struct { - EndpointResolver 
EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opCheckIfPhoneNumberIsOptedOutResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opCheckIfPhoneNumberIsOptedOutResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := 
range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addCheckIfPhoneNumberIsOptedOutResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opCheckIfPhoneNumberIsOptedOutResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ConfirmSubscription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ConfirmSubscription.go index aaa9a6081..d7cb556cb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ConfirmSubscription.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ConfirmSubscription.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -70,6 +66,9 @@ type ConfirmSubscriptionOutput struct { } func (c *Client) addOperationConfirmSubscriptionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpConfirmSubscription{}, middleware.After) if err != nil { return err @@ -78,6 +77,10 @@ func (c *Client) addOperationConfirmSubscriptionMiddlewares(stack *middleware.St if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ConfirmSubscription"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -99,9 +102,6 @@ func (c *Client) addOperationConfirmSubscriptionMiddlewares(stack *middleware.St if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -117,7 +117,7 @@ func (c *Client) addOperationConfirmSubscriptionMiddlewares(stack *middleware.St if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addConfirmSubscriptionResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpConfirmSubscriptionValidationMiddleware(stack); err != nil { @@ -138,7 +138,7 @@ func (c *Client) addOperationConfirmSubscriptionMiddlewares(stack *middleware.St if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -148,130 +148,6 @@ func newServiceMetadataMiddleware_opConfirmSubscription(region string) *awsmiddl return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "ConfirmSubscription", } } - -type opConfirmSubscriptionResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver 
builtInParameterResolver -} - -func (*opConfirmSubscriptionResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opConfirmSubscriptionResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case 
*internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addConfirmSubscriptionResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opConfirmSubscriptionResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CreatePlatformApplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CreatePlatformApplication.go index 0a78c4a86..9fc8e4b61 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CreatePlatformApplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CreatePlatformApplication.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -92,6 +88,9 @@ type CreatePlatformApplicationOutput struct { } func (c *Client) addOperationCreatePlatformApplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpCreatePlatformApplication{}, middleware.After) if err != nil { return err @@ -100,6 +99,10 @@ func (c *Client) addOperationCreatePlatformApplicationMiddlewares(stack *middlew if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreatePlatformApplication"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -121,9 +124,6 @@ func (c *Client) addOperationCreatePlatformApplicationMiddlewares(stack *middlew if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -139,7 +139,7 @@ func (c *Client) addOperationCreatePlatformApplicationMiddlewares(stack *middlew if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addCreatePlatformApplicationResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpCreatePlatformApplicationValidationMiddleware(stack); err != nil { @@ -160,7 +160,7 @@ func (c *Client) addOperationCreatePlatformApplicationMiddlewares(stack *middlew if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -170,130 +170,6 @@ func newServiceMetadataMiddleware_opCreatePlatformApplication(region string) *aw return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "CreatePlatformApplication", } } - -type opCreatePlatformApplicationResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - 
BuiltInResolver builtInParameterResolver -} - -func (*opCreatePlatformApplicationResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opCreatePlatformApplicationResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch 
authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addCreatePlatformApplicationResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opCreatePlatformApplicationResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CreatePlatformEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CreatePlatformEndpoint.go index cfdf7dd0f..06bacefc4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CreatePlatformEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CreatePlatformEndpoint.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -85,6 +81,9 @@ type CreatePlatformEndpointOutput struct { } func (c *Client) addOperationCreatePlatformEndpointMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpCreatePlatformEndpoint{}, middleware.After) if err != nil { return err @@ -93,6 +92,10 @@ func (c *Client) addOperationCreatePlatformEndpointMiddlewares(stack *middleware if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreatePlatformEndpoint"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -114,9 +117,6 @@ func (c *Client) addOperationCreatePlatformEndpointMiddlewares(stack *middleware if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -132,7 +132,7 @@ func (c *Client) addOperationCreatePlatformEndpointMiddlewares(stack *middleware if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addCreatePlatformEndpointResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpCreatePlatformEndpointValidationMiddleware(stack); err != nil { @@ -153,7 +153,7 @@ func (c *Client) addOperationCreatePlatformEndpointMiddlewares(stack *middleware if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -163,130 +163,6 @@ func newServiceMetadataMiddleware_opCreatePlatformEndpoint(region string) *awsmi return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "CreatePlatformEndpoint", } } - -type opCreatePlatformEndpointResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver 
builtInParameterResolver -} - -func (*opCreatePlatformEndpointResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opCreatePlatformEndpointResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - 
case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addCreatePlatformEndpointResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opCreatePlatformEndpointResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CreateSMSSandboxPhoneNumber.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CreateSMSSandboxPhoneNumber.go index c2e4c4e21..43776b425 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CreateSMSSandboxPhoneNumber.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CreateSMSSandboxPhoneNumber.go @@ -4,14 +4,10 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sns/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -64,6 +60,9 @@ type CreateSMSSandboxPhoneNumberOutput struct { } func (c *Client) addOperationCreateSMSSandboxPhoneNumberMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != 
nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpCreateSMSSandboxPhoneNumber{}, middleware.After) if err != nil { return err @@ -72,6 +71,10 @@ func (c *Client) addOperationCreateSMSSandboxPhoneNumberMiddlewares(stack *middl if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateSMSSandboxPhoneNumber"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -93,9 +96,6 @@ func (c *Client) addOperationCreateSMSSandboxPhoneNumberMiddlewares(stack *middl if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -111,7 +111,7 @@ func (c *Client) addOperationCreateSMSSandboxPhoneNumberMiddlewares(stack *middl if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addCreateSMSSandboxPhoneNumberResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpCreateSMSSandboxPhoneNumberValidationMiddleware(stack); err != nil { @@ -132,7 +132,7 @@ func (c *Client) addOperationCreateSMSSandboxPhoneNumberMiddlewares(stack *middl if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -142,130 +142,6 @@ func newServiceMetadataMiddleware_opCreateSMSSandboxPhoneNumber(region string) * return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "CreateSMSSandboxPhoneNumber", } } - -type opCreateSMSSandboxPhoneNumberResolveEndpointMiddleware struct { - 
EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opCreateSMSSandboxPhoneNumberResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opCreateSMSSandboxPhoneNumberResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, 
authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addCreateSMSSandboxPhoneNumberResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opCreateSMSSandboxPhoneNumberResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CreateTopic.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CreateTopic.go index 4086e2f84..5de2e617e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CreateTopic.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_CreateTopic.go @@ -4,14 +4,10 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sns/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -119,6 +115,9 @@ type CreateTopicOutput struct { } func (c *Client) addOperationCreateTopicMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpCreateTopic{}, middleware.After) if err != nil { return err @@ -127,6 +126,10 @@ func (c *Client) addOperationCreateTopicMiddlewares(stack *middleware.Stack, opt if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateTopic"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -148,9 +151,6 @@ func (c *Client) addOperationCreateTopicMiddlewares(stack *middleware.Stack, opt if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -166,7 +166,7 @@ func (c *Client) addOperationCreateTopicMiddlewares(stack *middleware.Stack, opt if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addCreateTopicResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpCreateTopicValidationMiddleware(stack); err != nil { @@ -187,7 +187,7 @@ func (c *Client) addOperationCreateTopicMiddlewares(stack *middleware.Stack, opt if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -197,130 +197,6 @@ func newServiceMetadataMiddleware_opCreateTopic(region string) *awsmiddleware.Re return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "CreateTopic", } } - -type opCreateTopicResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func 
(*opCreateTopicResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opCreateTopicResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ 
:= authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addCreateTopicResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opCreateTopicResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_DeleteEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_DeleteEndpoint.go index 96cf5f02f..c9b104e15 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_DeleteEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_DeleteEndpoint.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -54,6 +50,9 @@ type DeleteEndpointOutput struct { } func (c *Client) addOperationDeleteEndpointMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpDeleteEndpoint{}, middleware.After) if err != nil { return err @@ -62,6 
+61,10 @@ func (c *Client) addOperationDeleteEndpointMiddlewares(stack *middleware.Stack, if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteEndpoint"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -83,9 +86,6 @@ func (c *Client) addOperationDeleteEndpointMiddlewares(stack *middleware.Stack, if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -101,7 +101,7 @@ func (c *Client) addOperationDeleteEndpointMiddlewares(stack *middleware.Stack, if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addDeleteEndpointResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpDeleteEndpointValidationMiddleware(stack); err != nil { @@ -122,7 +122,7 @@ func (c *Client) addOperationDeleteEndpointMiddlewares(stack *middleware.Stack, if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -132,130 +132,6 @@ func newServiceMetadataMiddleware_opDeleteEndpoint(region string) *awsmiddleware return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "DeleteEndpoint", } } - -type opDeleteEndpointResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opDeleteEndpointResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m 
*opDeleteEndpointResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - 
if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addDeleteEndpointResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opDeleteEndpointResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_DeletePlatformApplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_DeletePlatformApplication.go index 417390597..737ba3db9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_DeletePlatformApplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_DeletePlatformApplication.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -53,6 +49,9 @@ type DeletePlatformApplicationOutput struct { } func (c *Client) addOperationDeletePlatformApplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpDeletePlatformApplication{}, middleware.After) if err != nil { return err @@ -61,6 +60,10 @@ func (c *Client) addOperationDeletePlatformApplicationMiddlewares(stack *middlew if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeletePlatformApplication"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -82,9 +85,6 @@ func (c *Client) addOperationDeletePlatformApplicationMiddlewares(stack *middlew if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -100,7 +100,7 @@ func (c *Client) addOperationDeletePlatformApplicationMiddlewares(stack *middlew if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addDeletePlatformApplicationResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpDeletePlatformApplicationValidationMiddleware(stack); err != nil { @@ -121,7 +121,7 @@ func (c *Client) addOperationDeletePlatformApplicationMiddlewares(stack *middlew if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -131,130 +131,6 @@ func newServiceMetadataMiddleware_opDeletePlatformApplication(region string) *aw return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "DeletePlatformApplication", } } - -type opDeletePlatformApplicationResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - 
BuiltInResolver builtInParameterResolver -} - -func (*opDeletePlatformApplicationResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opDeletePlatformApplicationResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch 
authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addDeletePlatformApplicationResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opDeletePlatformApplicationResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_DeleteSMSSandboxPhoneNumber.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_DeleteSMSSandboxPhoneNumber.go index 511bb4ce8..97ed42326 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_DeleteSMSSandboxPhoneNumber.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_DeleteSMSSandboxPhoneNumber.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -58,6 +54,9 @@ type DeleteSMSSandboxPhoneNumberOutput struct { } func (c *Client) addOperationDeleteSMSSandboxPhoneNumberMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpDeleteSMSSandboxPhoneNumber{}, middleware.After) if err != nil { return err @@ -66,6 +65,10 @@ func (c *Client) addOperationDeleteSMSSandboxPhoneNumberMiddlewares(stack *middl if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteSMSSandboxPhoneNumber"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -87,9 +90,6 @@ func (c *Client) addOperationDeleteSMSSandboxPhoneNumberMiddlewares(stack *middl if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -105,7 +105,7 @@ func (c *Client) addOperationDeleteSMSSandboxPhoneNumberMiddlewares(stack *middl if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addDeleteSMSSandboxPhoneNumberResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpDeleteSMSSandboxPhoneNumberValidationMiddleware(stack); err != nil { @@ -126,7 +126,7 @@ func (c *Client) addOperationDeleteSMSSandboxPhoneNumberMiddlewares(stack *middl if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -136,130 +136,6 @@ func newServiceMetadataMiddleware_opDeleteSMSSandboxPhoneNumber(region string) * return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "DeleteSMSSandboxPhoneNumber", } } - -type opDeleteSMSSandboxPhoneNumberResolveEndpointMiddleware struct { - EndpointResolver 
EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opDeleteSMSSandboxPhoneNumberResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opDeleteSMSSandboxPhoneNumberResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := 
range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addDeleteSMSSandboxPhoneNumberResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opDeleteSMSSandboxPhoneNumberResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_DeleteTopic.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_DeleteTopic.go index 67c95a1ec..ef5b9a620 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_DeleteTopic.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_DeleteTopic.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -52,6 +48,9 @@ type DeleteTopicOutput struct { } func (c *Client) addOperationDeleteTopicMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpDeleteTopic{}, middleware.After) if err != nil { return err @@ 
-60,6 +59,10 @@ func (c *Client) addOperationDeleteTopicMiddlewares(stack *middleware.Stack, opt if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteTopic"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -81,9 +84,6 @@ func (c *Client) addOperationDeleteTopicMiddlewares(stack *middleware.Stack, opt if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -99,7 +99,7 @@ func (c *Client) addOperationDeleteTopicMiddlewares(stack *middleware.Stack, opt if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addDeleteTopicResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpDeleteTopicValidationMiddleware(stack); err != nil { @@ -120,7 +120,7 @@ func (c *Client) addOperationDeleteTopicMiddlewares(stack *middleware.Stack, opt if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -130,130 +130,6 @@ func newServiceMetadataMiddleware_opDeleteTopic(region string) *awsmiddleware.Re return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "DeleteTopic", } } - -type opDeleteTopicResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opDeleteTopicResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opDeleteTopicResolveEndpointMiddleware) 
HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - 
signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addDeleteTopicResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opDeleteTopicResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetDataProtectionPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetDataProtectionPolicy.go index b0daa3ffe..bc1d33290 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetDataProtectionPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetDataProtectionPolicy.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -56,6 +52,9 @@ type GetDataProtectionPolicyOutput struct { } func (c *Client) addOperationGetDataProtectionPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpGetDataProtectionPolicy{}, middleware.After) if err != nil { return err @@ -64,6 +63,10 @@ func (c *Client) addOperationGetDataProtectionPolicyMiddlewares(stack *middlewar if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetDataProtectionPolicy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -85,9 +88,6 @@ func (c *Client) addOperationGetDataProtectionPolicyMiddlewares(stack *middlewar if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -103,7 +103,7 @@ func (c *Client) addOperationGetDataProtectionPolicyMiddlewares(stack *middlewar if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addGetDataProtectionPolicyResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpGetDataProtectionPolicyValidationMiddleware(stack); err != nil { @@ -124,7 +124,7 @@ func (c *Client) addOperationGetDataProtectionPolicyMiddlewares(stack *middlewar if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -134,130 +134,6 @@ func newServiceMetadataMiddleware_opGetDataProtectionPolicy(region string) *awsm return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "GetDataProtectionPolicy", } } - -type opGetDataProtectionPolicyResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver 
builtInParameterResolver -} - -func (*opGetDataProtectionPolicyResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opGetDataProtectionPolicyResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { 
- case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addGetDataProtectionPolicyResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opGetDataProtectionPolicyResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetEndpointAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetEndpointAttributes.go index 6ca21143d..d0a156f39 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetEndpointAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetEndpointAttributes.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -68,6 +64,9 @@ type GetEndpointAttributesOutput struct { } func (c *Client) addOperationGetEndpointAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpGetEndpointAttributes{}, middleware.After) if err != nil { return err @@ -76,6 +75,10 @@ func (c *Client) addOperationGetEndpointAttributesMiddlewares(stack *middleware. if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetEndpointAttributes"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -97,9 +100,6 @@ func (c *Client) addOperationGetEndpointAttributesMiddlewares(stack *middleware. if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -115,7 +115,7 @@ func (c *Client) addOperationGetEndpointAttributesMiddlewares(stack *middleware. if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addGetEndpointAttributesResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpGetEndpointAttributesValidationMiddleware(stack); err != nil { @@ -136,7 +136,7 @@ func (c *Client) addOperationGetEndpointAttributesMiddlewares(stack *middleware. 
if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -146,130 +146,6 @@ func newServiceMetadataMiddleware_opGetEndpointAttributes(region string) *awsmid return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "GetEndpointAttributes", } } - -type opGetEndpointAttributesResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opGetEndpointAttributesResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opGetEndpointAttributesResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) 
{ - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addGetEndpointAttributesResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opGetEndpointAttributesResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetPlatformApplicationAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetPlatformApplicationAttributes.go index c977c3492..accb8cb29 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetPlatformApplicationAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetPlatformApplicationAttributes.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -73,6 +69,9 @@ type GetPlatformApplicationAttributesOutput struct { } func (c *Client) addOperationGetPlatformApplicationAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } 
err = stack.Serialize.Add(&awsAwsquery_serializeOpGetPlatformApplicationAttributes{}, middleware.After) if err != nil { return err @@ -81,6 +80,10 @@ func (c *Client) addOperationGetPlatformApplicationAttributesMiddlewares(stack * if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetPlatformApplicationAttributes"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -102,9 +105,6 @@ func (c *Client) addOperationGetPlatformApplicationAttributesMiddlewares(stack * if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -120,7 +120,7 @@ func (c *Client) addOperationGetPlatformApplicationAttributesMiddlewares(stack * if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addGetPlatformApplicationAttributesResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpGetPlatformApplicationAttributesValidationMiddleware(stack); err != nil { @@ -141,7 +141,7 @@ func (c *Client) addOperationGetPlatformApplicationAttributesMiddlewares(stack * if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -151,130 +151,6 @@ func newServiceMetadataMiddleware_opGetPlatformApplicationAttributes(region stri return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "GetPlatformApplicationAttributes", } } - -type opGetPlatformApplicationAttributesResolveEndpointMiddleware 
struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opGetPlatformApplicationAttributesResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opGetPlatformApplicationAttributesResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - 
) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addGetPlatformApplicationAttributesResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opGetPlatformApplicationAttributesResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetSMSAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetSMSAttributes.go index d8b2dffd3..41d50f53c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetSMSAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetSMSAttributes.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -56,6 +52,9 @@ type GetSMSAttributesOutput struct { } func (c *Client) addOperationGetSMSAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpGetSMSAttributes{}, 
middleware.After) if err != nil { return err @@ -64,6 +63,10 @@ func (c *Client) addOperationGetSMSAttributesMiddlewares(stack *middleware.Stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetSMSAttributes"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -85,9 +88,6 @@ func (c *Client) addOperationGetSMSAttributesMiddlewares(stack *middleware.Stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -103,7 +103,7 @@ func (c *Client) addOperationGetSMSAttributesMiddlewares(stack *middleware.Stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addGetSMSAttributesResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSMSAttributes(options.Region), middleware.Before); err != nil { @@ -121,7 +121,7 @@ func (c *Client) addOperationGetSMSAttributesMiddlewares(stack *middleware.Stack if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -131,130 +131,6 @@ func newServiceMetadataMiddleware_opGetSMSAttributes(region string) *awsmiddlewa return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "GetSMSAttributes", } } - -type opGetSMSAttributesResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func 
(*opGetSMSAttributesResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opGetSMSAttributesResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - 
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addGetSMSAttributesResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opGetSMSAttributesResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetSMSSandboxAccountStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetSMSSandboxAccountStatus.go index 2dca270d3..41f0a7766 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetSMSSandboxAccountStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetSMSSandboxAccountStatus.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -58,6 +54,9 @@ type GetSMSSandboxAccountStatusOutput struct { } func (c *Client) addOperationGetSMSSandboxAccountStatusMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpGetSMSSandboxAccountStatus{}, middleware.After) if err != nil { return err @@ -66,6 +65,10 @@ func (c *Client) addOperationGetSMSSandboxAccountStatusMiddlewares(stack *middle if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetSMSSandboxAccountStatus"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -87,9 +90,6 @@ func (c *Client) addOperationGetSMSSandboxAccountStatusMiddlewares(stack *middle if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -105,7 +105,7 @@ func (c *Client) addOperationGetSMSSandboxAccountStatusMiddlewares(stack *middle if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addGetSMSSandboxAccountStatusResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSMSSandboxAccountStatus(options.Region), middleware.Before); err != nil { @@ -123,7 +123,7 @@ func (c *Client) addOperationGetSMSSandboxAccountStatusMiddlewares(stack *middle if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -133,130 +133,6 @@ func newServiceMetadataMiddleware_opGetSMSSandboxAccountStatus(region string) *a return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "GetSMSSandboxAccountStatus", } } - -type 
opGetSMSSandboxAccountStatusResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opGetSMSSandboxAccountStatusResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opGetSMSSandboxAccountStatusResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - 
ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addGetSMSSandboxAccountStatusResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opGetSMSSandboxAccountStatusResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetSubscriptionAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetSubscriptionAttributes.go index 7d44eaa81..9b7c1e734 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetSubscriptionAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetSubscriptionAttributes.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -94,6 +90,9 @@ type GetSubscriptionAttributesOutput struct { } func (c *Client) addOperationGetSubscriptionAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpGetSubscriptionAttributes{}, middleware.After) if err != nil { return err @@ -102,6 +101,10 @@ func (c *Client) addOperationGetSubscriptionAttributesMiddlewares(stack *middlew if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetSubscriptionAttributes"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -123,9 +126,6 @@ func (c *Client) addOperationGetSubscriptionAttributesMiddlewares(stack *middlew if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -141,7 +141,7 @@ func (c *Client) addOperationGetSubscriptionAttributesMiddlewares(stack *middlew if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addGetSubscriptionAttributesResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpGetSubscriptionAttributesValidationMiddleware(stack); err != nil { @@ -162,7 +162,7 @@ func (c *Client) addOperationGetSubscriptionAttributesMiddlewares(stack *middlew if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -172,130 +172,6 @@ func newServiceMetadataMiddleware_opGetSubscriptionAttributes(region string) *aw return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "GetSubscriptionAttributes", } } - -type opGetSubscriptionAttributesResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - 
BuiltInResolver builtInParameterResolver -} - -func (*opGetSubscriptionAttributesResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opGetSubscriptionAttributesResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch 
authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addGetSubscriptionAttributesResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opGetSubscriptionAttributesResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetTopicAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetTopicAttributes.go index daac7a6bf..3ffacb824 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetTopicAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_GetTopicAttributes.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -104,6 +100,9 @@ type GetTopicAttributesOutput struct { } func (c *Client) addOperationGetTopicAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpGetTopicAttributes{}, 
middleware.After) if err != nil { return err @@ -112,6 +111,10 @@ func (c *Client) addOperationGetTopicAttributesMiddlewares(stack *middleware.Sta if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetTopicAttributes"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -133,9 +136,6 @@ func (c *Client) addOperationGetTopicAttributesMiddlewares(stack *middleware.Sta if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -151,7 +151,7 @@ func (c *Client) addOperationGetTopicAttributesMiddlewares(stack *middleware.Sta if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addGetTopicAttributesResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpGetTopicAttributesValidationMiddleware(stack); err != nil { @@ -172,7 +172,7 @@ func (c *Client) addOperationGetTopicAttributesMiddlewares(stack *middleware.Sta if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -182,130 +182,6 @@ func newServiceMetadataMiddleware_opGetTopicAttributes(region string) *awsmiddle return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "GetTopicAttributes", } } - -type opGetTopicAttributesResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opGetTopicAttributesResolveEndpointMiddleware) ID() 
string { - return "ResolveEndpointV2" -} - -func (m *opGetTopicAttributesResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := 
authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addGetTopicAttributesResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opGetTopicAttributesResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListEndpointsByPlatformApplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListEndpointsByPlatformApplication.go index e8e5a1824..833edce0c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListEndpointsByPlatformApplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListEndpointsByPlatformApplication.go @@ -4,14 +4,10 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sns/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -73,6 +69,9 @@ type ListEndpointsByPlatformApplicationOutput struct { } func (c *Client) addOperationListEndpointsByPlatformApplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := 
stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpListEndpointsByPlatformApplication{}, middleware.After) if err != nil { return err @@ -81,6 +80,10 @@ func (c *Client) addOperationListEndpointsByPlatformApplicationMiddlewares(stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListEndpointsByPlatformApplication"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -102,9 +105,6 @@ func (c *Client) addOperationListEndpointsByPlatformApplicationMiddlewares(stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -120,7 +120,7 @@ func (c *Client) addOperationListEndpointsByPlatformApplicationMiddlewares(stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addListEndpointsByPlatformApplicationResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpListEndpointsByPlatformApplicationValidationMiddleware(stack); err != nil { @@ -141,7 +141,7 @@ func (c *Client) addOperationListEndpointsByPlatformApplicationMiddlewares(stack if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -232,130 +232,6 @@ func newServiceMetadataMiddleware_opListEndpointsByPlatformApplication(region st return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: 
"ListEndpointsByPlatformApplication", } } - -type opListEndpointsByPlatformApplicationResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opListEndpointsByPlatformApplicationResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opListEndpointsByPlatformApplicationResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests 
signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addListEndpointsByPlatformApplicationResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opListEndpointsByPlatformApplicationResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListOriginationNumbers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListOriginationNumbers.go index e359a9881..4ff97eee0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListOriginationNumbers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListOriginationNumbers.go @@ -4,14 +4,10 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sns/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -62,6 +58,9 @@ type ListOriginationNumbersOutput struct { } func (c *Client) addOperationListOriginationNumbersMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { 
+ return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpListOriginationNumbers{}, middleware.After) if err != nil { return err @@ -70,6 +69,10 @@ func (c *Client) addOperationListOriginationNumbersMiddlewares(stack *middleware if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListOriginationNumbers"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -91,9 +94,6 @@ func (c *Client) addOperationListOriginationNumbersMiddlewares(stack *middleware if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -109,7 +109,7 @@ func (c *Client) addOperationListOriginationNumbersMiddlewares(stack *middleware if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addListOriginationNumbersResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListOriginationNumbers(options.Region), middleware.Before); err != nil { @@ -127,7 +127,7 @@ func (c *Client) addOperationListOriginationNumbersMiddlewares(stack *middleware if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -228,130 +228,6 @@ func newServiceMetadataMiddleware_opListOriginationNumbers(region string) *awsmi return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "ListOriginationNumbers", } } - -type 
opListOriginationNumbersResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opListOriginationNumbersResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opListOriginationNumbersResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - 
internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addListOriginationNumbersResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opListOriginationNumbersResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListPhoneNumbersOptedOut.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListPhoneNumbersOptedOut.go index 661ca5998..b0abc1d8e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListPhoneNumbersOptedOut.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListPhoneNumbersOptedOut.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -65,6 +61,9 @@ type ListPhoneNumbersOptedOutOutput struct { } func (c *Client) addOperationListPhoneNumbersOptedOutMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpListPhoneNumbersOptedOut{}, middleware.After) if err != nil { return err @@ -73,6 +72,10 @@ func (c *Client) addOperationListPhoneNumbersOptedOutMiddlewares(stack *middlewa if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListPhoneNumbersOptedOut"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -94,9 +97,6 @@ func (c *Client) addOperationListPhoneNumbersOptedOutMiddlewares(stack *middlewa if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -112,7 +112,7 @@ func (c *Client) addOperationListPhoneNumbersOptedOutMiddlewares(stack *middlewa if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addListPhoneNumbersOptedOutResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListPhoneNumbersOptedOut(options.Region), middleware.Before); err != nil { @@ -130,7 +130,7 @@ func (c *Client) addOperationListPhoneNumbersOptedOutMiddlewares(stack *middlewa if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -220,130 +220,6 @@ func newServiceMetadataMiddleware_opListPhoneNumbersOptedOut(region string) *aws return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "ListPhoneNumbersOptedOut", } } - -type opListPhoneNumbersOptedOutResolveEndpointMiddleware 
struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opListPhoneNumbersOptedOutResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opListPhoneNumbersOptedOutResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - 
for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addListPhoneNumbersOptedOutResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opListPhoneNumbersOptedOutResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListPlatformApplications.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListPlatformApplications.go index c7f1f2fd7..f7237c94b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListPlatformApplications.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListPlatformApplications.go @@ -4,14 +4,10 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sns/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -68,6 +64,9 @@ type ListPlatformApplicationsOutput struct { } func (c *Client) addOperationListPlatformApplicationsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return 
err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpListPlatformApplications{}, middleware.After) if err != nil { return err @@ -76,6 +75,10 @@ func (c *Client) addOperationListPlatformApplicationsMiddlewares(stack *middlewa if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListPlatformApplications"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -97,9 +100,6 @@ func (c *Client) addOperationListPlatformApplicationsMiddlewares(stack *middlewa if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -115,7 +115,7 @@ func (c *Client) addOperationListPlatformApplicationsMiddlewares(stack *middlewa if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addListPlatformApplicationsResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListPlatformApplications(options.Region), middleware.Before); err != nil { @@ -133,7 +133,7 @@ func (c *Client) addOperationListPlatformApplicationsMiddlewares(stack *middlewa if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -223,130 +223,6 @@ func newServiceMetadataMiddleware_opListPlatformApplications(region string) *aws return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "ListPlatformApplications", } } - -type 
opListPlatformApplicationsResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opListPlatformApplicationsResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opListPlatformApplicationsResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, 
- internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addListPlatformApplicationsResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opListPlatformApplicationsResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListSMSSandboxPhoneNumbers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListSMSSandboxPhoneNumbers.go index 82627b123..38012d8c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListSMSSandboxPhoneNumbers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListSMSSandboxPhoneNumbers.go @@ -4,14 +4,10 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sns/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -70,6 +66,9 @@ type ListSMSSandboxPhoneNumbersOutput struct { } func (c *Client) addOperationListSMSSandboxPhoneNumbersMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != 
nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpListSMSSandboxPhoneNumbers{}, middleware.After) if err != nil { return err @@ -78,6 +77,10 @@ func (c *Client) addOperationListSMSSandboxPhoneNumbersMiddlewares(stack *middle if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListSMSSandboxPhoneNumbers"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -99,9 +102,6 @@ func (c *Client) addOperationListSMSSandboxPhoneNumbersMiddlewares(stack *middle if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -117,7 +117,7 @@ func (c *Client) addOperationListSMSSandboxPhoneNumbersMiddlewares(stack *middle if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addListSMSSandboxPhoneNumbersResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListSMSSandboxPhoneNumbers(options.Region), middleware.Before); err != nil { @@ -135,7 +135,7 @@ func (c *Client) addOperationListSMSSandboxPhoneNumbersMiddlewares(stack *middle if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -238,130 +238,6 @@ func newServiceMetadataMiddleware_opListSMSSandboxPhoneNumbers(region string) *a return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "ListSMSSandboxPhoneNumbers", } } - -type 
opListSMSSandboxPhoneNumbersResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opListSMSSandboxPhoneNumbersResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opListSMSSandboxPhoneNumbersResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - 
ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addListSMSSandboxPhoneNumbersResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opListSMSSandboxPhoneNumbersResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListSubscriptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListSubscriptions.go index 982da9ae4..416687395 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListSubscriptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListSubscriptions.go @@ -4,14 +4,10 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sns/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -62,6 +58,9 @@ type ListSubscriptionsOutput struct { } func (c *Client) addOperationListSubscriptionsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpListSubscriptions{}, middleware.After) if err != nil { return err @@ -70,6 +69,10 @@ func (c *Client) addOperationListSubscriptionsMiddlewares(stack *middleware.Stac if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListSubscriptions"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -91,9 +94,6 @@ func (c *Client) addOperationListSubscriptionsMiddlewares(stack *middleware.Stac if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -109,7 +109,7 @@ func (c *Client) addOperationListSubscriptionsMiddlewares(stack *middleware.Stac if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addListSubscriptionsResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListSubscriptions(options.Region), middleware.Before); err != nil { @@ -127,7 +127,7 @@ func (c *Client) addOperationListSubscriptionsMiddlewares(stack *middleware.Stac if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -215,130 +215,6 @@ func newServiceMetadataMiddleware_opListSubscriptions(region string) *awsmiddlew return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "ListSubscriptions", } } - -type opListSubscriptionsResolveEndpointMiddleware struct { - EndpointResolver 
EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opListSubscriptionsResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opListSubscriptionsResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - 
switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addListSubscriptionsResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opListSubscriptionsResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListSubscriptionsByTopic.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListSubscriptionsByTopic.go index 8027e0c74..1266d4ff9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListSubscriptionsByTopic.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListSubscriptionsByTopic.go @@ -4,14 +4,10 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sns/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -67,6 +63,9 @@ type ListSubscriptionsByTopicOutput struct { } func (c *Client) addOperationListSubscriptionsByTopicMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpListSubscriptionsByTopic{}, middleware.After) if err != nil { return err @@ -75,6 +74,10 @@ func (c *Client) addOperationListSubscriptionsByTopicMiddlewares(stack *middlewa if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListSubscriptionsByTopic"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -96,9 +99,6 @@ func (c *Client) addOperationListSubscriptionsByTopicMiddlewares(stack *middlewa if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -114,7 +114,7 @@ func (c *Client) addOperationListSubscriptionsByTopicMiddlewares(stack *middlewa if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addListSubscriptionsByTopicResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpListSubscriptionsByTopicValidationMiddleware(stack); err != nil { @@ -135,7 +135,7 @@ func (c *Client) addOperationListSubscriptionsByTopicMiddlewares(stack *middlewa if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -225,130 +225,6 @@ func newServiceMetadataMiddleware_opListSubscriptionsByTopic(region string) *aws return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "ListSubscriptionsByTopic", } } - -type opListSubscriptionsByTopicResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - 
BuiltInResolver builtInParameterResolver -} - -func (*opListSubscriptionsByTopicResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opListSubscriptionsByTopicResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch 
authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addListSubscriptionsByTopicResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opListSubscriptionsByTopicResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListTagsForResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListTagsForResource.go index d38ab7211..cf0975135 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListTagsForResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListTagsForResource.go @@ -4,14 +4,10 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sns/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -56,6 +52,9 @@ type ListTagsForResourceOutput struct { } func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpListTagsForResource{}, middleware.After) if err != nil { return err @@ -64,6 +63,10 @@ func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.St if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListTagsForResource"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -85,9 +88,6 @@ func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.St if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -103,7 +103,7 @@ func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.St if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addListTagsForResourceResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpListTagsForResourceValidationMiddleware(stack); err != nil { @@ -124,7 +124,7 @@ func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.St if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -134,130 +134,6 @@ func newServiceMetadataMiddleware_opListTagsForResource(region string) *awsmiddl return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "ListTagsForResource", } } - -type opListTagsForResourceResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver 
builtInParameterResolver -} - -func (*opListTagsForResourceResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opListTagsForResourceResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case 
*internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addListTagsForResourceResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opListTagsForResourceResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListTopics.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListTopics.go index f4f5648ca..7203582fd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListTopics.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_ListTopics.go @@ -4,14 +4,10 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sns/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -60,6 +56,9 @@ type ListTopicsOutput struct { } func (c *Client) addOperationListTopicsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpListTopics{}, middleware.After) if 
err != nil { return err @@ -68,6 +67,10 @@ func (c *Client) addOperationListTopicsMiddlewares(stack *middleware.Stack, opti if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListTopics"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -89,9 +92,6 @@ func (c *Client) addOperationListTopicsMiddlewares(stack *middleware.Stack, opti if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -107,7 +107,7 @@ func (c *Client) addOperationListTopicsMiddlewares(stack *middleware.Stack, opti if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addListTopicsResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTopics(options.Region), middleware.Before); err != nil { @@ -125,7 +125,7 @@ func (c *Client) addOperationListTopicsMiddlewares(stack *middleware.Stack, opti if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -212,130 +212,6 @@ func newServiceMetadataMiddleware_opListTopics(region string) *awsmiddleware.Reg return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "ListTopics", } } - -type opListTopicsResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opListTopicsResolveEndpointMiddleware) ID() string { - return 
"ResolveEndpointV2" -} - -func (m *opListTopicsResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var 
signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addListTopicsResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opListTopicsResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_OptInPhoneNumber.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_OptInPhoneNumber.go index 1d6ff9525..14ab7ba86 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_OptInPhoneNumber.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_OptInPhoneNumber.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -53,6 +49,9 @@ type OptInPhoneNumberOutput struct { } func (c *Client) addOperationOptInPhoneNumberMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpOptInPhoneNumber{}, middleware.After) if err != nil { return err @@ 
-61,6 +60,10 @@ func (c *Client) addOperationOptInPhoneNumberMiddlewares(stack *middleware.Stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "OptInPhoneNumber"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -82,9 +85,6 @@ func (c *Client) addOperationOptInPhoneNumberMiddlewares(stack *middleware.Stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -100,7 +100,7 @@ func (c *Client) addOperationOptInPhoneNumberMiddlewares(stack *middleware.Stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addOptInPhoneNumberResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpOptInPhoneNumberValidationMiddleware(stack); err != nil { @@ -121,7 +121,7 @@ func (c *Client) addOperationOptInPhoneNumberMiddlewares(stack *middleware.Stack if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -131,130 +131,6 @@ func newServiceMetadataMiddleware_opOptInPhoneNumber(region string) *awsmiddlewa return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "OptInPhoneNumber", } } - -type opOptInPhoneNumberResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opOptInPhoneNumberResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m 
*opOptInPhoneNumberResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - 
if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addOptInPhoneNumberResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opOptInPhoneNumberResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_Publish.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_Publish.go index 743e15764..82a10a6af 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_Publish.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_Publish.go @@ -4,14 +4,10 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sns/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -160,6 +156,9 @@ type PublishOutput struct { } func (c *Client) addOperationPublishMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpPublish{}, middleware.After) if err != nil { return err 
@@ -168,6 +167,10 @@ func (c *Client) addOperationPublishMiddlewares(stack *middleware.Stack, options if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "Publish"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -189,9 +192,6 @@ func (c *Client) addOperationPublishMiddlewares(stack *middleware.Stack, options if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -207,7 +207,7 @@ func (c *Client) addOperationPublishMiddlewares(stack *middleware.Stack, options if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addPublishResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpPublishValidationMiddleware(stack); err != nil { @@ -228,7 +228,7 @@ func (c *Client) addOperationPublishMiddlewares(stack *middleware.Stack, options if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -238,130 +238,6 @@ func newServiceMetadataMiddleware_opPublish(region string) *awsmiddleware.Regist return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "Publish", } } - -type opPublishResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opPublishResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opPublishResolveEndpointMiddleware) HandleSerialize(ctx 
context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { 
- signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addPublishResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opPublishResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_PublishBatch.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_PublishBatch.go index 21a7c5400..168831b8a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_PublishBatch.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_PublishBatch.go @@ -4,14 +4,10 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sns/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -78,6 +74,9 @@ type PublishBatchOutput struct { } func (c *Client) addOperationPublishBatchMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpPublishBatch{}, middleware.After) if err != nil 
{ return err @@ -86,6 +85,10 @@ func (c *Client) addOperationPublishBatchMiddlewares(stack *middleware.Stack, op if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PublishBatch"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -107,9 +110,6 @@ func (c *Client) addOperationPublishBatchMiddlewares(stack *middleware.Stack, op if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -125,7 +125,7 @@ func (c *Client) addOperationPublishBatchMiddlewares(stack *middleware.Stack, op if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addPublishBatchResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpPublishBatchValidationMiddleware(stack); err != nil { @@ -146,7 +146,7 @@ func (c *Client) addOperationPublishBatchMiddlewares(stack *middleware.Stack, op if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -156,130 +156,6 @@ func newServiceMetadataMiddleware_opPublishBatch(region string) *awsmiddleware.R return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "PublishBatch", } } - -type opPublishBatchResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opPublishBatchResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m 
*opPublishBatchResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if 
v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addPublishBatchResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opPublishBatchResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_PutDataProtectionPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_PutDataProtectionPolicy.go index 5120bdefa..8be0322a3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_PutDataProtectionPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_PutDataProtectionPolicy.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -59,6 +55,9 @@ type PutDataProtectionPolicyOutput struct { } func (c *Client) addOperationPutDataProtectionPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpPutDataProtectionPolicy{}, middleware.After) if err != nil { return err @@ -67,6 +66,10 @@ func (c *Client) addOperationPutDataProtectionPolicyMiddlewares(stack *middlewar if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutDataProtectionPolicy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -88,9 +91,6 @@ func (c *Client) addOperationPutDataProtectionPolicyMiddlewares(stack *middlewar if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -106,7 +106,7 @@ func (c *Client) addOperationPutDataProtectionPolicyMiddlewares(stack *middlewar if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addPutDataProtectionPolicyResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpPutDataProtectionPolicyValidationMiddleware(stack); err != nil { @@ -127,7 +127,7 @@ func (c *Client) addOperationPutDataProtectionPolicyMiddlewares(stack *middlewar if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -137,130 +137,6 @@ func newServiceMetadataMiddleware_opPutDataProtectionPolicy(region string) *awsm return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "PutDataProtectionPolicy", } } - -type opPutDataProtectionPolicyResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver 
builtInParameterResolver -} - -func (*opPutDataProtectionPolicyResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opPutDataProtectionPolicyResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { 
- case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addPutDataProtectionPolicyResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opPutDataProtectionPolicyResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_RemovePermission.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_RemovePermission.go index 52b13f538..926517c89 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_RemovePermission.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_RemovePermission.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -57,6 +53,9 @@ type RemovePermissionOutput struct { } func (c *Client) addOperationRemovePermissionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpRemovePermission{}, middleware.After) if 
err != nil { return err @@ -65,6 +64,10 @@ func (c *Client) addOperationRemovePermissionMiddlewares(stack *middleware.Stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "RemovePermission"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -86,9 +89,6 @@ func (c *Client) addOperationRemovePermissionMiddlewares(stack *middleware.Stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -104,7 +104,7 @@ func (c *Client) addOperationRemovePermissionMiddlewares(stack *middleware.Stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addRemovePermissionResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpRemovePermissionValidationMiddleware(stack); err != nil { @@ -125,7 +125,7 @@ func (c *Client) addOperationRemovePermissionMiddlewares(stack *middleware.Stack if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -135,130 +135,6 @@ func newServiceMetadataMiddleware_opRemovePermission(region string) *awsmiddlewa return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "RemovePermission", } } - -type opRemovePermissionResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opRemovePermissionResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" 
-} - -func (m *opRemovePermissionResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, 
signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addRemovePermissionResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opRemovePermissionResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetEndpointAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetEndpointAttributes.go index 1161f8790..b207ad31f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetEndpointAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetEndpointAttributes.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -67,6 +63,9 @@ type SetEndpointAttributesOutput struct { } func (c *Client) addOperationSetEndpointAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpSetEndpointAttributes{}, 
middleware.After) if err != nil { return err @@ -75,6 +74,10 @@ func (c *Client) addOperationSetEndpointAttributesMiddlewares(stack *middleware. if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "SetEndpointAttributes"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -96,9 +99,6 @@ func (c *Client) addOperationSetEndpointAttributesMiddlewares(stack *middleware. if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -114,7 +114,7 @@ func (c *Client) addOperationSetEndpointAttributesMiddlewares(stack *middleware. if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addSetEndpointAttributesResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpSetEndpointAttributesValidationMiddleware(stack); err != nil { @@ -135,7 +135,7 @@ func (c *Client) addOperationSetEndpointAttributesMiddlewares(stack *middleware. 
if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -145,130 +145,6 @@ func newServiceMetadataMiddleware_opSetEndpointAttributes(region string) *awsmid return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "SetEndpointAttributes", } } - -type opSetEndpointAttributesResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opSetEndpointAttributesResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opSetEndpointAttributesResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) 
{ - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addSetEndpointAttributesResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opSetEndpointAttributesResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetPlatformApplicationAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetPlatformApplicationAttributes.go index d8d2b3c1c..2ecdb50e1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetPlatformApplicationAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetPlatformApplicationAttributes.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -97,6 +93,9 @@ type SetPlatformApplicationAttributesOutput struct { } func (c *Client) addOperationSetPlatformApplicationAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } 
err = stack.Serialize.Add(&awsAwsquery_serializeOpSetPlatformApplicationAttributes{}, middleware.After) if err != nil { return err @@ -105,6 +104,10 @@ func (c *Client) addOperationSetPlatformApplicationAttributesMiddlewares(stack * if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "SetPlatformApplicationAttributes"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -126,9 +129,6 @@ func (c *Client) addOperationSetPlatformApplicationAttributesMiddlewares(stack * if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -144,7 +144,7 @@ func (c *Client) addOperationSetPlatformApplicationAttributesMiddlewares(stack * if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addSetPlatformApplicationAttributesResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpSetPlatformApplicationAttributesValidationMiddleware(stack); err != nil { @@ -165,7 +165,7 @@ func (c *Client) addOperationSetPlatformApplicationAttributesMiddlewares(stack * if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -175,130 +175,6 @@ func newServiceMetadataMiddleware_opSetPlatformApplicationAttributes(region stri return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "SetPlatformApplicationAttributes", } } - -type opSetPlatformApplicationAttributesResolveEndpointMiddleware 
struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opSetPlatformApplicationAttributesResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opSetPlatformApplicationAttributesResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - 
) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addSetPlatformApplicationAttributesResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opSetPlatformApplicationAttributesResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetSMSAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetSMSAttributes.go index 4098e4bbe..6f0e8b5f6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetSMSAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetSMSAttributes.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -106,6 +102,9 @@ type SetSMSAttributesOutput struct { } func (c *Client) addOperationSetSMSAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpSetSMSAttributes{}, 
middleware.After) if err != nil { return err @@ -114,6 +113,10 @@ func (c *Client) addOperationSetSMSAttributesMiddlewares(stack *middleware.Stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "SetSMSAttributes"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -135,9 +138,6 @@ func (c *Client) addOperationSetSMSAttributesMiddlewares(stack *middleware.Stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -153,7 +153,7 @@ func (c *Client) addOperationSetSMSAttributesMiddlewares(stack *middleware.Stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addSetSMSAttributesResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpSetSMSAttributesValidationMiddleware(stack); err != nil { @@ -174,7 +174,7 @@ func (c *Client) addOperationSetSMSAttributesMiddlewares(stack *middleware.Stack if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -184,130 +184,6 @@ func newServiceMetadataMiddleware_opSetSMSAttributes(region string) *awsmiddlewa return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "SetSMSAttributes", } } - -type opSetSMSAttributesResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opSetSMSAttributesResolveEndpointMiddleware) ID() string { - 
return "ResolveEndpointV2" -} - -func (m *opSetSMSAttributesResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) 
- var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addSetSMSAttributesResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opSetSMSAttributesResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetSubscriptionAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetSubscriptionAttributes.go index 827897fc2..8c5e77710 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetSubscriptionAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetSubscriptionAttributes.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -88,6 +84,9 @@ type SetSubscriptionAttributesOutput struct { } func (c *Client) addOperationSetSubscriptionAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpSetSubscriptionAttributes{}, middleware.After) if err != nil { return err @@ -96,6 +95,10 @@ func (c *Client) addOperationSetSubscriptionAttributesMiddlewares(stack *middlew if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "SetSubscriptionAttributes"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -117,9 +120,6 @@ func (c *Client) addOperationSetSubscriptionAttributesMiddlewares(stack *middlew if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -135,7 +135,7 @@ func (c *Client) addOperationSetSubscriptionAttributesMiddlewares(stack *middlew if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addSetSubscriptionAttributesResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpSetSubscriptionAttributesValidationMiddleware(stack); err != nil { @@ -156,7 +156,7 @@ func (c *Client) addOperationSetSubscriptionAttributesMiddlewares(stack *middlew if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -166,130 +166,6 @@ func newServiceMetadataMiddleware_opSetSubscriptionAttributes(region string) *aw return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "SetSubscriptionAttributes", } } - -type opSetSubscriptionAttributesResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - 
BuiltInResolver builtInParameterResolver -} - -func (*opSetSubscriptionAttributesResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opSetSubscriptionAttributesResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch 
authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addSetSubscriptionAttributesResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opSetSubscriptionAttributesResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetTopicAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetTopicAttributes.go index eed432e54..a89aaf50d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetTopicAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_SetTopicAttributes.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -152,6 +148,9 @@ type SetTopicAttributesOutput struct { } func (c *Client) addOperationSetTopicAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpSetTopicAttributes{}, 
middleware.After) if err != nil { return err @@ -160,6 +159,10 @@ func (c *Client) addOperationSetTopicAttributesMiddlewares(stack *middleware.Sta if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "SetTopicAttributes"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -181,9 +184,6 @@ func (c *Client) addOperationSetTopicAttributesMiddlewares(stack *middleware.Sta if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -199,7 +199,7 @@ func (c *Client) addOperationSetTopicAttributesMiddlewares(stack *middleware.Sta if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addSetTopicAttributesResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpSetTopicAttributesValidationMiddleware(stack); err != nil { @@ -220,7 +220,7 @@ func (c *Client) addOperationSetTopicAttributesMiddlewares(stack *middleware.Sta if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -230,130 +230,6 @@ func newServiceMetadataMiddleware_opSetTopicAttributes(region string) *awsmiddle return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "SetTopicAttributes", } } - -type opSetTopicAttributesResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opSetTopicAttributesResolveEndpointMiddleware) ID() 
string { - return "ResolveEndpointV2" -} - -func (m *opSetTopicAttributesResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := 
authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addSetTopicAttributesResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opSetTopicAttributesResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_Subscribe.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_Subscribe.go index c63372eca..fb4787b29 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_Subscribe.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_Subscribe.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -152,6 +148,9 @@ type SubscribeOutput struct { } func (c *Client) addOperationSubscribeMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpSubscribe{}, middleware.After) if err != nil { return err @@ -160,6 +159,10 @@ func (c 
*Client) addOperationSubscribeMiddlewares(stack *middleware.Stack, optio if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "Subscribe"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -181,9 +184,6 @@ func (c *Client) addOperationSubscribeMiddlewares(stack *middleware.Stack, optio if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -199,7 +199,7 @@ func (c *Client) addOperationSubscribeMiddlewares(stack *middleware.Stack, optio if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addSubscribeResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpSubscribeValidationMiddleware(stack); err != nil { @@ -220,7 +220,7 @@ func (c *Client) addOperationSubscribeMiddlewares(stack *middleware.Stack, optio if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -230,130 +230,6 @@ func newServiceMetadataMiddleware_opSubscribe(region string) *awsmiddleware.Regi return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "Subscribe", } } - -type opSubscribeResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opSubscribeResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opSubscribeResolveEndpointMiddleware) HandleSerialize(ctx context.Context, 
in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = 
*v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addSubscribeResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opSubscribeResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_TagResource.go index 0f24f0a13..b0bd65718 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_TagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_TagResource.go @@ -4,14 +4,10 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sns/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -68,6 +64,9 @@ type TagResourceOutput struct { } func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpTagResource{}, middleware.After) if err != nil { 
return err @@ -76,6 +75,10 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "TagResource"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -97,9 +100,6 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -115,7 +115,7 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addTagResourceResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpTagResourceValidationMiddleware(stack); err != nil { @@ -136,7 +136,7 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -146,130 +146,6 @@ func newServiceMetadataMiddleware_opTagResource(region string) *awsmiddleware.Re return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "TagResource", } } - -type opTagResourceResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opTagResourceResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m 
*opTagResourceResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if 
v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addTagResourceResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opTagResourceResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_Unsubscribe.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_Unsubscribe.go index ad05dd342..a36ed5cd4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_Unsubscribe.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_Unsubscribe.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -59,6 +55,9 @@ type UnsubscribeOutput struct { } func (c *Client) addOperationUnsubscribeMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpUnsubscribe{}, middleware.After) if err != nil { return err @@ -67,6 +66,10 @@ func (c *Client) 
addOperationUnsubscribeMiddlewares(stack *middleware.Stack, opt if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "Unsubscribe"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -88,9 +91,6 @@ func (c *Client) addOperationUnsubscribeMiddlewares(stack *middleware.Stack, opt if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -106,7 +106,7 @@ func (c *Client) addOperationUnsubscribeMiddlewares(stack *middleware.Stack, opt if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addUnsubscribeResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpUnsubscribeValidationMiddleware(stack); err != nil { @@ -127,7 +127,7 @@ func (c *Client) addOperationUnsubscribeMiddlewares(stack *middleware.Stack, opt if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -137,130 +137,6 @@ func newServiceMetadataMiddleware_opUnsubscribe(region string) *awsmiddleware.Re return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "Unsubscribe", } } - -type opUnsubscribeResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opUnsubscribeResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opUnsubscribeResolveEndpointMiddleware) HandleSerialize(ctx 
context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { 
- signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addUnsubscribeResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opUnsubscribeResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_UntagResource.go index 39707000b..b912f4a9f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_UntagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_UntagResource.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -56,6 +52,9 @@ type UntagResourceOutput struct { } func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpUntagResource{}, middleware.After) if err != nil { return err @@ -64,6 +63,10 @@ 
func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "UntagResource"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -85,9 +88,6 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -103,7 +103,7 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addUntagResourceResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpUntagResourceValidationMiddleware(stack); err != nil { @@ -124,7 +124,7 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -134,130 +134,6 @@ func newServiceMetadataMiddleware_opUntagResource(region string) *awsmiddleware. 
return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "UntagResource", } } - -type opUntagResourceResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opUntagResourceResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opUntagResourceResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, 
fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addUntagResourceResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opUntagResourceResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_VerifySMSSandboxPhoneNumber.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_VerifySMSSandboxPhoneNumber.go index fb9dfcc30..3c9f7fcf9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_VerifySMSSandboxPhoneNumber.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/api_op_VerifySMSSandboxPhoneNumber.go @@ -4,13 +4,9 @@ package sns import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -65,6 +61,9 @@ type VerifySMSSandboxPhoneNumberOutput struct { } func (c *Client) addOperationVerifySMSSandboxPhoneNumberMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpVerifySMSSandboxPhoneNumber{}, middleware.After) if err != nil { return err @@ -73,6 +72,10 @@ func (c *Client) addOperationVerifySMSSandboxPhoneNumberMiddlewares(stack *middl if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "VerifySMSSandboxPhoneNumber"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -94,9 +97,6 @@ func (c *Client) addOperationVerifySMSSandboxPhoneNumberMiddlewares(stack *middl if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -112,7 +112,7 @@ func (c *Client) addOperationVerifySMSSandboxPhoneNumberMiddlewares(stack *middl if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addVerifySMSSandboxPhoneNumberResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpVerifySMSSandboxPhoneNumberValidationMiddleware(stack); err != nil { @@ -133,7 +133,7 @@ func (c *Client) addOperationVerifySMSSandboxPhoneNumberMiddlewares(stack *middl if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -143,130 +143,6 @@ func newServiceMetadataMiddleware_opVerifySMSSandboxPhoneNumber(region string) * return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sns", OperationName: "VerifySMSSandboxPhoneNumber", } } - -type opVerifySMSSandboxPhoneNumberResolveEndpointMiddleware struct { - EndpointResolver 
EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opVerifySMSSandboxPhoneNumberResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opVerifySMSSandboxPhoneNumberResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sns" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := 
range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sns" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sns") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addVerifySMSSandboxPhoneNumberResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opVerifySMSSandboxPhoneNumberResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/auth.go new file mode 100644 index 000000000..0ddddf7d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/auth.go @@ -0,0 +1,256 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sns + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { + params.Region = options.Region +} + +type setLegacyContextSigningOptionsMiddleware struct { +} + +func (*setLegacyContextSigningOptionsMiddleware) ID() string { + return "setLegacyContextSigningOptions" +} + +func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + schemeID := rscheme.Scheme.SchemeID() + + if sn := awsmiddleware.GetSigningName(ctx); sn != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) + } + } + + if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) + } + } + + return next.HandleFinalize(ctx, in) +} + +func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { + return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) +} + +// AuthResolverParameters contains the set of inputs necessary for auth scheme +// resolution. +type AuthResolverParameters struct { + // The name of the operation being invoked. + Operation string + + // The region in which the operation is being invoked. 
+ Region string +} + +func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { + params := &AuthResolverParameters{ + Operation: operation, + } + + bindAuthParamsRegion(params, input, options) + + return params +} + +// AuthSchemeResolver returns a set of possible authentication options for an +// operation. +type AuthSchemeResolver interface { + ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) +} + +type defaultAuthSchemeResolver struct{} + +var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) + +func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + if overrides, ok := operationAuthOptions[params.Operation]; ok { + return overrides(params), nil + } + return serviceAuthOptions(params), nil +} + +var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{} + +func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + { + SchemeID: smithyauth.SchemeIDSigV4, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "sns") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + return props + }(), + }, + } +} + +type resolveAuthSchemeMiddleware struct { + operation string + options Options +} + +func (*resolveAuthSchemeMiddleware) ID() string { + return "ResolveAuthScheme" +} + +func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("resolve auth scheme: %v", err) + } + + 
scheme, ok := m.selectScheme(options) + if !ok { + return out, metadata, fmt.Errorf("could not select an auth scheme") + } + + ctx = setResolvedAuthScheme(ctx, scheme) + return next.HandleFinalize(ctx, in) +} + +func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { + for _, option := range options { + if option.SchemeID == smithyauth.SchemeIDAnonymous { + return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true + } + + for _, scheme := range m.options.AuthSchemes { + if scheme.SchemeID() != option.SchemeID { + continue + } + + if scheme.IdentityResolver(m.options) != nil { + return newResolvedAuthScheme(scheme, option), true + } + } + } + + return nil, false +} + +type resolvedAuthSchemeKey struct{} + +type resolvedAuthScheme struct { + Scheme smithyhttp.AuthScheme + IdentityProperties smithy.Properties + SignerProperties smithy.Properties +} + +func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { + return &resolvedAuthScheme{ + Scheme: scheme, + IdentityProperties: option.IdentityProperties, + SignerProperties: option.SignerProperties, + } +} + +func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { + return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) +} + +func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { + v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) + return v +} + +type getIdentityMiddleware struct { + options Options +} + +func (*getIdentityMiddleware) ID() string { + return "GetIdentity" +} + +func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth 
scheme") + } + + resolver := rscheme.Scheme.IdentityResolver(m.options) + if resolver == nil { + return out, metadata, fmt.Errorf("no identity resolver") + } + + identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) + if err != nil { + return out, metadata, fmt.Errorf("get identity: %v", err) + } + + ctx = setIdentity(ctx, identity) + return next.HandleFinalize(ctx, in) +} + +type identityKey struct{} + +func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { + return middleware.WithStackValue(ctx, identityKey{}, identity) +} + +func getIdentity(ctx context.Context) smithyauth.Identity { + v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) + return v +} + +type signRequestMiddleware struct { +} + +func (*signRequestMiddleware) ID() string { + return "Signing" +} + +func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + identity := getIdentity(ctx) + if identity == nil { + return out, metadata, fmt.Errorf("no identity") + } + + signer := rscheme.Scheme.Signer() + if signer == nil { + return out, metadata, fmt.Errorf("no signer") + } + + if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { + return out, metadata, fmt.Errorf("sign request: %v", err) + } + + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/endpoints.go index 7568d01af..57a5d5628 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/endpoints.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/endpoints.go @@ -8,14 +8,18 @@ import ( "fmt" "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/endpoints" "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" internalendpoints "github.com/aws/aws-sdk-go-v2/service/sns/internal/endpoints" + smithyauth "github.com/aws/smithy-go/auth" smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "net/url" + "os" "strings" ) @@ -194,70 +198,22 @@ func resolveEndpointResolverV2(options *Options) { } } -// Utility function to aid with translating pseudo-regions to classical regions -// with the appropriate setting indicated by the pseudo-region -func mapPseudoRegion(pr string) (region string, fips aws.FIPSEndpointState) { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(pr, fipsInfix) || - strings.Contains(pr, fipsPrefix) || - strings.Contains(pr, fipsSuffix) { - region = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - pr, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - fips = aws.FIPSEndpointStateEnabled - } else { - region = pr +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint } - return region, fips -} - -// builtInParameterResolver is the interface responsible for resolving BuiltIn -// values during the sourcing of EndpointParameters -type builtInParameterResolver interface { - ResolveBuiltIns(*EndpointParameters) error -} - -// builtInResolver resolves modeled BuiltIn values using only the members defined -// below. -type builtInResolver struct { - // The AWS region used to dispatch the request. 
- Region string - - // Sourced BuiltIn value in a historical enabled or disabled state. - UseDualStack aws.DualStackEndpointState - - // Sourced BuiltIn value in a historical enabled or disabled state. - UseFIPS aws.FIPSEndpointState + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_SNS") - // Base endpoint that can potentially be modified during Endpoint resolution. - Endpoint *string -} - -// Invoked at runtime to resolve BuiltIn Values. Only resolution code specific to -// each BuiltIn value is generated. -func (b *builtInResolver) ResolveBuiltIns(params *EndpointParameters) error { - - region, _ := mapPseudoRegion(b.Region) - if len(region) == 0 { - return fmt.Errorf("Could not resolve AWS::Region") - } else { - params.Region = aws.String(region) - } - if b.UseDualStack == aws.DualStackEndpointStateEnabled { - params.UseDualStack = aws.Bool(true) - } else { - params.UseDualStack = aws.Bool(false) + if g && !s { + return } - if b.UseFIPS == aws.FIPSEndpointStateEnabled { - params.UseFIPS = aws.Bool(true) - } else { - params.UseFIPS = aws.Bool(false) + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SNS", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value } - params.Endpoint = b.Endpoint - return nil } // EndpointParameters provides the parameters that influence how endpoints are @@ -504,3 +460,76 @@ func (r *resolver) ResolveEndpoint( } return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") } + +type endpointParamsBinder interface { + bindEndpointParams(*EndpointParameters) +} + +func bindEndpointParams(input interface{}, options Options) *EndpointParameters { + params := &EndpointParameters{} + + params.Region = aws.String(endpoints.MapFIPSRegion(options.Region)) + params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) + params.UseFIPS = 
aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) + params.Endpoint = options.BaseEndpoint + + if b, ok := input.(endpointParamsBinder); ok { + b.bindEndpointParams(params) + } + + return params +} + +type resolveEndpointV2Middleware struct { + options Options +} + +func (*resolveEndpointV2Middleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.options.EndpointResolverV2 == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := bindEndpointParams(getOperationInput(ctx), m.options) + endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { + endpt.URI.RawPath = endpt.URI.Path + } + req.URL.Scheme = endpt.URI.Scheme + req.URL.Host = endpt.URI.Host + req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) + req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) + for k := range endpt.Headers { + req.Header.Set(k, endpt.Headers.Get(k)) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) + for _, o := range opts { + rscheme.SignerProperties.SetAll(&o.SignerProperties) + } + + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/generated.json 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/generated.json index bdfc3e0b4..2703a913f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/generated.json @@ -51,13 +51,16 @@ "api_op_Unsubscribe.go", "api_op_UntagResource.go", "api_op_VerifySMSSandboxPhoneNumber.go", + "auth.go", "deserializers.go", "doc.go", "endpoints.go", + "endpoints_config_test.go", "endpoints_test.go", "generated.json", "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", + "options.go", "protocol_test.go", "serializers.go", "types/enums.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/go_module_metadata.go index 68b58bde7..da0fbe022 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/go_module_metadata.go @@ -3,4 +3,4 @@ package sns // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.23.0" +const goModuleVersion = "1.25.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sns/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/options.go new file mode 100644 index 000000000..238ceb78b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sns/options.go @@ -0,0 +1,219 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sns + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" +) + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + // + // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. 
To migrate an EndpointResolver implementation that uses a custom + // endpoint, set the client option BaseEndpoint instead. + EndpointResolver EndpointResolver + + // Resolves the endpoint used for a particular service operation. This should be + // used over the deprecated EndpointResolver. + EndpointResolverV2 EndpointResolverV2 + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. 
+ Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient + + // The auth scheme resolver which determines how to authenticate for each + // operation. + AuthSchemeResolver AuthSchemeResolver + + // The list of auth schemes supported by the client. + AuthSchemes []smithyhttp.AuthScheme +} + +// Copy creates a clone where the APIOptions list is deep copied. +func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} + +func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { + if schemeID == "aws.auth#sigv4" { + return getSigV4IdentityResolver(o) + } + if schemeID == "smithy.api#noAuth" { + return &smithyauth.AnonymousIdentityResolver{} + } + return nil +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. 
Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. +func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { + if o.Credentials != nil { + return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} + } + return nil +} + +// WithSigV4SigningName applies an override to the authentication workflow to +// use the given signing name for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing name from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningName(name string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), + middleware.Before, + ) + }) + } +} + +// WithSigV4SigningRegion applies an override to the authentication workflow to +// use the given signing region for SigV4-authenticated operations. +// +// This is an advanced setting. 
The value here is FINAL, taking precedence over +// the resolved signing region from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningRegion(region string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), + middleware.Before, + ) + }) + } +} + +func ignoreAnonymousAuth(options *Options) { + if _, ok := options.Credentials.(aws.AnonymousCredentials); ok { + options.Credentials = nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/CHANGELOG.md index 56da15ec1..c802a0ef9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/CHANGELOG.md @@ -1,3 +1,12 @@ +# v1.23.4 (2023-07-28) + +* **Documentation**: Documentation changes related to SQS APIs. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.3 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.23.2 (2023-06-15) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_client.go index e93d9b926..08fe7c891 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_client.go @@ -65,6 +65,9 @@ type Options struct { // modify this list for per operation behavior. 
APIOptions []func(*middleware.Stack) error + // The optional application specific identifier appended to the User-Agent header. + AppID string + // Configures the events that will be sent to the configured logger. ClientLogMode aws.ClientLogMode @@ -238,6 +241,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { APIOptions: cfg.APIOptions, Logger: cfg.Logger, ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -351,8 +355,16 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) } -func addClientUserAgent(stack *middleware.Stack) error { - return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sqs", goModuleVersion)(stack) +func addClientUserAgent(stack *middleware.Stack, options Options) error { + if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sqs", goModuleVersion)(stack); err != nil { + return err + } + + if len(options.AppID) > 0 { + return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack) + } + + return nil } func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_AddPermission.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_AddPermission.go index 49a2b631f..a1033a204 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_AddPermission.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_AddPermission.go @@ -125,7 +125,7 @@ func (c *Client) addOperationAddPermissionMiddlewares(stack *middleware.Stack, o if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = 
smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_CancelMessageMoveTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_CancelMessageMoveTask.go index 5b87843db..8b4590297 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_CancelMessageMoveTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_CancelMessageMoveTask.go @@ -10,11 +10,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Cancels a specified message movement task. -// - A message movement can only be cancelled when the current status is -// RUNNING. -// - Cancelling a message movement task does not revert the messages that have -// already been moved. It can only stop the messages that have not been moved yet. +// Cancels a specified message movement task. A message movement can only be +// cancelled when the current status is RUNNING. Cancelling a message movement task +// does not revert the messages that have already been moved. It can only stop the +// messages that have not been moved yet. +// - This action is currently limited to supporting message redrive from +// dead-letter queues (DLQs) (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) +// only. In this context, the source queue is the dead-letter queue (DLQ), while +// the destination queue can be the original source queue (from which the messages +// were driven to the dead-letter-queue), or a custom destination queue. +// - Currently, only standard queues are supported. +// - Only one active message movement task is supported per queue at any given +// time. 
func (c *Client) CancelMessageMoveTask(ctx context.Context, params *CancelMessageMoveTaskInput, optFns ...func(*Options)) (*CancelMessageMoveTaskOutput, error) { if params == nil { params = &CancelMessageMoveTaskInput{} @@ -87,7 +94,7 @@ func (c *Client) addOperationCancelMessageMoveTaskMiddlewares(stack *middleware. if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ChangeMessageVisibility.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ChangeMessageVisibility.go index 4757a9208..13642d30e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ChangeMessageVisibility.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ChangeMessageVisibility.go @@ -132,7 +132,7 @@ func (c *Client) addOperationChangeMessageVisibilityMiddlewares(stack *middlewar if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ChangeMessageVisibilityBatch.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ChangeMessageVisibilityBatch.go index 6cc4e7694..11e0ffc71 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ChangeMessageVisibilityBatch.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ChangeMessageVisibilityBatch.go @@ -107,7 +107,7 @@ func (c *Client) addOperationChangeMessageVisibilityBatchMiddlewares(stack *midd if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = 
addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_CreateQueue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_CreateQueue.go index 754eca0fd..31d525dec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_CreateQueue.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_CreateQueue.go @@ -259,7 +259,7 @@ func (c *Client) addOperationCreateQueueMiddlewares(stack *middleware.Stack, opt if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteMessage.go index 21d80a446..5e44332ca 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteMessage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteMessage.go @@ -101,7 +101,7 @@ func (c *Client) addOperationDeleteMessageMiddlewares(stack *middleware.Stack, o if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteMessageBatch.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteMessageBatch.go index 4639fc985..6e0d00605 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteMessageBatch.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteMessageBatch.go @@ -104,7 +104,7 @@ func (c *Client) addOperationDeleteMessageBatchMiddlewares(stack *middleware.Sta if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteQueue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteQueue.go index 1736f4871..c36585387 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteQueue.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_DeleteQueue.go @@ -90,7 +90,7 @@ func (c *Client) addOperationDeleteQueueMiddlewares(stack *middleware.Stack, opt if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_GetQueueAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_GetQueueAttributes.go index 39e36d159..d83bd1448 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_GetQueueAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_GetQueueAttributes.go @@ -201,7 +201,7 @@ func (c *Client) addOperationGetQueueAttributesMiddlewares(stack *middleware.Sta if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_GetQueueUrl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_GetQueueUrl.go index c2dad2593..d01ee537d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_GetQueueUrl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_GetQueueUrl.go @@ -95,7 +95,7 @@ func (c *Client) addOperationGetQueueUrlMiddlewares(stack *middleware.Stack, opt if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListDeadLetterSourceQueues.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListDeadLetterSourceQueues.go index 0dca4b995..5fe9b7513 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListDeadLetterSourceQueues.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListDeadLetterSourceQueues.go @@ -110,7 +110,7 @@ func (c *Client) addOperationListDeadLetterSourceQueuesMiddlewares(stack *middle if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListMessageMoveTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListMessageMoveTasks.go index 83bcc8e09..1bca34afd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListMessageMoveTasks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListMessageMoveTasks.go @@ -13,6 +13,14 @@ import ( // Gets the most recent message movement tasks (up to 10) under a specific source // queue. 
+// - This action is currently limited to supporting message redrive from +// dead-letter queues (DLQs) (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) +// only. In this context, the source queue is the dead-letter queue (DLQ), while +// the destination queue can be the original source queue (from which the messages +// were driven to the dead-letter-queue), or a custom destination queue. +// - Currently, only standard queues are supported. +// - Only one active message movement task is supported per queue at any given +// time. func (c *Client) ListMessageMoveTasks(ctx context.Context, params *ListMessageMoveTasksInput, optFns ...func(*Options)) (*ListMessageMoveTasksOutput, error) { if params == nil { params = &ListMessageMoveTasksInput{} @@ -89,7 +97,7 @@ func (c *Client) addOperationListMessageMoveTasksMiddlewares(stack *middleware.S if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListQueueTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListQueueTags.go index 28543261a..e61a08cec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListQueueTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListQueueTags.go @@ -88,7 +88,7 @@ func (c *Client) addOperationListQueueTagsMiddlewares(stack *middleware.Stack, o if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListQueues.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListQueues.go index c214011c9..aa6455f82 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListQueues.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ListQueues.go @@ -109,7 +109,7 @@ func (c *Client) addOperationListQueuesMiddlewares(stack *middleware.Stack, opti if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_PurgeQueue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_PurgeQueue.go index 80b3a4667..95fa30f8f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_PurgeQueue.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_PurgeQueue.go @@ -10,13 +10,13 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the messages in a queue specified by the QueueURL parameter. When you -// use the PurgeQueue action, you can't retrieve any messages deleted from a -// queue. The message deletion process takes up to 60 seconds. We recommend waiting -// for 60 seconds regardless of your queue's size. Messages sent to the queue -// before you call PurgeQueue might be received but are deleted within the next -// minute. Messages sent to the queue after you call PurgeQueue might be deleted -// while the queue is being purged. +// Deletes available messages in a queue (including in-flight messages) specified +// by the QueueURL parameter. When you use the PurgeQueue action, you can't +// retrieve any messages deleted from a queue. The message deletion process takes +// up to 60 seconds. We recommend waiting for 60 seconds regardless of your queue's +// size. 
Messages sent to the queue before you call PurgeQueue might be received +// but are deleted within the next minute. Messages sent to the queue after you +// call PurgeQueue might be deleted while the queue is being purged. func (c *Client) PurgeQueue(ctx context.Context, params *PurgeQueueInput, optFns ...func(*Options)) (*PurgeQueueOutput, error) { if params == nil { params = &PurgeQueueInput{} @@ -86,7 +86,7 @@ func (c *Client) addOperationPurgeQueueMiddlewares(stack *middleware.Stack, opti if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ReceiveMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ReceiveMessage.go index d377b94e6..14e0b1c3f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ReceiveMessage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_ReceiveMessage.go @@ -223,7 +223,7 @@ func (c *Client) addOperationReceiveMessageMiddlewares(stack *middleware.Stack, if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_RemovePermission.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_RemovePermission.go index 797014bc6..27db84f52 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_RemovePermission.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_RemovePermission.go @@ -94,7 +94,7 @@ func (c *Client) addOperationRemovePermissionMiddlewares(stack *middleware.Stack if err = 
awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SendMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SendMessage.go index 83cac943f..c853db94d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SendMessage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SendMessage.go @@ -204,7 +204,7 @@ func (c *Client) addOperationSendMessageMiddlewares(stack *middleware.Stack, opt if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SendMessageBatch.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SendMessageBatch.go index 53f2d3b63..64425d56c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SendMessageBatch.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SendMessageBatch.go @@ -116,7 +116,7 @@ func (c *Client) addOperationSendMessageBatchMiddlewares(stack *middleware.Stack if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SetQueueAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SetQueueAttributes.go index 914badb7e..5afaee53b 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SetQueueAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_SetQueueAttributes.go @@ -216,7 +216,7 @@ func (c *Client) addOperationSetQueueAttributesMiddlewares(stack *middleware.Sta if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_StartMessageMoveTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_StartMessageMoveTask.go index 8150d7421..f46233c35 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_StartMessageMoveTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_StartMessageMoveTask.go @@ -12,12 +12,16 @@ import ( // Starts an asynchronous task to move messages from a specified source queue to a // specified destination queue. -// - This action is currently limited to supporting message redrive from -// dead-letter queues (DLQs) only. In this context, the source queue is the -// dead-letter queue (DLQ), while the destination queue can be the original source -// queue (from which the messages were driven to the dead-letter-queue), or a -// custom destination queue. -// - Currently, only standard queues are supported. +// - This action is currently limited to supporting message redrive from queues +// that are configured as dead-letter queues (DLQs) (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) +// of other Amazon SQS queues only. Non-SQS queue sources of dead-letter queues, +// such as Lambda or Amazon SNS topics, are currently not supported. 
+// - In dead-letter queues redrive context, the StartMessageMoveTask the source +// queue is the DLQ, while the destination queue can be the original source queue +// (from which the messages were driven to the dead-letter-queue), or a custom +// destination queue. +// - Currently, only standard queues support redrive. FIFO queues don't support +// redrive. // - Only one active message movement task is supported per queue at any given // time. func (c *Client) StartMessageMoveTask(ctx context.Context, params *StartMessageMoveTaskInput, optFns ...func(*Options)) (*StartMessageMoveTaskOutput, error) { @@ -38,7 +42,9 @@ func (c *Client) StartMessageMoveTask(ctx context.Context, params *StartMessageM type StartMessageMoveTaskInput struct { // The ARN of the queue that contains the messages to be moved to another queue. - // Currently, only dead-letter queue (DLQ) ARNs are accepted. + // Currently, only ARNs of dead-letter queues (DLQs) whose sources are other Amazon + // SQS queues are accepted. DLQs whose sources are non-SQS queues, such as Lambda + // or Amazon SNS topics, are not currently supported. // // This member is required. 
SourceArn *string @@ -108,7 +114,7 @@ func (c *Client) addOperationStartMessageMoveTaskMiddlewares(stack *middleware.S if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_TagQueue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_TagQueue.go index a722a2915..f67a0e93c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_TagQueue.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_TagQueue.go @@ -99,7 +99,7 @@ func (c *Client) addOperationTagQueueMiddlewares(stack *middleware.Stack, option if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_UntagQueue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_UntagQueue.go index 25e9eed80..f605c9663 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_UntagQueue.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/api_op_UntagQueue.go @@ -89,7 +89,7 @@ func (c *Client) addOperationUntagQueueMiddlewares(stack *middleware.Stack, opti if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/go_module_metadata.go index 
458e89bdd..a403b673d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sqs/go_module_metadata.go @@ -3,4 +3,4 @@ package sqs // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.23.2" +const goModuleVersion = "1.23.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index 04222cda2..6ae8318ac 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,11 @@ +# v1.17.2 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.17.0 (2023-11-01) * **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go index a25792273..7d4e6651e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -11,6 +11,8 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" smithy "github.com/aws/smithy-go" smithydocument "github.com/aws/smithy-go/document" @@ -46,10 +48,18 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveHTTPSignerV4(&options) + resolveEndpointResolverV2(&options) + + resolveAuthSchemeResolver(&options) + for _, fn := range optFns { fn(&options) } + 
ignoreAnonymousAuth(&options) + + resolveAuthSchemes(&options) + client := &Client{ options: options, } @@ -57,140 +67,10 @@ func New(options Options, optFns ...func(*Options)) *Client { return client } -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. - AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. - BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. - EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service. 
This should be used over - // the deprecated EndpointResolver - EndpointResolverV2 EndpointResolverV2 - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The region to send requests to. (Required) - Region string - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. If specified in an operation call's functional - // options with a value that is different than the constructed client's Options, - // the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. - RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You - // should not populate this structure programmatically, or rely on the values here - // within your applications. 
- RuntimeEnvironment aws.RuntimeEnvironment - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. -// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// Copy creates a clone where the APIOptions list is deep copied. 
-func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - - return to -} func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { ctx = middleware.ClearStackValues(ctx) stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() - resolveEndpointResolverV2(&options) for _, fn := range optFns { fn(&options) @@ -224,6 +104,63 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf return result, metadata, err } +type operationInputKey struct{} + +func setOperationInput(ctx context.Context, input interface{}) context.Context { + return middleware.WithStackValue(ctx, operationInputKey{}, input) +} + +func getOperationInput(ctx context.Context) interface{} { + return middleware.GetStackValue(ctx, operationInputKey{}) +} + +type setOperationInputMiddleware struct { +} + +func (*setOperationInputMiddleware) ID() string { + return "setOperationInput" +} + +func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + ctx = setOperationInput(ctx, in.Parameters) + return next.HandleSerialize(ctx, in) +} + +func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { + if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { + return fmt.Errorf("add ResolveAuthScheme: %v", err) + } + if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { + return fmt.Errorf("add GetIdentity: %v", err) + } + if err := 
stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { + return fmt.Errorf("add ResolveEndpointV2: %v", err) + } + if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + return fmt.Errorf("add Signing: %v", err) + } + return nil +} +func resolveAuthSchemeResolver(options *Options) { + if options.AuthSchemeResolver == nil { + options.AuthSchemeResolver = &defaultAuthSchemeResolver{} + } +} + +func resolveAuthSchemes(options *Options) { + if options.AuthSchemes == nil { + options.AuthSchemes = []smithyhttp.AuthScheme{ + internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ + Signer: options.HTTPSignerV4, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), + } + } +} + type noSmithyDocumentSerde = smithydocument.NoSerde type legacyEndpointContextSetter struct { @@ -414,15 +351,6 @@ func addClientUserAgent(stack *middleware.Stack, options Options) error { return nil } -func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { - mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ - CredentialsProvider: o.Credentials, - Signer: o.HTTPSignerV4, - LogSigning: o.ClientLogMode.IsSigning(), - }) - return stack.Finalize.Add(mw, middleware.After) -} - type HTTPSignerV4 interface { SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error } @@ -496,31 +424,31 @@ func addRequestResponseLogging(stack *middleware.Stack, o Options) error { }, middleware.After) } -type endpointDisableHTTPSMiddleware struct { - EndpointDisableHTTPS bool +type disableHTTPSMiddleware struct { + DisableHTTPS bool } -func (*endpointDisableHTTPSMiddleware) ID() string { - return "endpointDisableHTTPSMiddleware" +func (*disableHTTPSMiddleware) ID() string { + 
return "disableHTTPS" } -func (m *endpointDisableHTTPSMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, +func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) } - if m.EndpointDisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { req.URL.Scheme = "http" } - return next.HandleSerialize(ctx, in) - + return next.HandleFinalize(ctx, in) } -func addendpointDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return stack.Serialize.Insert(&endpointDisableHTTPSMiddleware{ - EndpointDisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "OperationSerializer", middleware.Before) + +func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Finalize.Insert(&disableHTTPSMiddleware{ + DisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "ResolveEndpointV2", middleware.After) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go index 0383bb0bd..436eadc86 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go @@ -4,13 +4,9 @@ package sso import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sso/types" - smithyendpoints 
"github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -66,6 +62,9 @@ type GetRoleCredentialsOutput struct { } func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestjson1_serializeOpGetRoleCredentials{}, middleware.After) if err != nil { return err @@ -74,6 +73,10 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetRoleCredentials"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -107,7 +110,7 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addGetRoleCredentialsResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil { @@ -128,7 +131,7 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -141,126 +144,3 @@ func newServiceMetadataMiddleware_opGetRoleCredentials(region string) *awsmiddle OperationName: "GetRoleCredentials", } } - -type opGetRoleCredentialsResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func 
(*opGetRoleCredentialsResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opGetRoleCredentialsResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "awsssoportal" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case 
*internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "awsssoportal" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("awsssoportal") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addGetRoleCredentialsResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opGetRoleCredentialsResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go index cc28543f8..d81b06770 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go @@ -4,13 +4,9 @@ package sso import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sso/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -71,6 +67,9 @@ type ListAccountRolesOutput struct { } func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccountRoles{}, middleware.After) if err 
!= nil { return err @@ -79,6 +78,10 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccountRoles"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -112,7 +115,7 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addListAccountRolesResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpListAccountRolesValidationMiddleware(stack); err != nil { @@ -133,7 +136,7 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -236,126 +239,3 @@ func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddlewa OperationName: "ListAccountRoles", } } - -type opListAccountRolesResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opListAccountRolesResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opListAccountRolesResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown 
transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "awsssoportal" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "awsssoportal" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. 
- // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("awsssoportal") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addListAccountRolesResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opListAccountRolesResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go index 567f6c669..38f8472ae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go @@ -4,13 +4,9 @@ 
package sso import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sso/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -69,6 +65,9 @@ type ListAccountsOutput struct { } func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccounts{}, middleware.After) if err != nil { return err @@ -77,6 +76,10 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccounts"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -110,7 +113,7 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addListAccountsResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpListAccountsValidationMiddleware(stack); err != nil { @@ -131,7 +134,7 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -233,126 +236,3 @@ func 
newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.R OperationName: "ListAccounts", } } - -type opListAccountsResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opListAccountsResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opListAccountsResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "awsssoportal" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This 
operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "awsssoportal" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("awsssoportal") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addListAccountsResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opListAccountsResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go index c30da0296..82e98a894 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go @@ -4,12 +4,8 @@ package sso import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -61,6 +57,9 @@ type LogoutOutput struct { } func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestjson1_serializeOpLogout{}, middleware.After) if err != nil { return err @@ -69,6 +68,10 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options if err != nil 
{ return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "Logout"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -102,7 +105,7 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addLogoutResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpLogoutValidationMiddleware(stack); err != nil { @@ -123,7 +126,7 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -136,126 +139,3 @@ func newServiceMetadataMiddleware_opLogout(region string) *awsmiddleware.Registe OperationName: "Logout", } } - -type opLogoutResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opLogoutResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opLogoutResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - 
m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "awsssoportal" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "awsssoportal" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("awsssoportal") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addLogoutResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opLogoutResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go new file mode 100644 index 000000000..292df01cf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go @@ -0,0 +1,280 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sso + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { + params.Region = options.Region +} + +type setLegacyContextSigningOptionsMiddleware struct { +} + +func (*setLegacyContextSigningOptionsMiddleware) ID() string { + return "setLegacyContextSigningOptions" +} + +func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + schemeID := rscheme.Scheme.SchemeID() + + if sn := awsmiddleware.GetSigningName(ctx); sn != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) + } + } + + if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) + } + } + + return next.HandleFinalize(ctx, in) +} + +func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { + return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) +} + +// AuthResolverParameters contains the set of inputs necessary for auth scheme +// resolution. +type AuthResolverParameters struct { + // The name of the operation being invoked. + Operation string + + // The region in which the operation is being invoked. 
+ Region string +} + +func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { + params := &AuthResolverParameters{ + Operation: operation, + } + + bindAuthParamsRegion(params, input, options) + + return params +} + +// AuthSchemeResolver returns a set of possible authentication options for an +// operation. +type AuthSchemeResolver interface { + ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) +} + +type defaultAuthSchemeResolver struct{} + +var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) + +func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + if overrides, ok := operationAuthOptions[params.Operation]; ok { + return overrides(params), nil + } + return serviceAuthOptions(params), nil +} + +var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ + "GetRoleCredentials": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, + + "ListAccountRoles": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, + + "ListAccounts": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, + + "Logout": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, +} + +func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + { + SchemeID: smithyauth.SchemeIDSigV4, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "awsssoportal") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + return 
props + }(), + }, + } +} + +type resolveAuthSchemeMiddleware struct { + operation string + options Options +} + +func (*resolveAuthSchemeMiddleware) ID() string { + return "ResolveAuthScheme" +} + +func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("resolve auth scheme: %v", err) + } + + scheme, ok := m.selectScheme(options) + if !ok { + return out, metadata, fmt.Errorf("could not select an auth scheme") + } + + ctx = setResolvedAuthScheme(ctx, scheme) + return next.HandleFinalize(ctx, in) +} + +func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { + for _, option := range options { + if option.SchemeID == smithyauth.SchemeIDAnonymous { + return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true + } + + for _, scheme := range m.options.AuthSchemes { + if scheme.SchemeID() != option.SchemeID { + continue + } + + if scheme.IdentityResolver(m.options) != nil { + return newResolvedAuthScheme(scheme, option), true + } + } + } + + return nil, false +} + +type resolvedAuthSchemeKey struct{} + +type resolvedAuthScheme struct { + Scheme smithyhttp.AuthScheme + IdentityProperties smithy.Properties + SignerProperties smithy.Properties +} + +func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { + return &resolvedAuthScheme{ + Scheme: scheme, + IdentityProperties: option.IdentityProperties, + SignerProperties: option.SignerProperties, + } +} + +func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { + return middleware.WithStackValue(ctx, 
resolvedAuthSchemeKey{}, scheme) +} + +func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { + v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) + return v +} + +type getIdentityMiddleware struct { + options Options +} + +func (*getIdentityMiddleware) ID() string { + return "GetIdentity" +} + +func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + resolver := rscheme.Scheme.IdentityResolver(m.options) + if resolver == nil { + return out, metadata, fmt.Errorf("no identity resolver") + } + + identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) + if err != nil { + return out, metadata, fmt.Errorf("get identity: %v", err) + } + + ctx = setIdentity(ctx, identity) + return next.HandleFinalize(ctx, in) +} + +type identityKey struct{} + +func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { + return middleware.WithStackValue(ctx, identityKey{}, identity) +} + +func getIdentity(ctx context.Context) smithyauth.Identity { + v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) + return v +} + +type signRequestMiddleware struct { +} + +func (*signRequestMiddleware) ID() string { + return "Signing" +} + +func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } 
+ + identity := getIdentity(ctx) + if identity == nil { + return out, metadata, fmt.Errorf("no identity") + } + + signer := rscheme.Scheme.Signer() + if signer == nil { + return out, metadata, fmt.Errorf("no signer") + } + + if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { + return out, metadata, fmt.Errorf("sign request: %v", err) + } + + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go index 115387059..d31380cf2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go @@ -9,8 +9,10 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/endpoints" "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" internalendpoints "github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints" + smithyauth "github.com/aws/smithy-go/auth" smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" @@ -214,72 +216,6 @@ func resolveBaseEndpoint(cfg aws.Config, o *Options) { } } -// Utility function to aid with translating pseudo-regions to classical regions -// with the appropriate setting indicated by the pseudo-region -func mapPseudoRegion(pr string) (region string, fips aws.FIPSEndpointState) { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(pr, fipsInfix) || - strings.Contains(pr, fipsPrefix) || - strings.Contains(pr, fipsSuffix) { - region = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - pr, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - fips = aws.FIPSEndpointStateEnabled - } else { - region = pr - } - - return region, fips 
-} - -// builtInParameterResolver is the interface responsible for resolving BuiltIn -// values during the sourcing of EndpointParameters -type builtInParameterResolver interface { - ResolveBuiltIns(*EndpointParameters) error -} - -// builtInResolver resolves modeled BuiltIn values using only the members defined -// below. -type builtInResolver struct { - // The AWS region used to dispatch the request. - Region string - - // Sourced BuiltIn value in a historical enabled or disabled state. - UseDualStack aws.DualStackEndpointState - - // Sourced BuiltIn value in a historical enabled or disabled state. - UseFIPS aws.FIPSEndpointState - - // Base endpoint that can potentially be modified during Endpoint resolution. - Endpoint *string -} - -// Invoked at runtime to resolve BuiltIn Values. Only resolution code specific to -// each BuiltIn value is generated. -func (b *builtInResolver) ResolveBuiltIns(params *EndpointParameters) error { - - region, _ := mapPseudoRegion(b.Region) - if len(region) == 0 { - return fmt.Errorf("Could not resolve AWS::Region") - } else { - params.Region = aws.String(region) - } - if b.UseDualStack == aws.DualStackEndpointStateEnabled { - params.UseDualStack = aws.Bool(true) - } else { - params.UseDualStack = aws.Bool(false) - } - if b.UseFIPS == aws.FIPSEndpointStateEnabled { - params.UseFIPS = aws.Bool(true) - } else { - params.UseFIPS = aws.Bool(false) - } - params.Endpoint = b.Endpoint - return nil -} - // EndpointParameters provides the parameters that influence how endpoints are // resolved. 
type EndpointParameters struct { @@ -517,3 +453,76 @@ func (r *resolver) ResolveEndpoint( } return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") } + +type endpointParamsBinder interface { + bindEndpointParams(*EndpointParameters) +} + +func bindEndpointParams(input interface{}, options Options) *EndpointParameters { + params := &EndpointParameters{} + + params.Region = aws.String(endpoints.MapFIPSRegion(options.Region)) + params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) + params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) + params.Endpoint = options.BaseEndpoint + + if b, ok := input.(endpointParamsBinder); ok { + b.bindEndpointParams(params) + } + + return params +} + +type resolveEndpointV2Middleware struct { + options Options +} + +func (*resolveEndpointV2Middleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.options.EndpointResolverV2 == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := bindEndpointParams(getOperationInput(ctx), m.options) + endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { + endpt.URI.RawPath = endpt.URI.Path + } + req.URL.Scheme = endpt.URI.Scheme + req.URL.Host = endpt.URI.Host + req.URL.Path = 
smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) + req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) + for k := range endpt.Headers { + req.Header.Set(k, endpt.Headers.Get(k)) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) + for _, o := range opts { + rscheme.SignerProperties.SetAll(&o.SignerProperties) + } + + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json index 8e6184187..53060bccf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json @@ -13,6 +13,7 @@ "api_op_ListAccountRoles.go", "api_op_ListAccounts.go", "api_op_Logout.go", + "auth.go", "deserializers.go", "doc.go", "endpoints.go", @@ -21,6 +22,7 @@ "generated.json", "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", + "options.go", "protocol_test.go", "serializers.go", "types/errors.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index d5611c5cd..9abc3e105 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.0" +const goModuleVersion = "1.17.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go new file mode 100644 index 000000000..555f383de --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go @@ -0,0 +1,219 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sso + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" +) + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + // + // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. 
To migrate an EndpointResolver implementation that uses a custom + // endpoint, set the client option BaseEndpoint instead. + EndpointResolver EndpointResolver + + // Resolves the endpoint used for a particular service operation. This should be + // used over the deprecated EndpointResolver. + EndpointResolverV2 EndpointResolverV2 + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. 
+ Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient + + // The auth scheme resolver which determines how to authenticate for each + // operation. + AuthSchemeResolver AuthSchemeResolver + + // The list of auth schemes supported by the client. + AuthSchemes []smithyhttp.AuthScheme +} + +// Copy creates a clone where the APIOptions list is deep copied. +func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} + +func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { + if schemeID == "aws.auth#sigv4" { + return getSigV4IdentityResolver(o) + } + if schemeID == "smithy.api#noAuth" { + return &smithyauth.AnonymousIdentityResolver{} + } + return nil +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. 
Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. +func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { + if o.Credentials != nil { + return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} + } + return nil +} + +// WithSigV4SigningName applies an override to the authentication workflow to +// use the given signing name for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing name from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningName(name string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), + middleware.Before, + ) + }) + } +} + +// WithSigV4SigningRegion applies an override to the authentication workflow to +// use the given signing region for SigV4-authenticated operations. +// +// This is an advanced setting. 
The value here is FINAL, taking precedence over +// the resolved signing region from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningRegion(region string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), + middleware.Before, + ) + }) + } +} + +func ignoreAnonymousAuth(options *Options) { + if _, ok := options.Credentials.(aws.AnonymousCredentials); ok { + options.Credentials = nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index 057f9a25e..8f0932e49 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,15 @@ +# v1.20.0 (2023-11-17) + +* **Feature**: Adding support for `sso-oauth:CreateTokenWithIAM`. + +# v1.19.2 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.19.0 (2023-11-01) * **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go index 6a56093d8..027a67a57 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -11,6 +11,8 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" smithy "github.com/aws/smithy-go" smithydocument "github.com/aws/smithy-go/document" @@ -46,10 +48,18 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveHTTPSignerV4(&options) + resolveEndpointResolverV2(&options) + + resolveAuthSchemeResolver(&options) + for _, fn := range optFns { fn(&options) } + ignoreAnonymousAuth(&options) + + resolveAuthSchemes(&options) + client := &Client{ options: options, } @@ -57,140 +67,10 @@ func New(options Options, optFns ...func(*Options)) *Client { return client } -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. - AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. - BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The credentials object to use when signing requests. 
- Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. - EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service. This should be used over - // the deprecated EndpointResolver - EndpointResolverV2 EndpointResolverV2 - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The region to send requests to. (Required) - Region string - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. If specified in an operation call's functional - // options with a value that is different than the constructed client's Options, - // the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. 
- RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. 
-// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - - return to -} func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { ctx = middleware.ClearStackValues(ctx) stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() - resolveEndpointResolverV2(&options) for _, fn := range optFns { fn(&options) @@ -224,6 +104,63 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf return result, metadata, err } +type operationInputKey struct{} + +func setOperationInput(ctx context.Context, input interface{}) context.Context { + return middleware.WithStackValue(ctx, operationInputKey{}, input) +} + +func getOperationInput(ctx context.Context) interface{} { + return middleware.GetStackValue(ctx, operationInputKey{}) +} + +type setOperationInputMiddleware struct { +} + +func (*setOperationInputMiddleware) ID() string { + return "setOperationInput" +} + +func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out 
middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + ctx = setOperationInput(ctx, in.Parameters) + return next.HandleSerialize(ctx, in) +} + +func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { + if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { + return fmt.Errorf("add ResolveAuthScheme: %v", err) + } + if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { + return fmt.Errorf("add GetIdentity: %v", err) + } + if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { + return fmt.Errorf("add ResolveEndpointV2: %v", err) + } + if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + return fmt.Errorf("add Signing: %v", err) + } + return nil +} +func resolveAuthSchemeResolver(options *Options) { + if options.AuthSchemeResolver == nil { + options.AuthSchemeResolver = &defaultAuthSchemeResolver{} + } +} + +func resolveAuthSchemes(options *Options) { + if options.AuthSchemes == nil { + options.AuthSchemes = []smithyhttp.AuthScheme{ + internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ + Signer: options.HTTPSignerV4, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), + } + } +} + type noSmithyDocumentSerde = smithydocument.NoSerde type legacyEndpointContextSetter struct { @@ -414,15 +351,6 @@ func addClientUserAgent(stack *middleware.Stack, options Options) error { return nil } -func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { - mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ - CredentialsProvider: o.Credentials, - Signer: o.HTTPSignerV4, - LogSigning: o.ClientLogMode.IsSigning(), - }) - return 
stack.Finalize.Add(mw, middleware.After) -} - type HTTPSignerV4 interface { SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error } @@ -496,31 +424,31 @@ func addRequestResponseLogging(stack *middleware.Stack, o Options) error { }, middleware.After) } -type endpointDisableHTTPSMiddleware struct { - EndpointDisableHTTPS bool +type disableHTTPSMiddleware struct { + DisableHTTPS bool } -func (*endpointDisableHTTPSMiddleware) ID() string { - return "endpointDisableHTTPSMiddleware" +func (*disableHTTPSMiddleware) ID() string { + return "disableHTTPS" } -func (m *endpointDisableHTTPSMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, +func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) } - if m.EndpointDisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { req.URL.Scheme = "http" } - return next.HandleSerialize(ctx, in) - + return next.HandleFinalize(ctx, in) } -func addendpointDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return stack.Serialize.Insert(&endpointDisableHTTPSMiddleware{ - EndpointDisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "OperationSerializer", middleware.Before) + +func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Finalize.Insert(&disableHTTPSMiddleware{ + DisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "ResolveEndpointV2", middleware.After) } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go index 43df6256c..424642941 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go @@ -4,19 +4,16 @@ package ssooidc import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates and returns an access token for the authorized client. The access token -// issued will be used to fetch short-term credentials for the assigned roles in -// the AWS account. +// Creates and returns access and refresh tokens for clients that are +// authenticated using client secrets. The access token can be used to fetch +// short-term credentials for the assigned AWS accounts or to access application +// APIs using bearer authentication. func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optFns ...func(*Options)) (*CreateTokenOutput, error) { if params == nil { params = &CreateTokenInput{} @@ -34,8 +31,8 @@ func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optF type CreateTokenInput struct { - // The unique identifier string for each client. This value should come from the - // persisted result of the RegisterClient API. + // The unique identifier string for the client or application. This value comes + // from the result of the RegisterClient API. // // This member is required. ClientId *string @@ -46,37 +43,42 @@ type CreateTokenInput struct { // This member is required. ClientSecret *string - // Supports grant types for the authorization code, refresh token, and device code - // request. 
For device code requests, specify the following value: - // urn:ietf:params:oauth:grant-type:device_code For information about how to - // obtain the device code, see the StartDeviceAuthorization topic. + // Supports the following OAuth grant types: Device Code and Refresh Token. + // Specify either of the following values, depending on the grant type that you + // want: * Device Code - urn:ietf:params:oauth:grant-type:device_code * Refresh + // Token - refresh_token For information about how to obtain the device code, see + // the StartDeviceAuthorization topic. // // This member is required. GrantType *string - // The authorization code received from the authorization service. This parameter - // is required to perform an authorization grant request to get access to a token. + // Used only when calling this API for the Authorization Code grant type. The + // short-term code is used to identify this authorization request. This grant type + // is currently unsupported for the CreateToken API. Code *string - // Used only when calling this API for the device code grant type. This short-term - // code is used to identify this authentication attempt. This should come from an - // in-memory reference to the result of the StartDeviceAuthorization API. + // Used only when calling this API for the Device Code grant type. This short-term + // code is used to identify this authorization request. This comes from the result + // of the StartDeviceAuthorization API. DeviceCode *string - // The location of the application that will receive the authorization code. Users - // authorize the service to send the request to this location. + // Used only when calling this API for the Authorization Code grant type. This + // value specifies the location of the client or application that has registered to + // receive the authorization code. RedirectUri *string - // Currently, refreshToken is not yet implemented and is not supported. 
For more - // information about the features and limitations of the current IAM Identity - // Center OIDC implementation, see Considerations for Using this Guide in the IAM - // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . The token used to obtain an access token in the event that the access token is - // invalid or expired. + // Used only when calling this API for the Refresh Token grant type. This token is + // used to refresh short-term tokens, such as the access token, that might expire. + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide in + // the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . RefreshToken *string - // The list of scopes that is defined by the client. Upon authorization, this list - // is used to restrict permissions when granting an access token. + // The list of scopes for which authorization is requested. The access token that + // is issued is limited to the scopes that are granted. If this value is not + // specified, IAM Identity Center authorizes all scopes that are configured for the + // client during the call to RegisterClient . Scope []string noSmithyDocumentSerde @@ -84,29 +86,30 @@ type CreateTokenInput struct { type CreateTokenOutput struct { - // An opaque token to access IAM Identity Center resources assigned to a user. + // A bearer token to access AWS accounts and applications assigned to a user. AccessToken *string // Indicates the time in seconds when an access token will expire. ExpiresIn int32 - // Currently, idToken is not yet implemented and is not supported. 
For more - // information about the features and limitations of the current IAM Identity - // Center OIDC implementation, see Considerations for Using this Guide in the IAM - // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . The identifier of the user that associated with the access token, if present. + // The idToken is not implemented or supported. For more information about the + // features and limitations of the current IAM Identity Center OIDC implementation, + // see Considerations for Using this Guide in the IAM Identity Center OIDC API + // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . A JSON Web Token (JWT) that identifies who is associated with the issued + // access token. IdToken *string - // Currently, refreshToken is not yet implemented and is not supported. For more - // information about the features and limitations of the current IAM Identity - // Center OIDC implementation, see Considerations for Using this Guide in the IAM - // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . A token that, if present, can be used to refresh a previously issued access - // token that might have expired. + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. For more information about the features and + // limitations of the current IAM Identity Center OIDC implementation, see + // Considerations for Using this Guide in the IAM Identity Center OIDC API + // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . RefreshToken *string // Used to notify the client that the returned token is an access token. The - // supported type is BearerToken . + // supported token type is Bearer . TokenType *string // Metadata pertaining to the operation's result. 
@@ -116,6 +119,9 @@ type CreateTokenOutput struct { } func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateToken{}, middleware.After) if err != nil { return err @@ -124,6 +130,10 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateToken"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -157,7 +167,7 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addCreateTokenResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpCreateTokenValidationMiddleware(stack); err != nil { @@ -178,7 +188,7 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -191,126 +201,3 @@ func newServiceMetadataMiddleware_opCreateToken(region string) *awsmiddleware.Re OperationName: "CreateToken", } } - -type opCreateTokenResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opCreateTokenResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opCreateTokenResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "awsssooidc" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "awsssooidc" - } else { - 
signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("awsssooidc") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addCreateTokenResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opCreateTokenResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go new file mode 100644 index 000000000..ed4b98f76 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go @@ -0,0 +1,230 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates and returns access and refresh tokens for clients and applications that +// are authenticated using IAM entities. The access token can be used to fetch +// short-term credentials for the assigned AWS accounts or to access application +// APIs using bearer authentication. 
+func (c *Client) CreateTokenWithIAM(ctx context.Context, params *CreateTokenWithIAMInput, optFns ...func(*Options)) (*CreateTokenWithIAMOutput, error) { + if params == nil { + params = &CreateTokenWithIAMInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateTokenWithIAM", params, optFns, c.addOperationCreateTokenWithIAMMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateTokenWithIAMOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateTokenWithIAMInput struct { + + // The unique identifier string for the client or application. This value is an + // application ARN that has OAuth grants configured. + // + // This member is required. + ClientId *string + + // Supports the following OAuth grant types: Authorization Code, Refresh Token, + // JWT Bearer, and Token Exchange. Specify one of the following values, depending + // on the grant type that you want: * Authorization Code - authorization_code * + // Refresh Token - refresh_token * JWT Bearer - + // urn:ietf:params:oauth:grant-type:jwt-bearer * Token Exchange - + // urn:ietf:params:oauth:grant-type:token-exchange + // + // This member is required. + GrantType *string + + // Used only when calling this API for the JWT Bearer grant type. This value + // specifies the JSON Web Token (JWT) issued by a trusted token issuer. To + // authorize a trusted token issuer, configure the JWT Bearer GrantOptions for the + // application. + Assertion *string + + // Used only when calling this API for the Authorization Code grant type. This + // short-term code is used to identify this authorization request. The code is + // obtained through a redirect from IAM Identity Center to a redirect URI persisted + // in the Authorization Code GrantOptions for the application. + Code *string + + // Used only when calling this API for the Authorization Code grant type. 
This + // value specifies the location of the client or application that has registered to + // receive the authorization code. + RedirectUri *string + + // Used only when calling this API for the Refresh Token grant type. This token is + // used to refresh short-term tokens, such as the access token, that might expire. + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide in + // the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . + RefreshToken *string + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the type of token that the requester can receive. The following values + // are supported: * Access Token - urn:ietf:params:oauth:token-type:access_token * + // Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + RequestedTokenType *string + + // The list of scopes for which authorization is requested. The access token that + // is issued is limited to the scopes that are granted. If the value is not + // specified, IAM Identity Center authorizes all scopes configured for the + // application, including the following default scopes: openid , aws , + // sts:identity_context . + Scope []string + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the subject of the exchange. The value of the subject token must be an + // access token issued by IAM Identity Center to a different client or application. + // The access token must have authorized scopes that indicate the requested + // application as a target audience. + SubjectToken *string + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the type of token that is passed as the subject of the exchange. 
The + // following value is supported: * Access Token - + // urn:ietf:params:oauth:token-type:access_token + SubjectTokenType *string + + noSmithyDocumentSerde +} + +type CreateTokenWithIAMOutput struct { + + // A bearer token to access AWS accounts and applications assigned to a user. + AccessToken *string + + // Indicates the time in seconds when an access token will expire. + ExpiresIn int32 + + // A JSON Web Token (JWT) that identifies the user associated with the issued + // access token. + IdToken *string + + // Indicates the type of tokens that are issued by IAM Identity Center. The + // following values are supported: * Access Token - + // urn:ietf:params:oauth:token-type:access_token * Refresh Token - + // urn:ietf:params:oauth:token-type:refresh_token + IssuedTokenType *string + + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. For more information about the features and + // limitations of the current IAM Identity Center OIDC implementation, see + // Considerations for Using this Guide in the IAM Identity Center OIDC API + // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . + RefreshToken *string + + // The list of scopes for which authorization is granted. The access token that is + // issued is limited to the scopes that are granted. + Scope []string + + // Used to notify the requester that the returned token is an access token. The + // supported token type is Bearer . + TokenType *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateTokenWithIAM{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateTokenWithIAM{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateTokenWithIAM"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpCreateTokenWithIAMValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opCreateTokenWithIAM(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateTokenWithIAM(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateTokenWithIAM", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go index b88ebb706..7aee90491 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go @@ -4,12 +4,8 @@ package ssooidc import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -54,7 +50,7 @@ type RegisterClientInput struct { type RegisterClientOutput struct { - // The endpoint where the client can request authorization. + // An endpoint that the client can use to request authorization. AuthorizationEndpoint *string // The unique identifier string for each client. 
This client uses this identifier @@ -71,7 +67,7 @@ type RegisterClientOutput struct { // Indicates the time at which the clientId and clientSecret will become invalid. ClientSecretExpiresAt int64 - // The endpoint where the client can get an access token. + // An endpoint that the client can use to create tokens. TokenEndpoint *string // Metadata pertaining to the operation's result. @@ -81,6 +77,9 @@ type RegisterClientOutput struct { } func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestjson1_serializeOpRegisterClient{}, middleware.After) if err != nil { return err @@ -89,6 +88,10 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "RegisterClient"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -122,7 +125,7 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addRegisterClientResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpRegisterClientValidationMiddleware(stack); err != nil { @@ -143,7 +146,7 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -156,126 +159,3 @@ func newServiceMetadataMiddleware_opRegisterClient(region 
string) *awsmiddleware OperationName: "RegisterClient", } } - -type opRegisterClientResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opRegisterClientResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opRegisterClientResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "awsssooidc" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the 
client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "awsssooidc" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("awsssooidc") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addRegisterClientResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opRegisterClientResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go index 327da5f73..d30349e6b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go @@ -4,12 +4,8 @@ package ssooidc import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -46,8 +42,8 @@ type StartDeviceAuthorizationInput struct { // This member is required. ClientSecret *string - // The URL for the AWS access portal. For more information, see Using the AWS - // access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // The URL for the Amazon Web Services access portal. 
For more information, see + // Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) // in the IAM Identity Center User Guide. // // This member is required. @@ -89,6 +85,9 @@ type StartDeviceAuthorizationOutput struct { } func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestjson1_serializeOpStartDeviceAuthorization{}, middleware.After) if err != nil { return err @@ -97,6 +96,10 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "StartDeviceAuthorization"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -130,7 +133,7 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addStartDeviceAuthorizationResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil { @@ -151,7 +154,7 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -164,126 +167,3 @@ func newServiceMetadataMiddleware_opStartDeviceAuthorization(region string) *aws OperationName: "StartDeviceAuthorization", } } - -type 
opStartDeviceAuthorizationResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opStartDeviceAuthorizationResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opStartDeviceAuthorizationResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "awsssooidc" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - 
ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "awsssooidc" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("awsssooidc") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addStartDeviceAuthorizationResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opStartDeviceAuthorizationResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go new file mode 100644 index 000000000..2562611a4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go @@ -0,0 +1,274 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ssooidc + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { + params.Region = options.Region +} + +type setLegacyContextSigningOptionsMiddleware struct { +} + +func (*setLegacyContextSigningOptionsMiddleware) ID() string { + return "setLegacyContextSigningOptions" +} + +func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + schemeID := rscheme.Scheme.SchemeID() + + if sn := awsmiddleware.GetSigningName(ctx); sn != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) + } + } + + if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) + } + } + + return next.HandleFinalize(ctx, in) +} + +func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { + return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) +} + +// AuthResolverParameters contains the set of inputs necessary for auth scheme +// resolution. +type AuthResolverParameters struct { + // The name of the operation being invoked. + Operation string + + // The region in which the operation is being invoked. 
+ Region string +} + +func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { + params := &AuthResolverParameters{ + Operation: operation, + } + + bindAuthParamsRegion(params, input, options) + + return params +} + +// AuthSchemeResolver returns a set of possible authentication options for an +// operation. +type AuthSchemeResolver interface { + ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) +} + +type defaultAuthSchemeResolver struct{} + +var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) + +func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + if overrides, ok := operationAuthOptions[params.Operation]; ok { + return overrides(params), nil + } + return serviceAuthOptions(params), nil +} + +var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ + "CreateToken": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, + + "RegisterClient": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, + + "StartDeviceAuthorization": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, +} + +func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + { + SchemeID: smithyauth.SchemeIDSigV4, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "sso-oauth") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + return props + }(), + }, + } +} + +type resolveAuthSchemeMiddleware struct { + operation string + options Options +} + +func (*resolveAuthSchemeMiddleware) ID() 
string { + return "ResolveAuthScheme" +} + +func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("resolve auth scheme: %v", err) + } + + scheme, ok := m.selectScheme(options) + if !ok { + return out, metadata, fmt.Errorf("could not select an auth scheme") + } + + ctx = setResolvedAuthScheme(ctx, scheme) + return next.HandleFinalize(ctx, in) +} + +func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { + for _, option := range options { + if option.SchemeID == smithyauth.SchemeIDAnonymous { + return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true + } + + for _, scheme := range m.options.AuthSchemes { + if scheme.SchemeID() != option.SchemeID { + continue + } + + if scheme.IdentityResolver(m.options) != nil { + return newResolvedAuthScheme(scheme, option), true + } + } + } + + return nil, false +} + +type resolvedAuthSchemeKey struct{} + +type resolvedAuthScheme struct { + Scheme smithyhttp.AuthScheme + IdentityProperties smithy.Properties + SignerProperties smithy.Properties +} + +func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { + return &resolvedAuthScheme{ + Scheme: scheme, + IdentityProperties: option.IdentityProperties, + SignerProperties: option.SignerProperties, + } +} + +func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { + return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) +} + +func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { + v, _ := middleware.GetStackValue(ctx, 
resolvedAuthSchemeKey{}).(*resolvedAuthScheme) + return v +} + +type getIdentityMiddleware struct { + options Options +} + +func (*getIdentityMiddleware) ID() string { + return "GetIdentity" +} + +func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + resolver := rscheme.Scheme.IdentityResolver(m.options) + if resolver == nil { + return out, metadata, fmt.Errorf("no identity resolver") + } + + identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) + if err != nil { + return out, metadata, fmt.Errorf("get identity: %v", err) + } + + ctx = setIdentity(ctx, identity) + return next.HandleFinalize(ctx, in) +} + +type identityKey struct{} + +func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { + return middleware.WithStackValue(ctx, identityKey{}, identity) +} + +func getIdentity(ctx context.Context) smithyauth.Identity { + v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) + return v +} + +type signRequestMiddleware struct { +} + +func (*signRequestMiddleware) ID() string { + return "Signing" +} + +func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + identity := getIdentity(ctx) + if identity == nil { + return out, metadata, fmt.Errorf("no identity") + } + + signer := rscheme.Scheme.Signer() 
+ if signer == nil { + return out, metadata, fmt.Errorf("no signer") + } + + if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { + return out, metadata, fmt.Errorf("sign request: %v", err) + } + + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go index ca30d22f9..76a1160ec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go @@ -239,6 +239,244 @@ func awsRestjson1_deserializeOpDocumentCreateTokenOutput(v **CreateTokenOutput, return nil } +type awsRestjson1_deserializeOpCreateTokenWithIAM struct { +} + +func (*awsRestjson1_deserializeOpCreateTokenWithIAM) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCreateTokenWithIAM) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCreateTokenWithIAM(response, &metadata) + } + output := &CreateTokenWithIAMOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + 
err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorCreateTokenWithIAM(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("AuthorizationPendingException", 
errorCode): + return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody) + + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) + + case strings.EqualFold("InvalidGrantException", errorCode): + return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("InvalidRequestRegionException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestRegionException(response, errorBody) + + case strings.EqualFold("InvalidScopeException", errorCode): + return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) + + case strings.EqualFold("SlowDownException", errorCode): + return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + case strings.EqualFold("UnsupportedGrantTypeException", errorCode): + return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(v **CreateTokenWithIAMOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateTokenWithIAMOutput + if *v == nil { + sv = &CreateTokenWithIAMOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accessToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value) + } + sv.AccessToken = ptr.String(jtv) + } + + case "expiresIn": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ExpiresIn = int32(i64) + } + + case "idToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IdToken to be of type string, got %T instead", value) + } + sv.IdToken = ptr.String(jtv) + } + + case "issuedTokenType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TokenTypeURI to be of type string, got %T instead", value) + } + sv.IssuedTokenType = ptr.String(jtv) + } + + case "refreshToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value) + } + sv.RefreshToken = ptr.String(jtv) + } + + case "scope": + if err := awsRestjson1_deserializeDocumentScopes(&sv.Scope, value); err != nil { + return err + } + + case "tokenType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TokenType to be of type string, got %T instead", value) + } + sv.TokenType = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + type awsRestjson1_deserializeOpRegisterClient struct { } @@ -956,6 +1194,42 @@ func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.R return output } +func 
awsRestjson1_deserializeErrorInvalidRequestRegionException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRequestRegionException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidRequestRegionException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + func awsRestjson1_deserializeErrorInvalidScopeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { output := &types.InvalidScopeException{} var buff [1024]byte @@ -1492,6 +1766,73 @@ func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRe return nil } +func awsRestjson1_deserializeDocumentInvalidRequestRegionException(v **types.InvalidRequestRegionException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRequestRegionException + if *v == nil { + sv = &types.InvalidRequestRegionException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "endpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Location to 
be of type string, got %T instead", value) + } + sv.Endpoint = ptr.String(jtv) + } + + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + case "region": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Region to be of type string, got %T instead", value) + } + sv.Region = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentInvalidScopeException(v **types.InvalidScopeException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -1541,6 +1882,42 @@ func awsRestjson1_deserializeDocumentInvalidScopeException(v **types.InvalidScop return nil } +func awsRestjson1_deserializeDocumentScopes(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Scope to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsRestjson1_deserializeDocumentSlowDownException(v **types.SlowDownException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go index 2239427d8..53cd4f55a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go @@ -3,31 +3,33 @@ // Package ssooidc provides the API client, operations, and parameter types for // AWS SSO OIDC. // -// AWS IAM Identity Center (successor to AWS Single Sign-On) OpenID Connect (OIDC) -// is a web service that enables a client (such as AWS CLI or a native application) -// to register with IAM Identity Center. The service also enables the client to -// fetch the user’s access token upon successful authentication and authorization -// with IAM Identity Center. Although AWS Single Sign-On was renamed, the sso and -// identitystore API namespaces will continue to retain their original name for -// backward compatibility purposes. For more information, see IAM Identity Center -// rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed) -// . Considerations for Using This Guide Before you begin using this guide, we -// recommend that you first review the following important information about how -// the IAM Identity Center OIDC service works. +// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a +// client (such as CLI or a native application) to register with IAM Identity +// Center. The service also enables the client to fetch the user’s access token +// upon successful authentication and authorization with IAM Identity Center. IAM +// Identity Center uses the sso and identitystore API namespaces. Considerations +// for Using This Guide Before you begin using this guide, we recommend that you +// first review the following important information about how the IAM Identity +// Center OIDC service works. 
// - The IAM Identity Center OIDC service currently implements only the portions // of the OAuth 2.0 Device Authorization Grant standard ( // https://tools.ietf.org/html/rfc8628 (https://tools.ietf.org/html/rfc8628) ) -// that are necessary to enable single sign-on authentication with the AWS CLI. -// Support for other OIDC flows frequently needed for native applications, such as -// Authorization Code Flow (+ PKCE), will be addressed in future releases. -// - The service emits only OIDC access tokens, such that obtaining a new token -// (For example, token refresh) requires explicit user re-authentication. -// - The access tokens provided by this service grant access to all AWS account -// entitlements assigned to an IAM Identity Center user, not just a particular -// application. +// that are necessary to enable single sign-on authentication with the CLI. +// - With older versions of the CLI, the service only emits OIDC access tokens, +// so to obtain a new token, users must explicitly re-authenticate. To access the +// OIDC flow that supports token refresh and doesn’t require re-authentication, +// update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with +// support for OIDC token refresh and configurable IAM Identity Center session +// durations. For more information, see Configure Amazon Web Services access +// portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html) +// . +// - The access tokens provided by this service grant access to all Amazon Web +// Services account entitlements assigned to an IAM Identity Center user, not just +// a particular application. // - The documentation in this guide does not describe the mechanism to convert -// the access token into AWS Auth (“sigv4”) credentials for use with IAM-protected -// AWS service endpoints. 
For more information, see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) +// the access token into Amazon Web Services Auth (“sigv4”) credentials for use +// with IAM-protected Amazon Web Services service endpoints. For more information, +// see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) // in the IAM Identity Center Portal API Reference Guide. // // For general information about IAM Identity Center, see What is IAM Identity diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go index 50b490cbd..85b870890 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go @@ -9,8 +9,10 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/endpoints" "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" internalendpoints "github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints" + smithyauth "github.com/aws/smithy-go/auth" smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" @@ -113,7 +115,7 @@ func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.Ser if len(awsmiddleware.GetSigningName(ctx)) == 0 { signingName := endpoint.SigningName if len(signingName) == 0 { - signingName = "awsssooidc" + signingName = "sso-oauth" } ctx = awsmiddleware.SetSigningName(ctx, signingName) } @@ -214,72 +216,6 @@ func resolveBaseEndpoint(cfg aws.Config, o *Options) { } } -// Utility function to aid with translating pseudo-regions to classical regions -// with the appropriate setting indicated by the pseudo-region 
-func mapPseudoRegion(pr string) (region string, fips aws.FIPSEndpointState) { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(pr, fipsInfix) || - strings.Contains(pr, fipsPrefix) || - strings.Contains(pr, fipsSuffix) { - region = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - pr, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - fips = aws.FIPSEndpointStateEnabled - } else { - region = pr - } - - return region, fips -} - -// builtInParameterResolver is the interface responsible for resolving BuiltIn -// values during the sourcing of EndpointParameters -type builtInParameterResolver interface { - ResolveBuiltIns(*EndpointParameters) error -} - -// builtInResolver resolves modeled BuiltIn values using only the members defined -// below. -type builtInResolver struct { - // The AWS region used to dispatch the request. - Region string - - // Sourced BuiltIn value in a historical enabled or disabled state. - UseDualStack aws.DualStackEndpointState - - // Sourced BuiltIn value in a historical enabled or disabled state. - UseFIPS aws.FIPSEndpointState - - // Base endpoint that can potentially be modified during Endpoint resolution. - Endpoint *string -} - -// Invoked at runtime to resolve BuiltIn Values. Only resolution code specific to -// each BuiltIn value is generated. 
-func (b *builtInResolver) ResolveBuiltIns(params *EndpointParameters) error { - - region, _ := mapPseudoRegion(b.Region) - if len(region) == 0 { - return fmt.Errorf("Could not resolve AWS::Region") - } else { - params.Region = aws.String(region) - } - if b.UseDualStack == aws.DualStackEndpointStateEnabled { - params.UseDualStack = aws.Bool(true) - } else { - params.UseDualStack = aws.Bool(false) - } - if b.UseFIPS == aws.FIPSEndpointStateEnabled { - params.UseFIPS = aws.Bool(true) - } else { - params.UseFIPS = aws.Bool(false) - } - params.Endpoint = b.Endpoint - return nil -} - // EndpointParameters provides the parameters that influence how endpoints are // resolved. type EndpointParameters struct { @@ -430,8 +366,8 @@ func (r *resolver) ResolveEndpoint( } } if _UseFIPS == true { - if true == _PartitionResult.SupportsFIPS { - if "aws-us-gov" == _PartitionResult.Name { + if _PartitionResult.SupportsFIPS == true { + if _PartitionResult.Name == "aws-us-gov" { uriString := func() string { var out strings.Builder out.WriteString("https://oidc.") @@ -517,3 +453,76 @@ func (r *resolver) ResolveEndpoint( } return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") } + +type endpointParamsBinder interface { + bindEndpointParams(*EndpointParameters) +} + +func bindEndpointParams(input interface{}, options Options) *EndpointParameters { + params := &EndpointParameters{} + + params.Region = aws.String(endpoints.MapFIPSRegion(options.Region)) + params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) + params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) + params.Endpoint = options.BaseEndpoint + + if b, ok := input.(endpointParamsBinder); ok { + b.bindEndpointParams(params) + } + + return params +} + +type resolveEndpointV2Middleware struct { + options Options +} + +func (*resolveEndpointV2Middleware) ID() string { + return 
"ResolveEndpointV2" +} + +func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.options.EndpointResolverV2 == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := bindEndpointParams(getOperationInput(ctx), m.options) + endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { + endpt.URI.RawPath = endpt.URI.Path + } + req.URL.Scheme = endpt.URI.Scheme + req.URL.Host = endpt.URI.Host + req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) + req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) + for k := range endpt.Headers { + req.Header.Set(k, endpt.Headers.Get(k)) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) + for _, o := range opts { + rscheme.SignerProperties.SetAll(&o.SignerProperties) + } + + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json index 403fac7c5..0a6b34935 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json @@ -10,8 +10,10 @@ "api_client.go", "api_client_test.go", "api_op_CreateToken.go", + 
"api_op_CreateTokenWithIAM.go", "api_op_RegisterClient.go", "api_op_StartDeviceAuthorization.go", + "auth.go", "deserializers.go", "doc.go", "endpoints.go", @@ -20,6 +22,7 @@ "generated.json", "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", + "options.go", "protocol_test.go", "serializers.go", "types/errors.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index 856141700..ee1bc3508 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.19.0" +const goModuleVersion = "1.20.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go new file mode 100644 index 000000000..c1ba5619b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go @@ -0,0 +1,219 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" +) + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. 
+ APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + // + // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom + // endpoint, set the client option BaseEndpoint instead. + EndpointResolver EndpointResolver + + // Resolves the endpoint used for a particular service operation. This should be + // used over the deprecated EndpointResolver. + EndpointResolverV2 EndpointResolverV2 + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. 
A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. 
Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient + + // The auth scheme resolver which determines how to authenticate for each + // operation. + AuthSchemeResolver AuthSchemeResolver + + // The list of auth schemes supported by the client. + AuthSchemes []smithyhttp.AuthScheme +} + +// Copy creates a clone where the APIOptions list is deep copied. +func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} + +func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { + if schemeID == "aws.auth#sigv4" { + return getSigV4IdentityResolver(o) + } + if schemeID == "smithy.api#noAuth" { + return &smithyauth.AnonymousIdentityResolver{} + } + return nil +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. 
+func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { + if o.Credentials != nil { + return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} + } + return nil +} + +// WithSigV4SigningName applies an override to the authentication workflow to +// use the given signing name for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing name from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningName(name string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), + middleware.Before, + ) + }) + } +} + +// WithSigV4SigningRegion applies an override to the authentication workflow to +// use the given signing region for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing region from both auth scheme resolution and endpoint +// resolution. 
+func WithSigV4SigningRegion(region string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), + middleware.Before, + ) + }) + } +} + +func ignoreAnonymousAuth(options *Options) { + if _, ok := options.Credentials.(aws.AnonymousCredentials); ok { + options.Credentials = nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go index efca8b250..754218b78 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go @@ -125,6 +125,128 @@ func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value return nil } +type awsRestjson1_serializeOpCreateTokenWithIAM struct { +} + +func (*awsRestjson1_serializeOpCreateTokenWithIAM) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateTokenWithIAM) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateTokenWithIAMInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := 
httpbinding.SplitURI("/token?aws_iam=t") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Assertion != nil { + ok := object.Key("assertion") + ok.String(*v.Assertion) + } + + if v.ClientId != nil { + ok := object.Key("clientId") + ok.String(*v.ClientId) + } + + if v.Code != nil { + ok := object.Key("code") + ok.String(*v.Code) 
+ } + + if v.GrantType != nil { + ok := object.Key("grantType") + ok.String(*v.GrantType) + } + + if v.RedirectUri != nil { + ok := object.Key("redirectUri") + ok.String(*v.RedirectUri) + } + + if v.RefreshToken != nil { + ok := object.Key("refreshToken") + ok.String(*v.RefreshToken) + } + + if v.RequestedTokenType != nil { + ok := object.Key("requestedTokenType") + ok.String(*v.RequestedTokenType) + } + + if v.Scope != nil { + ok := object.Key("scope") + if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil { + return err + } + } + + if v.SubjectToken != nil { + ok := object.Key("subjectToken") + ok.String(*v.SubjectToken) + } + + if v.SubjectTokenType != nil { + ok := object.Key("subjectTokenType") + ok.String(*v.SubjectTokenType) + } + + return nil +} + type awsRestjson1_serializeOpRegisterClient struct { } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go index 115a51a9e..86b62049f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go @@ -247,6 +247,38 @@ func (e *InvalidRequestException) ErrorCode() string { } func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } +// Indicates that a token provided as input to the request was issued by and is +// only usable by calling IAM Identity Center endpoints in another region. 
+type InvalidRequestRegionException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + Endpoint *string + Region *string + + noSmithyDocumentSerde +} + +func (e *InvalidRequestRegionException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRequestRegionException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRequestRegionException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRequestRegionException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRequestRegionException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + // Indicates that the scope provided in the request is invalid. type InvalidScopeException struct { Message *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go index 5a309484e..9c17e4c8e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go @@ -29,6 +29,26 @@ func (m *validateOpCreateToken) HandleInitialize(ctx context.Context, in middlew return next.HandleInitialize(ctx, in) } +type validateOpCreateTokenWithIAM struct { +} + +func (*validateOpCreateTokenWithIAM) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateTokenWithIAM) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateTokenWithIAMInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateTokenWithIAMInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} 
+ type validateOpRegisterClient struct { } @@ -73,6 +93,10 @@ func addOpCreateTokenValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateToken{}, middleware.After) } +func addOpCreateTokenWithIAMValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateTokenWithIAM{}, middleware.After) +} + func addOpRegisterClientValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpRegisterClient{}, middleware.After) } @@ -102,6 +126,24 @@ func validateOpCreateTokenInput(v *CreateTokenInput) error { } } +func validateOpCreateTokenWithIAMInput(v *CreateTokenWithIAMInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateTokenWithIAMInput"} + if v.ClientId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientId")) + } + if v.GrantType == nil { + invalidParams.Add(smithy.NewErrParamRequired("GrantType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpRegisterClientInput(v *RegisterClientInput) error { if v == nil { return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 2e5ca8f87..f6142a0c3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,15 @@ +# v1.25.3 (2023-11-17) + +* **Documentation**: API updates for the AWS Security Token Service + +# v1.25.2 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.25.0 (2023-11-01) * **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go index c29d8cad1..52e10270a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -12,7 +12,10 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" smithy "github.com/aws/smithy-go" smithydocument "github.com/aws/smithy-go/document" @@ -49,10 +52,18 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveHTTPSignerV4(&options) + resolveEndpointResolverV2(&options) + + resolveAuthSchemeResolver(&options) + for _, fn := range optFns { fn(&options) } + ignoreAnonymousAuth(&options) + + resolveAuthSchemes(&options) + client := &Client{ options: options, } @@ -60,140 +71,10 @@ func New(options Options, optFns ...func(*Options)) *Client { return client } -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. - AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. 
- BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. - EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service. This should be used over - // the deprecated EndpointResolver - EndpointResolverV2 EndpointResolverV2 - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The region to send requests to. (Required) - Region string - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. 
If specified in an operation call's functional - // options with a value that is different than the constructed client's Options, - // the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. - RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) 
- } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. -// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - - return to -} func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { ctx = middleware.ClearStackValues(ctx) stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() - resolveEndpointResolverV2(&options) for _, fn := range optFns { fn(&options) @@ -227,6 +108,63 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf return result, metadata, err } +type operationInputKey struct{} + +func setOperationInput(ctx context.Context, input interface{}) context.Context { + return middleware.WithStackValue(ctx, operationInputKey{}, input) +} + +func getOperationInput(ctx context.Context) interface{} { + return middleware.GetStackValue(ctx, operationInputKey{}) +} + +type setOperationInputMiddleware struct { +} + 
+func (*setOperationInputMiddleware) ID() string { + return "setOperationInput" +} + +func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + ctx = setOperationInput(ctx, in.Parameters) + return next.HandleSerialize(ctx, in) +} + +func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { + if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { + return fmt.Errorf("add ResolveAuthScheme: %v", err) + } + if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { + return fmt.Errorf("add GetIdentity: %v", err) + } + if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { + return fmt.Errorf("add ResolveEndpointV2: %v", err) + } + if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + return fmt.Errorf("add Signing: %v", err) + } + return nil +} +func resolveAuthSchemeResolver(options *Options) { + if options.AuthSchemeResolver == nil { + options.AuthSchemeResolver = &defaultAuthSchemeResolver{} + } +} + +func resolveAuthSchemes(options *Options) { + if options.AuthSchemes == nil { + options.AuthSchemes = []smithyhttp.AuthScheme{ + internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ + Signer: options.HTTPSignerV4, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), + } + } +} + type noSmithyDocumentSerde = smithydocument.NoSerde type legacyEndpointContextSetter struct { @@ -417,15 +355,6 @@ func addClientUserAgent(stack *middleware.Stack, options Options) error { return nil } -func addHTTPSignerV4Middleware(stack 
*middleware.Stack, o Options) error { - mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ - CredentialsProvider: o.Credentials, - Signer: o.HTTPSignerV4, - LogSigning: o.ClientLogMode.IsSigning(), - }) - return stack.Finalize.Add(mw, middleware.After) -} - type HTTPSignerV4 interface { SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error } @@ -560,20 +489,61 @@ func withNopHTTPClientAPIOption(o *Options) { o.HTTPClient = smithyhttp.NopClient{} } +type presignContextPolyfillMiddleware struct { +} + +func (*presignContextPolyfillMiddleware) ID() string { + return "presignContextPolyfill" +} + +func (m *presignContextPolyfillMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + schemeID := rscheme.Scheme.SchemeID() + + if schemeID == "aws.auth#sigv4" { + if sn, ok := smithyhttp.GetSigV4SigningName(&rscheme.SignerProperties); ok { + ctx = awsmiddleware.SetSigningName(ctx, sn) + } + if sr, ok := smithyhttp.GetSigV4SigningRegion(&rscheme.SignerProperties); ok { + ctx = awsmiddleware.SetSigningRegion(ctx, sr) + } + } else if schemeID == "aws.auth#sigv4a" { + if sn, ok := smithyhttp.GetSigV4ASigningName(&rscheme.SignerProperties); ok { + ctx = awsmiddleware.SetSigningName(ctx, sn) + } + if sr, ok := smithyhttp.GetSigV4ASigningRegions(&rscheme.SignerProperties); ok { + ctx = awsmiddleware.SetSigningRegion(ctx, sr[0]) + } + } + + return next.HandleFinalize(ctx, in) +} + type presignConverter PresignOptions func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { - stack.Finalize.Clear() + if _, ok := 
stack.Finalize.Get((*acceptencodingcust.DisableGzip)(nil).ID()); ok { + stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID()) + } stack.Deserialize.Clear() stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID()) stack.Build.Remove("UserAgent") + if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil { + return err + } + pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ CredentialsProvider: options.Credentials, Presigner: c.Presigner, LogSigning: options.ClientLogMode.IsSigning(), }) - err = stack.Finalize.Add(pmw, middleware.After) - if err != nil { + if _, err := stack.Finalize.Swap("Signing", pmw); err != nil { return err } if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { @@ -600,31 +570,31 @@ func addRequestResponseLogging(stack *middleware.Stack, o Options) error { }, middleware.After) } -type endpointDisableHTTPSMiddleware struct { - EndpointDisableHTTPS bool +type disableHTTPSMiddleware struct { + DisableHTTPS bool } -func (*endpointDisableHTTPSMiddleware) ID() string { - return "endpointDisableHTTPSMiddleware" +func (*disableHTTPSMiddleware) ID() string { + return "disableHTTPS" } -func (m *endpointDisableHTTPSMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, +func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) } - if m.EndpointDisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { req.URL.Scheme = "http" } - return next.HandleSerialize(ctx, 
in) - + return next.HandleFinalize(ctx, in) } -func addendpointDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return stack.Serialize.Insert(&endpointDisableHTTPSMiddleware{ - EndpointDisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "OperationSerializer", middleware.Before) + +func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Finalize.Insert(&disableHTTPSMiddleware{ + DisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "ResolveEndpointV2", middleware.After) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go index 0ef7affc5..ea2e4595e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -4,14 +4,10 @@ package sts import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sts/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -204,7 +200,12 @@ type AssumeRoleInput struct { // in the IAM User Guide. PolicyArns []types.PolicyDescriptorType - // Reserved for future use. + // A list of previously acquired trusted context assertions in the format of a + // JSON array. The trusted context assertion is signed and encrypted by Amazon Web + // Services STS. The following is an example of a ProvidedContext value that + // includes a single trusted context assertion and the ARN of the context provider + // from which the trusted context assertion was generated. 
+ // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/identitycenter","ContextAssertion":"trusted-context-assertion"}] ProvidedContexts []types.ProvidedContext // The identification number of the MFA device that is associated with the user @@ -327,6 +328,9 @@ type AssumeRoleOutput struct { } func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRole{}, middleware.After) if err != nil { return err @@ -335,6 +339,10 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRole"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -356,9 +364,6 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -374,7 +379,7 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addAssumeRoleResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpAssumeRoleValidationMiddleware(stack); err != nil { @@ -395,7 +400,7 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil 
{ + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -405,7 +410,6 @@ func newServiceMetadataMiddleware_opAssumeRole(region string) *awsmiddleware.Reg return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sts", OperationName: "AssumeRole", } } @@ -433,126 +437,3 @@ func (c *PresignClient) PresignAssumeRole(ctx context.Context, params *AssumeRol out := result.(*v4.PresignedHTTPRequest) return out, nil } - -type opAssumeRoleResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opAssumeRoleResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opAssumeRoleResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, 
default to sigv4 - signingName := "sts" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sts" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sts") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addAssumeRoleResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opAssumeRoleResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go index 9c33720d4..ef576b640 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -4,13 +4,9 @@ package sts import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sts/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -281,6 +277,9 @@ type AssumeRoleWithSAMLOutput struct { } func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithSAML{}, middleware.After) if err != 
nil { return err @@ -289,6 +288,10 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoleWithSAML"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -322,7 +325,7 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addAssumeRoleWithSAMLResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil { @@ -343,7 +346,7 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -353,130 +356,6 @@ func newServiceMetadataMiddleware_opAssumeRoleWithSAML(region string) *awsmiddle return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sts", OperationName: "AssumeRoleWithSAML", } } - -type opAssumeRoleWithSAMLResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opAssumeRoleWithSAMLResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opAssumeRoleWithSAMLResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return 
next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sts" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sts" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if 
v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sts") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addAssumeRoleWithSAMLResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opAssumeRoleWithSAMLResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go index fa4a60845..b2f126b1d 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -4,13 +4,9 @@ package sts import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sts/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -300,6 +296,9 @@ type AssumeRoleWithWebIdentityOutput struct { } func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithWebIdentity{}, middleware.After) if err != nil { return err @@ -308,6 +307,10 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoleWithWebIdentity"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -341,7 +344,7 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addAssumeRoleWithWebIdentityResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil { @@ -362,7 +365,7 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew if err = addRequestResponseLogging(stack, 
options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -372,130 +375,6 @@ func newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(region string) *aw return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sts", OperationName: "AssumeRoleWithWebIdentity", } } - -type opAssumeRoleWithWebIdentityResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opAssumeRoleWithWebIdentityResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opAssumeRoleWithWebIdentityResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme 
is found, default to sigv4 - signingName := "sts" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sts" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sts") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addAssumeRoleWithWebIdentityResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opAssumeRoleWithWebIdentityResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go index baf2f9686..97a00b97d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go @@ -4,13 +4,9 @@ package sts import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -78,6 +74,9 @@ type DecodeAuthorizationMessageOutput struct { } func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpDecodeAuthorizationMessage{}, middleware.After) if err != nil { return err @@ -86,6 +85,10 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DecodeAuthorizationMessage"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -107,9 +110,6 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -125,7 +125,7 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addDecodeAuthorizationMessageResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil { @@ -146,7 +146,7 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -156,130 +156,6 @@ func newServiceMetadataMiddleware_opDecodeAuthorizationMessage(region string) *a return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sts", OperationName: "DecodeAuthorizationMessage", } } - -type opDecodeAuthorizationMessageResolveEndpointMiddleware struct { - EndpointResolver 
EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opDecodeAuthorizationMessageResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opDecodeAuthorizationMessageResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sts" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range 
authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sts" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sts") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addDecodeAuthorizationMessageResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opDecodeAuthorizationMessageResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go index f1dd167da..e01fcebfe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go @@ -4,13 +4,9 @@ package sts import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -71,6 +67,9 @@ type GetAccessKeyInfoOutput struct { } func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpGetAccessKeyInfo{}, 
middleware.After) if err != nil { return err @@ -79,6 +78,10 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetAccessKeyInfo"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -100,9 +103,6 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -118,7 +118,7 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addGetAccessKeyInfoResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil { @@ -139,7 +139,7 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -149,130 +149,6 @@ func newServiceMetadataMiddleware_opGetAccessKeyInfo(region string) *awsmiddlewa return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sts", OperationName: "GetAccessKeyInfo", } } - -type opGetAccessKeyInfoResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opGetAccessKeyInfoResolveEndpointMiddleware) ID() string { - 
return "ResolveEndpointV2" -} - -func (m *opGetAccessKeyInfoResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sts" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) 
- var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sts" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sts") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addGetAccessKeyInfoResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opGetAccessKeyInfoResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go index 66e5d99d4..802969408 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go @@ -4,13 +4,9 @@ package sts import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -66,6 +62,9 @@ type GetCallerIdentityOutput struct { } func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsAwsquery_serializeOpGetCallerIdentity{}, middleware.After) if err != 
nil { return err @@ -74,6 +73,10 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetCallerIdentity"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -95,9 +98,6 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -113,7 +113,7 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addGetCallerIdentityResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil { @@ -131,7 +131,7 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -141,7 +141,6 @@ func newServiceMetadataMiddleware_opGetCallerIdentity(region string) *awsmiddlew return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sts", OperationName: "GetCallerIdentity", } } @@ -169,126 +168,3 @@ func (c *PresignClient) PresignGetCallerIdentity(ctx context.Context, params *Ge out := result.(*v4.PresignedHTTPRequest) return out, nil } - -type 
opGetCallerIdentityResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver -} - -func (*opGetCallerIdentityResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opGetCallerIdentityResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sts" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - 
internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sts" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sts") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addGetCallerIdentityResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opGetCallerIdentityResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go index d577ef686..efaba119c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -4,14 +4,10 @@ package sts import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sts/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -238,6 +234,9 @@ type GetFederationTokenOutput struct { } func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpGetFederationToken{}, middleware.After) if err != nil { return err @@ -246,6 +245,10 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetFederationToken"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -267,9 +270,6 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -285,7 +285,7 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addGetFederationTokenResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil { @@ -306,7 +306,7 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -316,130 +316,6 @@ func newServiceMetadataMiddleware_opGetFederationToken(region string) *awsmiddle return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sts", OperationName: "GetFederationToken", } } - -type opGetFederationTokenResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - BuiltInResolver builtInParameterResolver 
-} - -func (*opGetFederationTokenResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opGetFederationTokenResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sts" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { - case 
*internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sts" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sts") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addGetFederationTokenResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opGetFederationTokenResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go index 7a2345e80..7b07435f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -4,14 +4,10 @@ package sts import ( "context" - "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/sts/types" - smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -124,6 +120,9 @@ type GetSessionTokenOutput struct { } func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = 
stack.Serialize.Add(&awsAwsquery_serializeOpGetSessionToken{}, middleware.After) if err != nil { return err @@ -132,6 +131,10 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetSessionToken"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } @@ -153,9 +156,6 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } @@ -171,7 +171,7 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = addGetSessionTokenResolveEndpointMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil { @@ -189,7 +189,7 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, if err = addRequestResponseLogging(stack, options); err != nil { return err } - if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil @@ -199,130 +199,6 @@ func newServiceMetadataMiddleware_opGetSessionToken(region string) *awsmiddlewar return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "sts", OperationName: "GetSessionToken", } } - -type opGetSessionTokenResolveEndpointMiddleware struct { - EndpointResolver EndpointResolverV2 - 
BuiltInResolver builtInParameterResolver -} - -func (*opGetSessionTokenResolveEndpointMiddleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *opGetSessionTokenResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.EndpointResolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := EndpointParameters{} - - m.BuiltInResolver.ResolveBuiltIns(¶ms) - - var resolvedEndpoint smithyendpoints.Endpoint - resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL = &resolvedEndpoint.URI - - for k := range resolvedEndpoint.Headers { - req.Header.Set( - k, - resolvedEndpoint.Headers.Get(k), - ) - } - - authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) - if err != nil { - var nfe *internalauth.NoAuthenticationSchemesFoundError - if errors.As(err, &nfe) { - // if no auth scheme is found, default to sigv4 - signingName := "sts" - signingRegion := m.BuiltInResolver.(*builtInResolver).Region - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - - } - var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError - if errors.As(err, &ue) { - return out, metadata, fmt.Errorf( - "This operation requests signer version(s) %v but the client only supports %v", - ue.UnsupportedSchemes, - internalauth.SupportedSchemes, - ) - } - } - - for _, authScheme := range authSchemes { - switch authScheme.(type) { 
- case *internalauth.AuthenticationSchemeV4: - v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) - var signingName, signingRegion string - if v4Scheme.SigningName == nil { - signingName = "sts" - } else { - signingName = *v4Scheme.SigningName - } - if v4Scheme.SigningRegion == nil { - signingRegion = m.BuiltInResolver.(*builtInResolver).Region - } else { - signingRegion = *v4Scheme.SigningRegion - } - if v4Scheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. - ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) - break - case *internalauth.AuthenticationSchemeV4A: - v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) - if v4aScheme.SigningName == nil { - v4aScheme.SigningName = aws.String("sts") - } - if v4aScheme.DisableDoubleEncoding != nil { - // The signer sets an equivalent value at client initialization time. - // Setting this context value will cause the signer to extract it - // and override the value set at client initialization time. 
- ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) - } - ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) - ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) - break - case *internalauth.AuthenticationSchemeNone: - break - } - } - - return next.HandleSerialize(ctx, in) -} - -func addGetSessionTokenResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert(&opGetSessionTokenResolveEndpointMiddleware{ - EndpointResolver: options.EndpointResolverV2, - BuiltInResolver: &builtInResolver{ - Region: options.Region, - UseDualStack: options.EndpointOptions.UseDualStackEndpoint, - UseFIPS: options.EndpointOptions.UseFIPSEndpoint, - Endpoint: options.BaseEndpoint, - }, - }, "ResolveEndpoint", middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go new file mode 100644 index 000000000..d8b6f5357 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go @@ -0,0 +1,290 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sts + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { + params.Region = options.Region +} + +type setLegacyContextSigningOptionsMiddleware struct { +} + +func (*setLegacyContextSigningOptionsMiddleware) ID() string { + return "setLegacyContextSigningOptions" +} + +func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + schemeID := rscheme.Scheme.SchemeID() + + if sn := awsmiddleware.GetSigningName(ctx); sn != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) + } + } + + if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) + } + } + + return next.HandleFinalize(ctx, in) +} + +func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { + return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) +} + +// AuthResolverParameters contains the set of inputs necessary for auth scheme +// resolution. +type AuthResolverParameters struct { + // The name of the operation being invoked. + Operation string + + // The region in which the operation is being invoked. 
+ Region string +} + +func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { + params := &AuthResolverParameters{ + Operation: operation, + } + + bindAuthParamsRegion(params, input, options) + + return params +} + +// AuthSchemeResolver returns a set of possible authentication options for an +// operation. +type AuthSchemeResolver interface { + ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) +} + +type defaultAuthSchemeResolver struct{} + +var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) + +func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + if overrides, ok := operationAuthOptions[params.Operation]; ok { + return overrides(params), nil + } + return serviceAuthOptions(params), nil +} + +var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ + "AssumeRoleWithSAML": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + { + SchemeID: smithyauth.SchemeIDSigV4, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "sts") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + + return props + }(), + }, + + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, + + "AssumeRoleWithWebIdentity": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + { + SchemeID: smithyauth.SchemeIDSigV4, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "sts") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + + return props + }(), + }, + + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, +} + +func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + { + SchemeID: smithyauth.SchemeIDSigV4, + 
SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "sts") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + return props + }(), + }, + } +} + +type resolveAuthSchemeMiddleware struct { + operation string + options Options +} + +func (*resolveAuthSchemeMiddleware) ID() string { + return "ResolveAuthScheme" +} + +func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("resolve auth scheme: %v", err) + } + + scheme, ok := m.selectScheme(options) + if !ok { + return out, metadata, fmt.Errorf("could not select an auth scheme") + } + + ctx = setResolvedAuthScheme(ctx, scheme) + return next.HandleFinalize(ctx, in) +} + +func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { + for _, option := range options { + if option.SchemeID == smithyauth.SchemeIDAnonymous { + return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true + } + + for _, scheme := range m.options.AuthSchemes { + if scheme.SchemeID() != option.SchemeID { + continue + } + + if scheme.IdentityResolver(m.options) != nil { + return newResolvedAuthScheme(scheme, option), true + } + } + } + + return nil, false +} + +type resolvedAuthSchemeKey struct{} + +type resolvedAuthScheme struct { + Scheme smithyhttp.AuthScheme + IdentityProperties smithy.Properties + SignerProperties smithy.Properties +} + +func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { + return &resolvedAuthScheme{ + Scheme: scheme, + IdentityProperties: option.IdentityProperties, + 
SignerProperties: option.SignerProperties, + } +} + +func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { + return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) +} + +func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { + v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) + return v +} + +type getIdentityMiddleware struct { + options Options +} + +func (*getIdentityMiddleware) ID() string { + return "GetIdentity" +} + +func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + resolver := rscheme.Scheme.IdentityResolver(m.options) + if resolver == nil { + return out, metadata, fmt.Errorf("no identity resolver") + } + + identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) + if err != nil { + return out, metadata, fmt.Errorf("get identity: %v", err) + } + + ctx = setIdentity(ctx, identity) + return next.HandleFinalize(ctx, in) +} + +type identityKey struct{} + +func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { + return middleware.WithStackValue(ctx, identityKey{}, identity) +} + +func getIdentity(ctx context.Context) smithyauth.Identity { + v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) + return v +} + +type signRequestMiddleware struct { +} + +func (*signRequestMiddleware) ID() string { + return "Signing" +} + +func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, 
fmt.Errorf("unexpected transport type %T", in.Request) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + identity := getIdentity(ctx) + if identity == nil { + return out, metadata, fmt.Errorf("no identity") + } + + signer := rscheme.Scheme.Signer() + if signer == nil { + return out, metadata, fmt.Errorf("no signer") + } + + if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { + return out, metadata, fmt.Errorf("sign request: %v", err) + } + + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go index cb5d56fd9..9f7932f9a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go @@ -9,9 +9,11 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/endpoints" "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" internalendpoints "github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" @@ -215,77 +217,6 @@ func resolveBaseEndpoint(cfg aws.Config, o *Options) { } } -// Utility function to aid with translating pseudo-regions to classical regions -// with the appropriate setting indicated by the pseudo-region -func mapPseudoRegion(pr string) (region string, fips aws.FIPSEndpointState) { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(pr, fipsInfix) || - strings.Contains(pr, fipsPrefix) || - strings.Contains(pr, 
fipsSuffix) { - region = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - pr, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - fips = aws.FIPSEndpointStateEnabled - } else { - region = pr - } - - return region, fips -} - -// builtInParameterResolver is the interface responsible for resolving BuiltIn -// values during the sourcing of EndpointParameters -type builtInParameterResolver interface { - ResolveBuiltIns(*EndpointParameters) error -} - -// builtInResolver resolves modeled BuiltIn values using only the members defined -// below. -type builtInResolver struct { - // The AWS region used to dispatch the request. - Region string - - // Sourced BuiltIn value in a historical enabled or disabled state. - UseDualStack aws.DualStackEndpointState - - // Sourced BuiltIn value in a historical enabled or disabled state. - UseFIPS aws.FIPSEndpointState - - // Base endpoint that can potentially be modified during Endpoint resolution. - Endpoint *string - - // Whether the global endpoint should be used, rather then the regional endpoint - // for us-east-1. - UseGlobalEndpoint bool -} - -// Invoked at runtime to resolve BuiltIn Values. Only resolution code specific to -// each BuiltIn value is generated. -func (b *builtInResolver) ResolveBuiltIns(params *EndpointParameters) error { - - region, _ := mapPseudoRegion(b.Region) - if len(region) == 0 { - return fmt.Errorf("Could not resolve AWS::Region") - } else { - params.Region = aws.String(region) - } - if b.UseDualStack == aws.DualStackEndpointStateEnabled { - params.UseDualStack = aws.Bool(true) - } else { - params.UseDualStack = aws.Bool(false) - } - if b.UseFIPS == aws.FIPSEndpointStateEnabled { - params.UseFIPS = aws.Bool(true) - } else { - params.UseFIPS = aws.Bool(false) - } - params.Endpoint = b.Endpoint - params.UseGlobalEndpoint = aws.Bool(b.UseGlobalEndpoint) - return nil -} - // EndpointParameters provides the parameters that influence how endpoints are // resolved. 
type EndpointParameters struct { @@ -422,11 +353,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -446,11 +383,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -470,11 +413,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -494,11 +443,17 @@ func (r *resolver) ResolveEndpoint( 
Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -518,11 +473,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -542,11 +503,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -566,11 +533,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - 
out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -590,11 +563,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -614,11 +593,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -638,11 +623,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - 
"signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -662,11 +653,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -686,11 +683,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -710,11 +713,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, 
[]*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -734,11 +743,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -758,11 +773,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -782,11 +803,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() 
smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -812,11 +839,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": _Region, + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), }, }) return out @@ -883,8 +916,8 @@ func (r *resolver) ResolveEndpoint( } } if _UseFIPS == true { - if true == _PartitionResult.SupportsFIPS { - if "aws-us-gov" == _PartitionResult.Name { + if _PartitionResult.SupportsFIPS == true { + if _PartitionResult.Name == "aws-us-gov" { uriString := func() string { var out strings.Builder out.WriteString("https://sts.") @@ -960,11 +993,17 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, Properties: func() smithy.Properties { var out smithy.Properties - out.Set("authSchemes", []interface{}{ - map[string]interface{}{ - "name": "sigv4", - "signingName": "sts", - "signingRegion": "us-east-1", + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), }, }) return out @@ -994,3 +1033,76 @@ func (r *resolver) ResolveEndpoint( } return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") } + 
+type endpointParamsBinder interface { + bindEndpointParams(*EndpointParameters) +} + +func bindEndpointParams(input interface{}, options Options) *EndpointParameters { + params := &EndpointParameters{} + + params.Region = aws.String(endpoints.MapFIPSRegion(options.Region)) + params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) + params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) + params.Endpoint = options.BaseEndpoint + + if b, ok := input.(endpointParamsBinder); ok { + b.bindEndpointParams(params) + } + + return params +} + +type resolveEndpointV2Middleware struct { + options Options +} + +func (*resolveEndpointV2Middleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.options.EndpointResolverV2 == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := bindEndpointParams(getOperationInput(ctx), m.options) + endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { + endpt.URI.RawPath = endpt.URI.Path + } + req.URL.Scheme = endpt.URI.Scheme + req.URL.Host = endpt.URI.Host + req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) + req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) + for k := range endpt.Headers { + req.Header.Set(k, 
endpt.Headers.Get(k)) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) + for _, o := range opts { + rscheme.SignerProperties.SetAll(&o.SignerProperties) + } + + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json index e44e7d149..d90b8bce4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json @@ -3,6 +3,7 @@ "github.com/aws/aws-sdk-go-v2": "v1.4.0", "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5", "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", "github.com/aws/smithy-go": "v1.4.0", "github.com/google/go-cmp": "v0.5.4" @@ -18,6 +19,7 @@ "api_op_GetCallerIdentity.go", "api_op_GetFederationToken.go", "api_op_GetSessionToken.go", + "auth.go", "deserializers.go", "doc.go", "endpoints.go", @@ -26,6 +28,7 @@ "generated.json", "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", + "options.go", "protocol_test.go", "serializers.go", "types/errors.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index dac009e1c..acfa6d15e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.25.0" +const goModuleVersion = "1.25.3" diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go new file mode 100644 index 000000000..1736c8355 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go @@ -0,0 +1,219 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" +) + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. 
+ // + // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom + // endpoint, set the client option BaseEndpoint instead. + EndpointResolver EndpointResolver + + // Resolves the endpoint used for a particular service operation. This should be + // used over the deprecated EndpointResolver. + EndpointResolverV2 EndpointResolverV2 + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. 
+ RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient + + // The auth scheme resolver which determines how to authenticate for each + // operation. + AuthSchemeResolver AuthSchemeResolver + + // The list of auth schemes supported by the client. + AuthSchemes []smithyhttp.AuthScheme +} + +// Copy creates a clone where the APIOptions list is deep copied. +func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} + +func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { + if schemeID == "aws.auth#sigv4" { + return getSigV4IdentityResolver(o) + } + if schemeID == "smithy.api#noAuth" { + return &smithyauth.AnonymousIdentityResolver{} + } + return nil +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. 
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. +func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { + if o.Credentials != nil { + return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} + } + return nil +} + +// WithSigV4SigningName applies an override to the authentication workflow to +// use the given signing name for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing name from both auth scheme resolution and endpoint +// resolution. 
+func WithSigV4SigningName(name string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), + middleware.Before, + ) + }) + } +} + +// WithSigV4SigningRegion applies an override to the authentication workflow to +// use the given signing region for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing region from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningRegion(region string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), + middleware.Before, + ) + }) + } +} + +func ignoreAnonymousAuth(options *Options) { + if _, ok := options.Credentials.(aws.AnonymousCredentials); ok { + options.Credentials = nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go index 572a70512..e3701d11d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go @@ -89,13 +89,17 @@ type PolicyDescriptorType struct { noSmithyDocumentSerde } -// Reserved 
for future use. +// Contains information about the provided context. This includes the signed and +// encrypted trusted context assertion and the context provider ARN from which the +// trusted context assertion was generated. type ProvidedContext struct { - // Reserved for future use. + // The signed and encrypted trusted context assertion generated by the context + // provider. The trusted context assertion is signed and encrypted by Amazon Web + // Services STS. ContextAssertion *string - // Reserved for future use. + // The context provider ARN from which the trusted context assertion was generated. ProviderArn *string noSmithyDocumentSerde diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index 9cca07b55..e6afaccfc 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,9 @@ +# Release (2023-11-15) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.17.0 + * **Feature**: Support identity/auth components of client reference architecture. + # Release (2023-10-31) ## Module Highlights diff --git a/vendor/github.com/aws/smithy-go/auth/auth.go b/vendor/github.com/aws/smithy-go/auth/auth.go new file mode 100644 index 000000000..5bdb70c9a --- /dev/null +++ b/vendor/github.com/aws/smithy-go/auth/auth.go @@ -0,0 +1,3 @@ +// Package auth defines protocol-agnostic authentication types for smithy +// clients. +package auth diff --git a/vendor/github.com/aws/smithy-go/auth/identity.go b/vendor/github.com/aws/smithy-go/auth/identity.go new file mode 100644 index 000000000..ba8cf70d4 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/auth/identity.go @@ -0,0 +1,47 @@ +package auth + +import ( + "context" + "time" + + "github.com/aws/smithy-go" +) + +// Identity contains information that identifies who the user making the +// request is. 
+type Identity interface { + Expiration() time.Time +} + +// IdentityResolver defines the interface through which an Identity is +// retrieved. +type IdentityResolver interface { + GetIdentity(context.Context, smithy.Properties) (Identity, error) +} + +// IdentityResolverOptions defines the interface through which an entity can be +// queried to retrieve an IdentityResolver for a given auth scheme. +type IdentityResolverOptions interface { + GetIdentityResolver(schemeID string) IdentityResolver +} + +// AnonymousIdentity is a sentinel to indicate no identity. +type AnonymousIdentity struct{} + +var _ Identity = (*AnonymousIdentity)(nil) + +// Expiration returns the zero value for time, as anonymous identity never +// expires. +func (*AnonymousIdentity) Expiration() time.Time { + return time.Time{} +} + +// AnonymousIdentityResolver returns AnonymousIdentity. +type AnonymousIdentityResolver struct{} + +var _ IdentityResolver = (*AnonymousIdentityResolver)(nil) + +// GetIdentity returns AnonymousIdentity. +func (*AnonymousIdentityResolver) GetIdentity(_ context.Context, _ smithy.Properties) (Identity, error) { + return &AnonymousIdentity{}, nil +} diff --git a/vendor/github.com/aws/smithy-go/auth/option.go b/vendor/github.com/aws/smithy-go/auth/option.go new file mode 100644 index 000000000..d5dabff04 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/auth/option.go @@ -0,0 +1,25 @@ +package auth + +import "github.com/aws/smithy-go" + +type ( + authOptionsKey struct{} +) + +// Option represents a possible authentication method for an operation. +type Option struct { + SchemeID string + IdentityProperties smithy.Properties + SignerProperties smithy.Properties +} + +// GetAuthOptions gets auth Options from Properties. +func GetAuthOptions(p *smithy.Properties) ([]*Option, bool) { + v, ok := p.Get(authOptionsKey{}).([]*Option) + return v, ok +} + +// SetAuthOptions sets auth Options on Properties. 
+func SetAuthOptions(p *smithy.Properties, options []*Option) { + p.Set(authOptionsKey{}, options) +} diff --git a/vendor/github.com/aws/smithy-go/auth/scheme_id.go b/vendor/github.com/aws/smithy-go/auth/scheme_id.go new file mode 100644 index 000000000..fb6a57c64 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/auth/scheme_id.go @@ -0,0 +1,20 @@ +package auth + +// Anonymous +const ( + SchemeIDAnonymous = "smithy.api#noAuth" +) + +// HTTP auth schemes +const ( + SchemeIDHTTPBasic = "smithy.api#httpBasicAuth" + SchemeIDHTTPDigest = "smithy.api#httpDigestAuth" + SchemeIDHTTPBearer = "smithy.api#httpBearerAuth" + SchemeIDHTTPAPIKey = "smithy.api#httpApiKeyAuth" +) + +// AWS auth schemes +const ( + SchemeIDSigV4 = "aws.auth#sigv4" + SchemeIDSigV4A = "aws.auth#sigv4a" +) diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index d96be806d..184aaae1c 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.0" +const goModuleVersion = "1.17.0" diff --git a/vendor/github.com/aws/smithy-go/properties.go b/vendor/github.com/aws/smithy-go/properties.go index 17d659c53..c9af66c0e 100644 --- a/vendor/github.com/aws/smithy-go/properties.go +++ b/vendor/github.com/aws/smithy-go/properties.go @@ -7,12 +7,10 @@ type PropertiesReader interface { } // Properties provides storing and reading metadata values. Keys may be any -// comparable value type. Get and set will panic if key is not a comparable -// value type. +// comparable value type. Get and Set will panic if a key is not comparable. // -// Properties uses lazy initialization, and Set method must be called as an -// addressable value, or pointer. Not doing so may cause key/value pair to not -// be set. 
+// The zero value for a Properties instance is ready for reads/writes without +// any additional initialization. type Properties struct { values map[interface{}]interface{} } @@ -22,21 +20,16 @@ type Properties struct { // // Panics if key type is not comparable. func (m *Properties) Get(key interface{}) interface{} { + m.lazyInit() return m.values[key] } // Set stores the value pointed to by the key. If a value already exists at // that key it will be replaced with the new value. // -// Set method must be called as an addressable value, or pointer. If Set is not -// called as an addressable value or pointer, the key value pair being set may -// be lost. -// // Panics if the key type is not comparable. func (m *Properties) Set(key, value interface{}) { - if m.values == nil { - m.values = map[interface{}]interface{}{} - } + m.lazyInit() m.values[key] = value } @@ -44,9 +37,26 @@ func (m *Properties) Set(key, value interface{}) { // // Panics if the key type is not comparable. func (m *Properties) Has(key interface{}) bool { - if m.values == nil { - return false - } + m.lazyInit() _, ok := m.values[key] return ok } + +// SetAll accepts all of the given Properties into the receiver, overwriting +// any existing keys in the case of conflicts. +func (m *Properties) SetAll(other *Properties) { + if other.values == nil { + return + } + + m.lazyInit() + for k, v := range other.values { + m.values[k] = v + } +} + +func (m *Properties) lazyInit() { + if m.values == nil { + m.values = map[interface{}]interface{}{} + } +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/auth.go b/vendor/github.com/aws/smithy-go/transport/http/auth.go new file mode 100644 index 000000000..58e1ab5ef --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/auth.go @@ -0,0 +1,21 @@ +package http + +import ( + "context" + + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" +) + +// AuthScheme defines an HTTP authentication scheme. 
+type AuthScheme interface { + SchemeID() string + IdentityResolver(auth.IdentityResolverOptions) auth.IdentityResolver + Signer() Signer +} + +// Signer defines the interface through which HTTP requests are supplemented +// with an Identity. +type Signer interface { + SignRequest(context.Context, *Request, auth.Identity, smithy.Properties) error +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go b/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go new file mode 100644 index 000000000..d60cf2a60 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go @@ -0,0 +1,45 @@ +package http + +import ( + "context" + + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" +) + +// NewAnonymousScheme returns the anonymous HTTP auth scheme. +func NewAnonymousScheme() AuthScheme { + return &authScheme{ + schemeID: auth.SchemeIDAnonymous, + signer: &nopSigner{}, + } +} + +// authScheme is parameterized to generically implement the exported AuthScheme +// interface +type authScheme struct { + schemeID string + signer Signer +} + +var _ AuthScheme = (*authScheme)(nil) + +func (s *authScheme) SchemeID() string { + return s.schemeID +} + +func (s *authScheme) IdentityResolver(o auth.IdentityResolverOptions) auth.IdentityResolver { + return o.GetIdentityResolver(s.schemeID) +} + +func (s *authScheme) Signer() Signer { + return s.signer +} + +type nopSigner struct{} + +var _ Signer = (*nopSigner)(nil) + +func (*nopSigner) SignRequest(context.Context, *Request, auth.Identity, smithy.Properties) error { + return nil +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/properties.go b/vendor/github.com/aws/smithy-go/transport/http/properties.go new file mode 100644 index 000000000..c65aa3932 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/properties.go @@ -0,0 +1,80 @@ +package http + +import smithy "github.com/aws/smithy-go" + +type ( + sigV4SigningNameKey struct{} + 
sigV4SigningRegionKey struct{} + + sigV4ASigningNameKey struct{} + sigV4ASigningRegionsKey struct{} + + isUnsignedPayloadKey struct{} + disableDoubleEncodingKey struct{} +) + +// GetSigV4SigningName gets the signing name from Properties. +func GetSigV4SigningName(p *smithy.Properties) (string, bool) { + v, ok := p.Get(sigV4SigningNameKey{}).(string) + return v, ok +} + +// SetSigV4SigningName sets the signing name on Properties. +func SetSigV4SigningName(p *smithy.Properties, name string) { + p.Set(sigV4SigningNameKey{}, name) +} + +// GetSigV4SigningRegion gets the signing region from Properties. +func GetSigV4SigningRegion(p *smithy.Properties) (string, bool) { + v, ok := p.Get(sigV4SigningRegionKey{}).(string) + return v, ok +} + +// SetSigV4SigningRegion sets the signing region on Properties. +func SetSigV4SigningRegion(p *smithy.Properties, region string) { + p.Set(sigV4SigningRegionKey{}, region) +} + +// GetSigV4ASigningName gets the v4a signing name from Properties. +func GetSigV4ASigningName(p *smithy.Properties) (string, bool) { + v, ok := p.Get(sigV4ASigningNameKey{}).(string) + return v, ok +} + +// SetSigV4ASigningName sets the signing name on Properties. +func SetSigV4ASigningName(p *smithy.Properties, name string) { + p.Set(sigV4ASigningNameKey{}, name) +} + +// GetSigV4ASigningRegion gets the v4a signing region set from Properties. +func GetSigV4ASigningRegions(p *smithy.Properties) ([]string, bool) { + v, ok := p.Get(sigV4ASigningRegionsKey{}).([]string) + return v, ok +} + +// SetSigV4ASigningRegions sets the v4a signing region set on Properties. +func SetSigV4ASigningRegions(p *smithy.Properties, regions []string) { + p.Set(sigV4ASigningRegionsKey{}, regions) +} + +// GetIsUnsignedPayload gets whether the payload is unsigned from Properties. +func GetIsUnsignedPayload(p *smithy.Properties) (bool, bool) { + v, ok := p.Get(isUnsignedPayloadKey{}).(bool) + return v, ok +} + +// SetIsUnsignedPayload sets whether the payload is unsigned on Properties. 
+func SetIsUnsignedPayload(p *smithy.Properties, isUnsignedPayload bool) { + p.Set(isUnsignedPayloadKey{}, isUnsignedPayload) +} + +// GetDisableDoubleEncoding gets whether the payload is unsigned from Properties. +func GetDisableDoubleEncoding(p *smithy.Properties) (bool, bool) { + v, ok := p.Get(disableDoubleEncodingKey{}).(bool) + return v, ok +} + +// SetDisableDoubleEncoding sets whether the payload is unsigned on Properties. +func SetDisableDoubleEncoding(p *smithy.Properties, disableDoubleEncoding bool) { + p.Set(disableDoubleEncodingKey{}, disableDoubleEncoding) +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/CHANGELOG.md b/vendor/github.com/elastic/go-elasticsearch/v8/CHANGELOG.md index ff39bc003..75492746e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/CHANGELOG.md +++ b/vendor/github.com/elastic/go-elasticsearch/v8/CHANGELOG.md @@ -1,3 +1,94 @@ +# 8.11.0 + +## API + +**Experimental APIs** + +* `EsqlQuery` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-query-api.html) +* `InferenceDeleteModel` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-inference-api.html) +* `InferenceGetModel` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/get-inference-api.html) +* `InferenceInference` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html) +* `InferencePutModel` [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/put-inference-api.html) + +## Typed API + +* Mandatory URL parameters are not exposed as functions anymore as they already exist in the constructor. + +# New Compatibility Policy + +Starting from version `8.12.0`, this library follow the Go language [policy](https://go.dev/doc/devel/release#policy). Each major Go release is supported until there are two newer major releases. 
For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release. + +If you have any questions or concerns, please do not hesitate to reach out to us. + +# 8.10.1 + +## Typed API + +Update APIs to latest [elasticsearch-specification 8.10](https://github.com/elastic/elasticsearch-specification/commit/3b09f9d8e90178243f8a340a7bc324aab152c602) + +# 8.10.0 + +## API +**Experimental APIs for internal use** +* `FleetDeleteSecret` +* `FleetGetSecret` +* `FleetPostSecret` + +**Exprimental APIs** + +`QueryRulesetList` + +**Stable APIs** + +`Security.GetSettings` +`Security.UpdateSettings` + +## Typed API +**Exprimental APIs** + +`QueryRuleset.List` + +**Technical Preview** +* [QueryRuleSet](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-rules-apis.html) + +**Beta** +* [Synonyms](https://www.elastic.co/guide/en/elasticsearch/reference/current/synonyms-apis.html) + +# 8.9.0 + +## API +**New API** + +* `Cluster.Info` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-info.html) + +**Experimental APIs** + +* `QueryRulesetGet` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/get-query-ruleset.html) +* `QueryRulesetDelete` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-ruleset.html) +* `QueryRulesetPut` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-ruleset.html) +* `SearchApplicationRenderQuery` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-render-query.html) +* `Security.CreateCrossClusterAPIKey` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-cross-cluster-api-key.html) +* `Security.UpdateCrossClusterAPIKey` [Documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-cross-cluster-api-key.html) + +## Typed API + +* 
Propagated request fields towards the endpoint for ease of access, taking priority over same-name query string fields. +* Added a stub for Do methods on endpoints that only support a boolean response such as `core.exists`. +* NDJSON endpoints support with custom serialization like `core.bulk`. +* Link to endpoints documentation in API index to better display and ease of use. + +**fixes** + +* Fixed a deserialization issue for `Property` & `Analyzer` #696 + +# 8.8.2 + +## Typed API + +* Fixed deserialization for `Suggest` in search responses. +* Fixed double-quoted strings in deserialization for unions normalized as string. #684 +* Fixed handling of `core.Get` response when the index did not exist. #678 + # 8.7.0 ## API diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/Makefile b/vendor/github.com/elastic/go-elasticsearch/v8/Makefile index d4b27d761..ae1920291 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/Makefile +++ b/vendor/github.com/elastic/go-elasticsearch/v8/Makefile @@ -1,6 +1,6 @@ SHELL := /bin/bash -ELASTICSEARCH_DEFAULT_BUILD_VERSION = "8.7.0-SNAPSHOT" +ELASTICSEARCH_DEFAULT_BUILD_VERSION = "8.11.0-SNAPSHOT" ##@ Test test-unit: ## Run unit tests @@ -8,7 +8,11 @@ test-unit: ## Run unit tests ifdef race $(eval testunitargs += "-race") endif +ifeq ($(OS),Windows_NT) + $(eval testunitargs += "./...") +else $(eval testunitargs += "-cover" "-coverprofile=tmp/unit.cov" "./...") +endif @mkdir -p tmp @if which gotestsum > /dev/null 2>&1 ; then \ echo "gotestsum --format=short-verbose --junitfile=tmp/unit-report.xml --" $(testunitargs); \ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/README.md b/vendor/github.com/elastic/go-elasticsearch/v8/README.md index c6a721573..f8b674e70 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/README.md +++ b/vendor/github.com/elastic/go-elasticsearch/v8/README.md @@ -10,7 +10,13 @@ The official Go client for [Elasticsearch](https://www.elastic.co/products/elast 
[![Integration](https://github.com/elastic/go-elasticsearch/workflows/Integration/badge.svg)](https://github.com/elastic/go-elasticsearch/actions?query=branch%3Amain) [![API](https://github.com/elastic/go-elasticsearch/workflows/API/badge.svg)](https://github.com/elastic/go-elasticsearch/actions?query=branch%3Amain) -## Compatibility +# Compatibility + +## Go + +Starting from version `8.12.0`, this library follow the Go language [policy](https://go.dev/doc/devel/release#policy). Each major Go release is supported until there are two newer major releases. For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release. + +## Elasticsearch Language clients are forward compatible; meaning that clients support communicating with greater or equal minor versions of Elasticsearch. Elasticsearch language clients are only backwards compatible with default distributions and without guarantees made. @@ -41,335 +47,25 @@ The `main` branch of the client is compatible with the current `master` branch o ## Installation -Add the package to your `go.mod` file: - - require github.com/elastic/go-elasticsearch/v8 main - -Or, clone the repository: - - git clone --branch main https://github.com/elastic/go-elasticsearch.git $GOPATH/src/github.com/elastic/go-elasticsearch - -A complete example: - -```bash -mkdir my-elasticsearch-app && cd my-elasticsearch-app - -cat > go.mod <<-END - module my-elasticsearch-app - - require github.com/elastic/go-elasticsearch/v8 main -END - -cat > main.go <<-END - package main - - import ( - "log" - - "github.com/elastic/go-elasticsearch/v8" - ) - - func main() { - es, _ := elasticsearch.NewDefaultClient() - log.Println(elasticsearch.Version) - log.Println(es.Info()) - } -END - -go run main.go -``` - +Refer to the [Installation section](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_installation) +of the getting started documentation. 
-## Usage - -The `elasticsearch` package ties together two separate packages for calling the Elasticsearch APIs and transferring data over HTTP: `esapi` and `elastictransport`, respectively. - -Use the `elasticsearch.NewDefaultClient()` function to create the client with the default settings. - -```golang -es, err := elasticsearch.NewDefaultClient() -if err != nil { - log.Fatalf("Error creating the client: %s", err) -} - -res, err := es.Info() -if err != nil { - log.Fatalf("Error getting response: %s", err) -} - -defer res.Body.Close() -log.Println(res) - -// [200 OK] { -// "name" : "node-1", -// "cluster_name" : "go-elasticsearch" -// ... -``` - -> NOTE: It is _critical_ to both close the response body _and_ to consume it, in order to re-use persistent TCP connections in the default HTTP transport. If you're not interested in the response body, call `io.Copy(ioutil.Discard, res.Body)`. - -When you export the `ELASTICSEARCH_URL` environment variable, -it will be used to set the cluster endpoint(s). Separate multiple addresses by a comma. - -To set the cluster endpoint(s) programmatically, pass a configuration object -to the `elasticsearch.NewClient()` function. - -```golang -cfg := elasticsearch.Config{ - Addresses: []string{ - "https://localhost:9200", - "https://localhost:9201", - }, - // ... -} -es, err := elasticsearch.NewClient(cfg) -``` - -To set the username and password, include them in the endpoint URL, -or use the corresponding configuration options. - -```golang -cfg := elasticsearch.Config{ - // ... - Username: "foo", - Password: "bar", -} -``` - -To set a custom certificate authority used to sign the certificates of cluster nodes, -use the `CACert` configuration option. - -```golang -cert, _ := ioutil.ReadFile(*cacert) - -cfg := elasticsearch.Config{ - // ... - CACert: cert, -} -``` - -To set a fingerprint to validate the HTTPS connection use the `CertificateFingerprint` configuration option. - -```golang -cfg := elasticsearch.Config{ - // ... 
- CertificateFingerprint: fingerPrint, -} -``` - -To configure other HTTP settings, pass an [`http.Transport`](https://golang.org/pkg/net/http/#Transport) -object in the configuration object. - -```golang -cfg := elasticsearch.Config{ - Transport: &http.Transport{ - MaxIdleConnsPerHost: 10, - ResponseHeaderTimeout: time.Second, - TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, - // ... - }, - // ... - }, -} -``` - -See the [`_examples/configuration.go`](_examples/configuration.go) and -[`_examples/customization.go`](_examples/customization.go) files for -more examples of configuration and customization of the client. -See the [`_examples/security`](_examples/security) for an example of a security configuration. - -The following example demonstrates a more complex usage. It fetches the Elasticsearch version from the cluster, indexes a couple of documents concurrently, and prints the search results, using a lightweight wrapper around the response body. - -```golang -// $ go run _examples/main.go - -package main - -import ( - "bytes" - "context" - "encoding/json" - "log" - "strconv" - "strings" - "sync" - - "github.com/elastic/go-elasticsearch/v8" - "github.com/elastic/go-elasticsearch/v8/esapi" -) - -func main() { - log.SetFlags(0) - - var ( - r map[string]interface{} - wg sync.WaitGroup - ) - - // Initialize a client with the default settings. - // - // An `ELASTICSEARCH_URL` environment variable will be used when exported. - // - es, err := elasticsearch.NewDefaultClient() - if err != nil { - log.Fatalf("Error creating the client: %s", err) - } - - // 1. Get cluster info - // - res, err := es.Info() - if err != nil { - log.Fatalf("Error getting response: %s", err) - } - defer res.Body.Close() - // Check response status - if res.IsError() { - log.Fatalf("Error: %s", res.String()) - } - // Deserialize the response into a map. 
- if err := json.NewDecoder(res.Body).Decode(&r); err != nil { - log.Fatalf("Error parsing the response body: %s", err) - } - // Print client and server version numbers. - log.Printf("Client: %s", elasticsearch.Version) - log.Printf("Server: %s", r["version"].(map[string]interface{})["number"]) - log.Println(strings.Repeat("~", 37)) - - // 2. Index documents concurrently - // - for i, title := range []string{"Test One", "Test Two"} { - wg.Add(1) - - go func(i int, title string) { - defer wg.Done() - - // Build the request body. - data, err := json.Marshal(struct { - Title string `json:"title"` - }{Title: title}) - if err != nil { - log.Fatalf("Error marshaling document: %s", err) - } - - // Set up the request object. - req := esapi.IndexRequest{ - Index: "test", - DocumentID: strconv.Itoa(i + 1), - Body: bytes.NewReader(data), - Refresh: "true", - } - - // Perform the request with the client. - res, err := req.Do(context.Background(), es) - if err != nil { - log.Fatalf("Error getting response: %s", err) - } - defer res.Body.Close() - - if res.IsError() { - log.Printf("[%s] Error indexing document ID=%d", res.Status(), i+1) - } else { - // Deserialize the response into a map. - var r map[string]interface{} - if err := json.NewDecoder(res.Body).Decode(&r); err != nil { - log.Printf("Error parsing the response body: %s", err) - } else { - // Print the response status and indexed document version. - log.Printf("[%s] %s; version=%d", res.Status(), r["result"], int(r["_version"].(float64))) - } - } - }(i, title) - } - wg.Wait() - - log.Println(strings.Repeat("-", 37)) - - // 3. Search for the indexed documents - // - // Build the request body. - var buf bytes.Buffer - query := map[string]interface{}{ - "query": map[string]interface{}{ - "match": map[string]interface{}{ - "title": "test", - }, - }, - } - if err := json.NewEncoder(&buf).Encode(query); err != nil { - log.Fatalf("Error encoding query: %s", err) - } - - // Perform the search request. 
- res, err = es.Search( - es.Search.WithContext(context.Background()), - es.Search.WithIndex("test"), - es.Search.WithBody(&buf), - es.Search.WithTrackTotalHits(true), - es.Search.WithPretty(), - ) - if err != nil { - log.Fatalf("Error getting response: %s", err) - } - defer res.Body.Close() - - if res.IsError() { - var e map[string]interface{} - if err := json.NewDecoder(res.Body).Decode(&e); err != nil { - log.Fatalf("Error parsing the response body: %s", err) - } else { - // Print the response status and error information. - log.Fatalf("[%s] %s: %s", - res.Status(), - e["error"].(map[string]interface{})["type"], - e["error"].(map[string]interface{})["reason"], - ) - } - } - - if err := json.NewDecoder(res.Body).Decode(&r); err != nil { - log.Fatalf("Error parsing the response body: %s", err) - } - // Print the response status, number of results, and request duration. - log.Printf( - "[%s] %d hits; took: %dms", - res.Status(), - int(r["hits"].(map[string]interface{})["total"].(map[string]interface{})["value"].(float64)), - int(r["took"].(float64)), - ) - // Print the ID and document source for each hit. 
- for _, hit := range r["hits"].(map[string]interface{})["hits"].([]interface{}) { - log.Printf(" * ID=%s, %s", hit.(map[string]interface{})["_id"], hit.(map[string]interface{})["_source"]) - } - - log.Println(strings.Repeat("=", 37)) -} - -// Client: 8.0.0-SNAPSHOT -// Server: 8.0.0-SNAPSHOT -// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// [201 Created] updated; version=1 -// [201 Created] updated; version=1 -// ------------------------------------- -// [200 OK] 2 hits; took: 5ms -// * ID=1, map[title:Test One] -// * ID=2, map[title:Test Two] -// ===================================== -``` - -As you see in the example above, the `esapi` package allows to call the Elasticsearch APIs in two distinct ways: either by creating a struct, such as `IndexRequest`, and calling its `Do()` method by passing it a context and the client, or by calling the `Search()` function on the client directly, using the option functions such as `WithIndex()`. See more information and examples in the -[package documentation](https://godoc.org/github.com/elastic/go-elasticsearch/esapi). - -The `elastictransport` package handles the transfer of data to and from Elasticsearch, including retrying failed requests, keeping a connection pool, discovering cluster nodes and logging. +## Connecting -Read more about the client internals and usage in the following blog posts: +Refer to the [Connecting section](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_connecting) +of the getting started documentation. 
-* https://www.elastic.co/blog/the-go-client-for-elasticsearch-introduction -* https://www.elastic.co/blog/the-go-client-for-elasticsearch-configuration-and-customization -* https://www.elastic.co/blog/the-go-client-for-elasticsearch-working-with-data +## Operations +* [Creating an index](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_creating_an_index) +* [Indexing documents](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_indexing_documents) +* [Getting documents](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_getting_documents) +* [Searching documents](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_searching_documents) +* [Updating documents](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_updating_documents) +* [Deleting documents](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_deleting_documents) +* [Deleting an index](https://www.elastic.co/guide/en/elasticsearch/client/go-api/current/getting-started-go.html#_deleting_an_index) ## Helpers diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api._.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api._.go index 1e362961a..bb41a1053 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api._.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api._.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0 (a3e1f4c): DO NOT EDIT +// Code generated from specification version 8.11.0 (3dbb504): DO NOT EDIT package esapi @@ -43,103 +43,132 @@ type API struct { Watcher *Watcher XPack *XPack - AutoscalingDeleteAutoscalingPolicy AutoscalingDeleteAutoscalingPolicy - AutoscalingGetAutoscalingCapacity AutoscalingGetAutoscalingCapacity - AutoscalingGetAutoscalingPolicy AutoscalingGetAutoscalingPolicy - AutoscalingPutAutoscalingPolicy AutoscalingPutAutoscalingPolicy - Bulk Bulk - ClearScroll ClearScroll - ClosePointInTime ClosePointInTime - Count Count - Create Create - DanglingIndicesDeleteDanglingIndex DanglingIndicesDeleteDanglingIndex - DanglingIndicesImportDanglingIndex DanglingIndicesImportDanglingIndex - DanglingIndicesListDanglingIndices DanglingIndicesListDanglingIndices - DeleteByQuery DeleteByQuery - DeleteByQueryRethrottle DeleteByQueryRethrottle - Delete Delete - DeleteScript DeleteScript - EnrichDeletePolicy EnrichDeletePolicy - EnrichExecutePolicy EnrichExecutePolicy - EnrichGetPolicy EnrichGetPolicy - EnrichPutPolicy EnrichPutPolicy - EnrichStats EnrichStats - EqlDelete EqlDelete - EqlGet EqlGet - EqlGetStatus EqlGetStatus - EqlSearch EqlSearch - Exists Exists - ExistsSource ExistsSource - Explain Explain - FeaturesGetFeatures FeaturesGetFeatures - FeaturesResetFeatures FeaturesResetFeatures - FieldCaps FieldCaps - FleetGlobalCheckpoints FleetGlobalCheckpoints - FleetMsearch FleetMsearch - FleetSearch FleetSearch - Get Get - GetScriptContext GetScriptContext - GetScriptLanguages GetScriptLanguages - GetScript GetScript - GetSource GetSource - GraphExplore GraphExplore - HealthReport HealthReport - Index Index - Info Info - KnnSearch KnnSearch - LogstashDeletePipeline LogstashDeletePipeline - LogstashGetPipeline LogstashGetPipeline - LogstashPutPipeline LogstashPutPipeline - Mget Mget - Msearch Msearch - MsearchTemplate MsearchTemplate - Mtermvectors Mtermvectors - OpenPointInTime OpenPointInTime - Ping Ping - 
PutScript PutScript - RankEval RankEval - Reindex Reindex - ReindexRethrottle ReindexRethrottle - RenderSearchTemplate RenderSearchTemplate - ScriptsPainlessExecute ScriptsPainlessExecute - Scroll Scroll - SearchMvt SearchMvt - Search Search - SearchShards SearchShards - SearchTemplate SearchTemplate - SearchableSnapshotsCacheStats SearchableSnapshotsCacheStats - SearchableSnapshotsClearCache SearchableSnapshotsClearCache - SearchableSnapshotsMount SearchableSnapshotsMount - SearchableSnapshotsStats SearchableSnapshotsStats - ShutdownDeleteNode ShutdownDeleteNode - ShutdownGetNode ShutdownGetNode - ShutdownPutNode ShutdownPutNode - SlmDeleteLifecycle SlmDeleteLifecycle - SlmExecuteLifecycle SlmExecuteLifecycle - SlmExecuteRetention SlmExecuteRetention - SlmGetLifecycle SlmGetLifecycle - SlmGetStats SlmGetStats - SlmGetStatus SlmGetStatus - SlmPutLifecycle SlmPutLifecycle - SlmStart SlmStart - SlmStop SlmStop - TermsEnum TermsEnum - Termvectors Termvectors - TextStructureFindStructure TextStructureFindStructure - TransformDeleteTransform TransformDeleteTransform - TransformGetTransform TransformGetTransform - TransformGetTransformStats TransformGetTransformStats - TransformPreviewTransform TransformPreviewTransform - TransformPutTransform TransformPutTransform - TransformResetTransform TransformResetTransform - TransformScheduleNowTransform TransformScheduleNowTransform - TransformStartTransform TransformStartTransform - TransformStopTransform TransformStopTransform - TransformUpdateTransform TransformUpdateTransform - TransformUpgradeTransforms TransformUpgradeTransforms - UpdateByQuery UpdateByQuery - UpdateByQueryRethrottle UpdateByQueryRethrottle - Update Update + AutoscalingDeleteAutoscalingPolicy AutoscalingDeleteAutoscalingPolicy + AutoscalingGetAutoscalingCapacity AutoscalingGetAutoscalingCapacity + AutoscalingGetAutoscalingPolicy AutoscalingGetAutoscalingPolicy + AutoscalingPutAutoscalingPolicy AutoscalingPutAutoscalingPolicy + Bulk Bulk + ClearScroll 
ClearScroll + ClosePointInTime ClosePointInTime + Count Count + Create Create + DanglingIndicesDeleteDanglingIndex DanglingIndicesDeleteDanglingIndex + DanglingIndicesImportDanglingIndex DanglingIndicesImportDanglingIndex + DanglingIndicesListDanglingIndices DanglingIndicesListDanglingIndices + DeleteByQuery DeleteByQuery + DeleteByQueryRethrottle DeleteByQueryRethrottle + Delete Delete + DeleteScript DeleteScript + EnrichDeletePolicy EnrichDeletePolicy + EnrichExecutePolicy EnrichExecutePolicy + EnrichGetPolicy EnrichGetPolicy + EnrichPutPolicy EnrichPutPolicy + EnrichStats EnrichStats + EqlDelete EqlDelete + EqlGet EqlGet + EqlGetStatus EqlGetStatus + EqlSearch EqlSearch + EsqlQuery EsqlQuery + Exists Exists + ExistsSource ExistsSource + Explain Explain + FeaturesGetFeatures FeaturesGetFeatures + FeaturesResetFeatures FeaturesResetFeatures + FieldCaps FieldCaps + FleetDeleteSecret FleetDeleteSecret + FleetGetSecret FleetGetSecret + FleetGlobalCheckpoints FleetGlobalCheckpoints + FleetMsearch FleetMsearch + FleetPostSecret FleetPostSecret + FleetSearch FleetSearch + Get Get + GetScriptContext GetScriptContext + GetScriptLanguages GetScriptLanguages + GetScript GetScript + GetSource GetSource + GraphExplore GraphExplore + HealthReport HealthReport + Index Index + InferenceDeleteModel InferenceDeleteModel + InferenceGetModel InferenceGetModel + InferenceInference InferenceInference + InferencePutModel InferencePutModel + Info Info + KnnSearch KnnSearch + LogstashDeletePipeline LogstashDeletePipeline + LogstashGetPipeline LogstashGetPipeline + LogstashPutPipeline LogstashPutPipeline + Mget Mget + Msearch Msearch + MsearchTemplate MsearchTemplate + Mtermvectors Mtermvectors + OpenPointInTime OpenPointInTime + Ping Ping + PutScript PutScript + QueryRulesetDelete QueryRulesetDelete + QueryRulesetGet QueryRulesetGet + QueryRulesetList QueryRulesetList + QueryRulesetPut QueryRulesetPut + RankEval RankEval + Reindex Reindex + ReindexRethrottle ReindexRethrottle + 
RenderSearchTemplate RenderSearchTemplate + ScriptsPainlessExecute ScriptsPainlessExecute + Scroll Scroll + SearchApplicationDeleteBehavioralAnalytics SearchApplicationDeleteBehavioralAnalytics + SearchApplicationDelete SearchApplicationDelete + SearchApplicationGetBehavioralAnalytics SearchApplicationGetBehavioralAnalytics + SearchApplicationGet SearchApplicationGet + SearchApplicationList SearchApplicationList + SearchApplicationPostBehavioralAnalyticsEvent SearchApplicationPostBehavioralAnalyticsEvent + SearchApplicationPutBehavioralAnalytics SearchApplicationPutBehavioralAnalytics + SearchApplicationPut SearchApplicationPut + SearchApplicationRenderQuery SearchApplicationRenderQuery + SearchApplicationSearch SearchApplicationSearch + SearchMvt SearchMvt + Search Search + SearchShards SearchShards + SearchTemplate SearchTemplate + SearchableSnapshotsCacheStats SearchableSnapshotsCacheStats + SearchableSnapshotsClearCache SearchableSnapshotsClearCache + SearchableSnapshotsMount SearchableSnapshotsMount + SearchableSnapshotsStats SearchableSnapshotsStats + ShutdownDeleteNode ShutdownDeleteNode + ShutdownGetNode ShutdownGetNode + ShutdownPutNode ShutdownPutNode + SlmDeleteLifecycle SlmDeleteLifecycle + SlmExecuteLifecycle SlmExecuteLifecycle + SlmExecuteRetention SlmExecuteRetention + SlmGetLifecycle SlmGetLifecycle + SlmGetStats SlmGetStats + SlmGetStatus SlmGetStatus + SlmPutLifecycle SlmPutLifecycle + SlmStart SlmStart + SlmStop SlmStop + SynonymsDeleteSynonym SynonymsDeleteSynonym + SynonymsDeleteSynonymRule SynonymsDeleteSynonymRule + SynonymsGetSynonym SynonymsGetSynonym + SynonymsGetSynonymRule SynonymsGetSynonymRule + SynonymsGetSynonymsSets SynonymsGetSynonymsSets + SynonymsPutSynonym SynonymsPutSynonym + SynonymsPutSynonymRule SynonymsPutSynonymRule + TermsEnum TermsEnum + Termvectors Termvectors + TextStructureFindStructure TextStructureFindStructure + TransformDeleteTransform TransformDeleteTransform + TransformGetTransform TransformGetTransform + 
TransformGetTransformStats TransformGetTransformStats + TransformPreviewTransform TransformPreviewTransform + TransformPutTransform TransformPutTransform + TransformResetTransform TransformResetTransform + TransformScheduleNowTransform TransformScheduleNowTransform + TransformStartTransform TransformStartTransform + TransformStopTransform TransformStopTransform + TransformUpdateTransform TransformUpdateTransform + TransformUpgradeTransforms TransformUpgradeTransforms + UpdateByQuery UpdateByQuery + UpdateByQueryRethrottle UpdateByQueryRethrottle + Update Update } // Cat contains the Cat APIs @@ -181,6 +210,7 @@ type Cluster struct { GetComponentTemplate ClusterGetComponentTemplate GetSettings ClusterGetSettings Health ClusterHealth + Info ClusterInfo PendingTasks ClusterPendingTasks PostVotingConfigExclusions ClusterPostVotingConfigExclusions PutComponentTemplate ClusterPutComponentTemplate @@ -202,6 +232,7 @@ type Indices struct { Create IndicesCreate DataStreamsStats IndicesDataStreamsStats DeleteAlias IndicesDeleteAlias + DeleteDataLifecycle IndicesDeleteDataLifecycle DeleteDataStream IndicesDeleteDataStream DeleteIndexTemplate IndicesDeleteIndexTemplate Delete IndicesDelete @@ -212,10 +243,12 @@ type Indices struct { ExistsIndexTemplate IndicesExistsIndexTemplate Exists IndicesExists ExistsTemplate IndicesExistsTemplate + ExplainDataLifecycle IndicesExplainDataLifecycle FieldUsageStats IndicesFieldUsageStats Flush IndicesFlush Forcemerge IndicesForcemerge GetAlias IndicesGetAlias + GetDataLifecycle IndicesGetDataLifecycle GetDataStream IndicesGetDataStream GetFieldMapping IndicesGetFieldMapping GetIndexTemplate IndicesGetIndexTemplate @@ -228,6 +261,7 @@ type Indices struct { Open IndicesOpen PromoteDataStream IndicesPromoteDataStream PutAlias IndicesPutAlias + PutDataLifecycle IndicesPutDataLifecycle PutIndexTemplate IndicesPutIndexTemplate PutMapping IndicesPutMapping PutSettings IndicesPutSettings @@ -461,6 +495,7 @@ type Security struct { ClearCachedRoles 
SecurityClearCachedRoles ClearCachedServiceTokens SecurityClearCachedServiceTokens CreateAPIKey SecurityCreateAPIKey + CreateCrossClusterAPIKey SecurityCreateCrossClusterAPIKey CreateServiceToken SecurityCreateServiceToken DeletePrivileges SecurityDeletePrivileges DeleteRoleMapping SecurityDeleteRoleMapping @@ -480,6 +515,7 @@ type Security struct { GetRole SecurityGetRole GetServiceAccounts SecurityGetServiceAccounts GetServiceCredentials SecurityGetServiceCredentials + GetSettings SecurityGetSettings GetToken SecurityGetToken GetUserPrivileges SecurityGetUserPrivileges GetUserProfile SecurityGetUserProfile @@ -505,6 +541,8 @@ type Security struct { SamlServiceProviderMetadata SecuritySamlServiceProviderMetadata SuggestUserProfiles SecuritySuggestUserProfiles UpdateAPIKey SecurityUpdateAPIKey + UpdateCrossClusterAPIKey SecurityUpdateCrossClusterAPIKey + UpdateSettings SecurityUpdateSettings UpdateUserProfileData SecurityUpdateUserProfileData } @@ -530,12 +568,14 @@ type Watcher struct { DeactivateWatch WatcherDeactivateWatch DeleteWatch WatcherDeleteWatch ExecuteWatch WatcherExecuteWatch + GetSettings WatcherGetSettings GetWatch WatcherGetWatch PutWatch WatcherPutWatch QueryWatches WatcherQueryWatches Start WatcherStart Stats WatcherStats Stop WatcherStop + UpdateSettings WatcherUpdateSettings } // XPack contains the XPack APIs @@ -572,14 +612,18 @@ func New(t Transport) *API { EqlGet: newEqlGetFunc(t), EqlGetStatus: newEqlGetStatusFunc(t), EqlSearch: newEqlSearchFunc(t), + EsqlQuery: newEsqlQueryFunc(t), Exists: newExistsFunc(t), ExistsSource: newExistsSourceFunc(t), Explain: newExplainFunc(t), FeaturesGetFeatures: newFeaturesGetFeaturesFunc(t), FeaturesResetFeatures: newFeaturesResetFeaturesFunc(t), FieldCaps: newFieldCapsFunc(t), + FleetDeleteSecret: newFleetDeleteSecretFunc(t), + FleetGetSecret: newFleetGetSecretFunc(t), FleetGlobalCheckpoints: newFleetGlobalCheckpointsFunc(t), FleetMsearch: newFleetMsearchFunc(t), + FleetPostSecret: newFleetPostSecretFunc(t), 
FleetSearch: newFleetSearchFunc(t), Get: newGetFunc(t), GetScriptContext: newGetScriptContextFunc(t), @@ -589,6 +633,10 @@ func New(t Transport) *API { GraphExplore: newGraphExploreFunc(t), HealthReport: newHealthReportFunc(t), Index: newIndexFunc(t), + InferenceDeleteModel: newInferenceDeleteModelFunc(t), + InferenceGetModel: newInferenceGetModelFunc(t), + InferenceInference: newInferenceInferenceFunc(t), + InferencePutModel: newInferencePutModelFunc(t), Info: newInfoFunc(t), KnnSearch: newKnnSearchFunc(t), LogstashDeletePipeline: newLogstashDeletePipelineFunc(t), @@ -601,49 +649,70 @@ func New(t Transport) *API { OpenPointInTime: newOpenPointInTimeFunc(t), Ping: newPingFunc(t), PutScript: newPutScriptFunc(t), + QueryRulesetDelete: newQueryRulesetDeleteFunc(t), + QueryRulesetGet: newQueryRulesetGetFunc(t), + QueryRulesetList: newQueryRulesetListFunc(t), + QueryRulesetPut: newQueryRulesetPutFunc(t), RankEval: newRankEvalFunc(t), Reindex: newReindexFunc(t), ReindexRethrottle: newReindexRethrottleFunc(t), RenderSearchTemplate: newRenderSearchTemplateFunc(t), ScriptsPainlessExecute: newScriptsPainlessExecuteFunc(t), Scroll: newScrollFunc(t), - SearchMvt: newSearchMvtFunc(t), - Search: newSearchFunc(t), - SearchShards: newSearchShardsFunc(t), - SearchTemplate: newSearchTemplateFunc(t), - SearchableSnapshotsCacheStats: newSearchableSnapshotsCacheStatsFunc(t), - SearchableSnapshotsClearCache: newSearchableSnapshotsClearCacheFunc(t), - SearchableSnapshotsMount: newSearchableSnapshotsMountFunc(t), - SearchableSnapshotsStats: newSearchableSnapshotsStatsFunc(t), - ShutdownDeleteNode: newShutdownDeleteNodeFunc(t), - ShutdownGetNode: newShutdownGetNodeFunc(t), - ShutdownPutNode: newShutdownPutNodeFunc(t), - SlmDeleteLifecycle: newSlmDeleteLifecycleFunc(t), - SlmExecuteLifecycle: newSlmExecuteLifecycleFunc(t), - SlmExecuteRetention: newSlmExecuteRetentionFunc(t), - SlmGetLifecycle: newSlmGetLifecycleFunc(t), - SlmGetStats: newSlmGetStatsFunc(t), - SlmGetStatus: 
newSlmGetStatusFunc(t), - SlmPutLifecycle: newSlmPutLifecycleFunc(t), - SlmStart: newSlmStartFunc(t), - SlmStop: newSlmStopFunc(t), - TermsEnum: newTermsEnumFunc(t), - Termvectors: newTermvectorsFunc(t), - TextStructureFindStructure: newTextStructureFindStructureFunc(t), - TransformDeleteTransform: newTransformDeleteTransformFunc(t), - TransformGetTransform: newTransformGetTransformFunc(t), - TransformGetTransformStats: newTransformGetTransformStatsFunc(t), - TransformPreviewTransform: newTransformPreviewTransformFunc(t), - TransformPutTransform: newTransformPutTransformFunc(t), - TransformResetTransform: newTransformResetTransformFunc(t), - TransformScheduleNowTransform: newTransformScheduleNowTransformFunc(t), - TransformStartTransform: newTransformStartTransformFunc(t), - TransformStopTransform: newTransformStopTransformFunc(t), - TransformUpdateTransform: newTransformUpdateTransformFunc(t), - TransformUpgradeTransforms: newTransformUpgradeTransformsFunc(t), - UpdateByQuery: newUpdateByQueryFunc(t), - UpdateByQueryRethrottle: newUpdateByQueryRethrottleFunc(t), - Update: newUpdateFunc(t), + SearchApplicationDeleteBehavioralAnalytics: newSearchApplicationDeleteBehavioralAnalyticsFunc(t), + SearchApplicationDelete: newSearchApplicationDeleteFunc(t), + SearchApplicationGetBehavioralAnalytics: newSearchApplicationGetBehavioralAnalyticsFunc(t), + SearchApplicationGet: newSearchApplicationGetFunc(t), + SearchApplicationList: newSearchApplicationListFunc(t), + SearchApplicationPostBehavioralAnalyticsEvent: newSearchApplicationPostBehavioralAnalyticsEventFunc(t), + SearchApplicationPutBehavioralAnalytics: newSearchApplicationPutBehavioralAnalyticsFunc(t), + SearchApplicationPut: newSearchApplicationPutFunc(t), + SearchApplicationRenderQuery: newSearchApplicationRenderQueryFunc(t), + SearchApplicationSearch: newSearchApplicationSearchFunc(t), + SearchMvt: newSearchMvtFunc(t), + Search: newSearchFunc(t), + SearchShards: newSearchShardsFunc(t), + SearchTemplate: 
newSearchTemplateFunc(t), + SearchableSnapshotsCacheStats: newSearchableSnapshotsCacheStatsFunc(t), + SearchableSnapshotsClearCache: newSearchableSnapshotsClearCacheFunc(t), + SearchableSnapshotsMount: newSearchableSnapshotsMountFunc(t), + SearchableSnapshotsStats: newSearchableSnapshotsStatsFunc(t), + ShutdownDeleteNode: newShutdownDeleteNodeFunc(t), + ShutdownGetNode: newShutdownGetNodeFunc(t), + ShutdownPutNode: newShutdownPutNodeFunc(t), + SlmDeleteLifecycle: newSlmDeleteLifecycleFunc(t), + SlmExecuteLifecycle: newSlmExecuteLifecycleFunc(t), + SlmExecuteRetention: newSlmExecuteRetentionFunc(t), + SlmGetLifecycle: newSlmGetLifecycleFunc(t), + SlmGetStats: newSlmGetStatsFunc(t), + SlmGetStatus: newSlmGetStatusFunc(t), + SlmPutLifecycle: newSlmPutLifecycleFunc(t), + SlmStart: newSlmStartFunc(t), + SlmStop: newSlmStopFunc(t), + SynonymsDeleteSynonym: newSynonymsDeleteSynonymFunc(t), + SynonymsDeleteSynonymRule: newSynonymsDeleteSynonymRuleFunc(t), + SynonymsGetSynonym: newSynonymsGetSynonymFunc(t), + SynonymsGetSynonymRule: newSynonymsGetSynonymRuleFunc(t), + SynonymsGetSynonymsSets: newSynonymsGetSynonymsSetsFunc(t), + SynonymsPutSynonym: newSynonymsPutSynonymFunc(t), + SynonymsPutSynonymRule: newSynonymsPutSynonymRuleFunc(t), + TermsEnum: newTermsEnumFunc(t), + Termvectors: newTermvectorsFunc(t), + TextStructureFindStructure: newTextStructureFindStructureFunc(t), + TransformDeleteTransform: newTransformDeleteTransformFunc(t), + TransformGetTransform: newTransformGetTransformFunc(t), + TransformGetTransformStats: newTransformGetTransformStatsFunc(t), + TransformPreviewTransform: newTransformPreviewTransformFunc(t), + TransformPutTransform: newTransformPutTransformFunc(t), + TransformResetTransform: newTransformResetTransformFunc(t), + TransformScheduleNowTransform: newTransformScheduleNowTransformFunc(t), + TransformStartTransform: newTransformStartTransformFunc(t), + TransformStopTransform: newTransformStopTransformFunc(t), + TransformUpdateTransform: 
newTransformUpdateTransformFunc(t), + TransformUpgradeTransforms: newTransformUpgradeTransformsFunc(t), + UpdateByQuery: newUpdateByQueryFunc(t), + UpdateByQueryRethrottle: newUpdateByQueryRethrottleFunc(t), + Update: newUpdateFunc(t), Cat: &Cat{ Aliases: newCatAliasesFunc(t), Allocation: newCatAllocationFunc(t), @@ -680,6 +749,7 @@ func New(t Transport) *API { GetComponentTemplate: newClusterGetComponentTemplateFunc(t), GetSettings: newClusterGetSettingsFunc(t), Health: newClusterHealthFunc(t), + Info: newClusterInfoFunc(t), PendingTasks: newClusterPendingTasksFunc(t), PostVotingConfigExclusions: newClusterPostVotingConfigExclusionsFunc(t), PutComponentTemplate: newClusterPutComponentTemplateFunc(t), @@ -699,6 +769,7 @@ func New(t Transport) *API { Create: newIndicesCreateFunc(t), DataStreamsStats: newIndicesDataStreamsStatsFunc(t), DeleteAlias: newIndicesDeleteAliasFunc(t), + DeleteDataLifecycle: newIndicesDeleteDataLifecycleFunc(t), DeleteDataStream: newIndicesDeleteDataStreamFunc(t), DeleteIndexTemplate: newIndicesDeleteIndexTemplateFunc(t), Delete: newIndicesDeleteFunc(t), @@ -709,10 +780,12 @@ func New(t Transport) *API { ExistsIndexTemplate: newIndicesExistsIndexTemplateFunc(t), Exists: newIndicesExistsFunc(t), ExistsTemplate: newIndicesExistsTemplateFunc(t), + ExplainDataLifecycle: newIndicesExplainDataLifecycleFunc(t), FieldUsageStats: newIndicesFieldUsageStatsFunc(t), Flush: newIndicesFlushFunc(t), Forcemerge: newIndicesForcemergeFunc(t), GetAlias: newIndicesGetAliasFunc(t), + GetDataLifecycle: newIndicesGetDataLifecycleFunc(t), GetDataStream: newIndicesGetDataStreamFunc(t), GetFieldMapping: newIndicesGetFieldMappingFunc(t), GetIndexTemplate: newIndicesGetIndexTemplateFunc(t), @@ -725,6 +798,7 @@ func New(t Transport) *API { Open: newIndicesOpenFunc(t), PromoteDataStream: newIndicesPromoteDataStreamFunc(t), PutAlias: newIndicesPutAliasFunc(t), + PutDataLifecycle: newIndicesPutDataLifecycleFunc(t), PutIndexTemplate: newIndicesPutIndexTemplateFunc(t), 
PutMapping: newIndicesPutMappingFunc(t), PutSettings: newIndicesPutSettingsFunc(t), @@ -929,6 +1003,7 @@ func New(t Transport) *API { ClearCachedRoles: newSecurityClearCachedRolesFunc(t), ClearCachedServiceTokens: newSecurityClearCachedServiceTokensFunc(t), CreateAPIKey: newSecurityCreateAPIKeyFunc(t), + CreateCrossClusterAPIKey: newSecurityCreateCrossClusterAPIKeyFunc(t), CreateServiceToken: newSecurityCreateServiceTokenFunc(t), DeletePrivileges: newSecurityDeletePrivilegesFunc(t), DeleteRoleMapping: newSecurityDeleteRoleMappingFunc(t), @@ -948,6 +1023,7 @@ func New(t Transport) *API { GetRole: newSecurityGetRoleFunc(t), GetServiceAccounts: newSecurityGetServiceAccountsFunc(t), GetServiceCredentials: newSecurityGetServiceCredentialsFunc(t), + GetSettings: newSecurityGetSettingsFunc(t), GetToken: newSecurityGetTokenFunc(t), GetUserPrivileges: newSecurityGetUserPrivilegesFunc(t), GetUserProfile: newSecurityGetUserProfileFunc(t), @@ -973,6 +1049,8 @@ func New(t Transport) *API { SamlServiceProviderMetadata: newSecuritySamlServiceProviderMetadataFunc(t), SuggestUserProfiles: newSecuritySuggestUserProfilesFunc(t), UpdateAPIKey: newSecurityUpdateAPIKeyFunc(t), + UpdateCrossClusterAPIKey: newSecurityUpdateCrossClusterAPIKeyFunc(t), + UpdateSettings: newSecurityUpdateSettingsFunc(t), UpdateUserProfileData: newSecurityUpdateUserProfileDataFunc(t), }, SQL: &SQL{ @@ -992,12 +1070,14 @@ func New(t Transport) *API { DeactivateWatch: newWatcherDeactivateWatchFunc(t), DeleteWatch: newWatcherDeleteWatchFunc(t), ExecuteWatch: newWatcherExecuteWatchFunc(t), + GetSettings: newWatcherGetSettingsFunc(t), GetWatch: newWatcherGetWatchFunc(t), PutWatch: newWatcherPutWatchFunc(t), QueryWatches: newWatcherQueryWatchesFunc(t), Start: newWatcherStartFunc(t), Stats: newWatcherStatsFunc(t), Stop: newWatcherStopFunc(t), + UpdateSettings: newWatcherUpdateSettingsFunc(t), }, XPack: &XPack{ Info: newXPackInfoFunc(t), diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.bulk.go 
b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.bulk.go index aaa44c070..d4343112f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.bulk.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.bulk.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.aliases.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.aliases.go index 0b96556c5..8546a5b75 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.aliases.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.aliases.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.allocation.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.allocation.go index a581baacb..2642b96ae 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.allocation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.allocation.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.component_templates.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.component_templates.go index 04ae349f9..2a700a162 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.component_templates.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.component_templates.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -41,7 +41,7 @@ func newCatComponentTemplatesFunc(t Transport) CatComponentTemplates { // CatComponentTemplates returns information about existing component_templates templates. // -// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-compoentn-templates.html. +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-component-templates.html. type CatComponentTemplates func(o ...func(*CatComponentTemplatesRequest)) (*Response, error) // CatComponentTemplatesRequest configures the Cat Component Templates API request. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.count.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.count.go index f8ecbdc6d..e85ab3984 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.count.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.count.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.fielddata.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.fielddata.go index 7cf58a773..eaa6ad74b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.fielddata.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.fielddata.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.health.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.health.go index 9fffa1231..7c62e5e41 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.health.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.health.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.help.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.help.go index d835b47ca..dabe08876 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.help.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.help.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.indices.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.indices.go index f701696c9..59571d64a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.indices.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.indices.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.master.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.master.go index 799f65925..6c3225aab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.master.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.master.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.nodeattrs.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.nodeattrs.go index 5d17be94c..7a0764d61 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.nodeattrs.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.nodeattrs.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.nodes.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.nodes.go index 0f4b14afd..62b489db5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.nodes.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.nodes.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.pending_tasks.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.pending_tasks.go index 4840675d6..e0650b7e5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.pending_tasks.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.pending_tasks.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.plugins.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.plugins.go index fe1b922be..b403b5952 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.plugins.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.plugins.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.recovery.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.recovery.go index e12e9c480..aad7048e7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.recovery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.recovery.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.repositories.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.repositories.go index c14b46f8d..85bbe7c63 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.repositories.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.repositories.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.segments.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.segments.go index 09b7bac99..a2c591fff 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.segments.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.segments.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.shards.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.shards.go index ff77c119c..2e6a96828 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.shards.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.shards.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.snapshots.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.snapshots.go index a9be08f2a..db06acc1d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.snapshots.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.snapshots.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.tasks.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.tasks.go index 0d773ab9d..3fb814426 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.tasks.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.tasks.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.templates.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.templates.go index 506da1ef8..ee8ba44f8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.templates.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.templates.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.thread_pool.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.thread_pool.go index a16523db8..700e01fb3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.thread_pool.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cat.thread_pool.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.clear_scroll.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.clear_scroll.go index ad728e76a..f6829b615 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.clear_scroll.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.clear_scroll.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.allocation_explain.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.allocation_explain.go index c54be4d1b..e79c78791 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.allocation_explain.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.allocation_explain.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.delete_component_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.delete_component_template.go index 05d4394b1..6fef752ee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.delete_component_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.delete_component_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.delete_voting_config_exclusions.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.delete_voting_config_exclusions.go index 2770a57f6..f2b850c49 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.delete_voting_config_exclusions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.delete_voting_config_exclusions.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.exists_component_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.exists_component_template.go index ca275e33a..628f166cc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.exists_component_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.exists_component_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.get_component_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.get_component_template.go index 3dbb9f3db..4aaea1b29 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.get_component_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.get_component_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -48,8 +48,9 @@ type ClusterGetComponentTemplate func(o ...func(*ClusterGetComponentTemplateRequ type ClusterGetComponentTemplateRequest struct { Name []string - Local *bool - MasterTimeout time.Duration + IncludeDefaults *bool + Local *bool + MasterTimeout time.Duration Pretty bool Human bool @@ -82,6 +83,10 @@ func (r ClusterGetComponentTemplateRequest) Do(ctx context.Context, transport Tr params = make(map[string]string) + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + if r.Local != nil { params["local"] = strconv.FormatBool(*r.Local) } @@ -163,6 +168,13 @@ func (f ClusterGetComponentTemplate) WithName(v ...string) func(*ClusterGetCompo } } +// WithIncludeDefaults - return all default configurations for the component template (default: false). +func (f ClusterGetComponentTemplate) WithIncludeDefaults(v bool) func(*ClusterGetComponentTemplateRequest) { + return func(r *ClusterGetComponentTemplateRequest) { + r.IncludeDefaults = &v + } +} + // WithLocal - return local information, do not retrieve the state from master node (default: false). 
func (f ClusterGetComponentTemplate) WithLocal(v bool) func(*ClusterGetComponentTemplateRequest) { return func(r *ClusterGetComponentTemplateRequest) { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.get_settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.get_settings.go index 8e1aafb16..f8f3438d2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.get_settings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.get_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.health.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.health.go index 57d13af89..c5fd6ad46 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.health.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.health.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.info.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.info.go new file mode 100644 index 000000000..bb5c459d1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.info.go @@ -0,0 +1,197 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" +) + +func newClusterInfoFunc(t Transport) ClusterInfo { + return func(target []string, o ...func(*ClusterInfoRequest)) (*Response, error) { + var r = ClusterInfoRequest{Target: target} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// ClusterInfo returns different information about the cluster. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-info.html. +type ClusterInfo func(target []string, o ...func(*ClusterInfoRequest)) (*Response, error) + +// ClusterInfoRequest configures the Cluster Info API request. +type ClusterInfoRequest struct { + Target []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r ClusterInfoRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + if len(r.Target) == 0 { + return nil, errors.New("target is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_info") + 1 + len(strings.Join(r.Target, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_info") + path.WriteString("/") + path.WriteString(strings.Join(r.Target, ",")) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f ClusterInfo) WithContext(v context.Context) func(*ClusterInfoRequest) { + return func(r *ClusterInfoRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f ClusterInfo) WithPretty() func(*ClusterInfoRequest) { + return func(r *ClusterInfoRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f ClusterInfo) WithHuman() func(*ClusterInfoRequest) { + return func(r *ClusterInfoRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f ClusterInfo) WithErrorTrace() func(*ClusterInfoRequest) { + return func(r *ClusterInfoRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f ClusterInfo) WithFilterPath(v ...string) func(*ClusterInfoRequest) { + return func(r *ClusterInfoRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f ClusterInfo) WithHeader(h map[string]string) func(*ClusterInfoRequest) { + return func(r *ClusterInfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f ClusterInfo) WithOpaqueID(s string) func(*ClusterInfoRequest) { + return func(r *ClusterInfoRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.pending_tasks.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.pending_tasks.go index 17e086f4d..1d41acaf5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.pending_tasks.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.pending_tasks.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.post_voting_config_exclusions.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.post_voting_config_exclusions.go index a4f86c0e4..5a718c980 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.post_voting_config_exclusions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.post_voting_config_exclusions.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.put_component_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.put_component_template.go index 965ad2cfa..da9b8e520 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.put_component_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.put_component_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.put_settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.put_settings.go index 1a5b6aba5..48bb9515d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.put_settings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.put_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.remote_info.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.remote_info.go index 8082b60c1..bf27a1e1b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.remote_info.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.remote_info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.reroute.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.reroute.go index 59a5df9ad..c8f75deea 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.reroute.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.reroute.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.state.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.state.go index efa9ce811..5c0ffb135 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.state.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.state.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.stats.go index 5a891c207..b0a20a487 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.cluster.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.count.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.count.go index f277e2601..37a34cd08 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.count.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.count.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.create.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.create.go index cad932a53..fbf613aae 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.create.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.create.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.dangling_indices.delete_dangling_index.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.dangling_indices.delete_dangling_index.go index 8b2e3cc2e..f0523ec94 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.dangling_indices.delete_dangling_index.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.dangling_indices.delete_dangling_index.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.dangling_indices.import_dangling_index.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.dangling_indices.import_dangling_index.go index 93ff5e842..87765868a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.dangling_indices.import_dangling_index.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.dangling_indices.import_dangling_index.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.dangling_indices.list_dangling_indices.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.dangling_indices.list_dangling_indices.go index 44dabe274..7e8ffc46a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.dangling_indices.list_dangling_indices.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.dangling_indices.list_dangling_indices.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.delete.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.delete.go index 8a18c735b..916bf376e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.delete.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.delete_by_query.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.delete_by_query.go index 00edf3637..27054a076 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.delete_by_query.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.delete_by_query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.delete_by_query_rethrottle.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.delete_by_query_rethrottle.go index d5efa2356..a63a553af 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.delete_by_query_rethrottle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.delete_by_query_rethrottle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.delete_script.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.delete_script.go index f5b8e615c..f7c5e6962 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.delete_script.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.delete_script.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.exists.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.exists.go index 8c4d6c67c..06931bd8d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.exists.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.exists.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.exists_source.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.exists_source.go index 691a34e30..752162612 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.exists_source.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.exists_source.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.explain.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.explain.go index 4f587a816..b0b1253e0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.explain.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.explain.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.features.get_features.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.features.get_features.go index 5e8a8b728..0e0f5a759 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.features.get_features.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.features.get_features.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.features.reset_features.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.features.reset_features.go index d7c78b358..60b7ec36c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.features.reset_features.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.features.reset_features.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.field_caps.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.field_caps.go index 63e8dc2fb..083f25eb3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.field_caps.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.field_caps.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.delete_secret.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.delete_secret.go new file mode 100644 index 000000000..7bdad5a01 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.delete_secret.go @@ -0,0 +1,194 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newFleetDeleteSecretFunc(t Transport) FleetDeleteSecret { + return func(id string, o ...func(*FleetDeleteSecretRequest)) (*Response, error) { + var r = FleetDeleteSecretRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// FleetDeleteSecret deletes a secret stored by Fleet. +// +// This API is experimental. +type FleetDeleteSecret func(id string, o ...func(*FleetDeleteSecretRequest)) (*Response, error) + +// FleetDeleteSecretRequest configures the Fleet Delete Secret API request. +type FleetDeleteSecretRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r FleetDeleteSecretRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "DELETE" + + path.Grow(7 + 1 + len("_fleet") + 1 + len("secret") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_fleet") + path.WriteString("/") + path.WriteString("secret") + path.WriteString("/") + path.WriteString(r.DocumentID) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f FleetDeleteSecret) WithContext(v context.Context) func(*FleetDeleteSecretRequest) { + return func(r *FleetDeleteSecretRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f FleetDeleteSecret) WithPretty() func(*FleetDeleteSecretRequest) { + return func(r *FleetDeleteSecretRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f FleetDeleteSecret) WithHuman() func(*FleetDeleteSecretRequest) { + return func(r *FleetDeleteSecretRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f FleetDeleteSecret) WithErrorTrace() func(*FleetDeleteSecretRequest) { + return func(r *FleetDeleteSecretRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f FleetDeleteSecret) WithFilterPath(v ...string) func(*FleetDeleteSecretRequest) { + return func(r *FleetDeleteSecretRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f FleetDeleteSecret) WithHeader(h map[string]string) func(*FleetDeleteSecretRequest) { + return func(r *FleetDeleteSecretRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f FleetDeleteSecret) WithOpaqueID(s string) func(*FleetDeleteSecretRequest) { + return func(r *FleetDeleteSecretRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.get_secret.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.get_secret.go new file mode 100644 index 000000000..f1425fb11 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.get_secret.go @@ -0,0 +1,194 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newFleetGetSecretFunc(t Transport) FleetGetSecret { + return func(id string, o ...func(*FleetGetSecretRequest)) (*Response, error) { + var r = FleetGetSecretRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// FleetGetSecret retrieves a secret stored by Fleet. +// +// This API is experimental. +type FleetGetSecret func(id string, o ...func(*FleetGetSecretRequest)) (*Response, error) + +// FleetGetSecretRequest configures the Fleet Get Secret API request. +type FleetGetSecretRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r FleetGetSecretRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + path.Grow(7 + 1 + len("_fleet") + 1 + len("secret") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_fleet") + path.WriteString("/") + path.WriteString("secret") + path.WriteString("/") + path.WriteString(r.DocumentID) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f FleetGetSecret) WithContext(v context.Context) func(*FleetGetSecretRequest) { + return func(r *FleetGetSecretRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f FleetGetSecret) WithPretty() func(*FleetGetSecretRequest) { + return func(r *FleetGetSecretRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f FleetGetSecret) WithHuman() func(*FleetGetSecretRequest) { + return func(r *FleetGetSecretRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f FleetGetSecret) WithErrorTrace() func(*FleetGetSecretRequest) { + return func(r *FleetGetSecretRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f FleetGetSecret) WithFilterPath(v ...string) func(*FleetGetSecretRequest) { + return func(r *FleetGetSecretRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f FleetGetSecret) WithHeader(h map[string]string) func(*FleetGetSecretRequest) { + return func(r *FleetGetSecretRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f FleetGetSecret) WithOpaqueID(s string) func(*FleetGetSecretRequest) { + return func(r *FleetGetSecretRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.global_checkpoints.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.global_checkpoints.go index 3d36179ac..3425bf188 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.global_checkpoints.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.global_checkpoints.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.msearch.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.msearch.go index 156441bcc..b221bb7a5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.msearch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.msearch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.post_secret.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.post_secret.go new file mode 100644 index 000000000..c057ea210 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.post_secret.go @@ -0,0 +1,194 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newFleetPostSecretFunc(t Transport) FleetPostSecret { + return func(body io.Reader, o ...func(*FleetPostSecretRequest)) (*Response, error) { + var r = FleetPostSecretRequest{Body: body} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// FleetPostSecret creates a secret stored by Fleet. +// +// This API is experimental. +type FleetPostSecret func(body io.Reader, o ...func(*FleetPostSecretRequest)) (*Response, error) + +// FleetPostSecretRequest configures the Fleet Post Secret API request. +type FleetPostSecretRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. +func (r FleetPostSecretRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "POST" + + path.Grow(7 + len("/_fleet/secret")) + path.WriteString("http://") + path.WriteString("/_fleet/secret") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + 
if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f FleetPostSecret) WithContext(v context.Context) func(*FleetPostSecretRequest) { + return func(r *FleetPostSecretRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f FleetPostSecret) WithPretty() func(*FleetPostSecretRequest) { + return func(r *FleetPostSecretRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f FleetPostSecret) WithHuman() func(*FleetPostSecretRequest) { + return func(r *FleetPostSecretRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f FleetPostSecret) WithErrorTrace() func(*FleetPostSecretRequest) { + return func(r *FleetPostSecretRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f FleetPostSecret) WithFilterPath(v ...string) func(*FleetPostSecretRequest) { + return func(r *FleetPostSecretRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f FleetPostSecret) WithHeader(h map[string]string) func(*FleetPostSecretRequest) { + return func(r *FleetPostSecretRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f FleetPostSecret) WithOpaqueID(s string) func(*FleetPostSecretRequest) { + return func(r *FleetPostSecretRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.search.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.search.go index 87de4cc3c..a9072c61b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.search.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.fleet.search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get.go index 90d2772d6..11607febf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get_script.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get_script.go index 67c65d8fb..a9a79e5fd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get_script.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get_script.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get_script_context.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get_script_context.go index ac364c48d..36b543368 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get_script_context.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get_script_context.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get_script_languages.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get_script_languages.go index 0f6b3dfdc..6a6d2031c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get_script_languages.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get_script_languages.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get_source.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get_source.go index 86eaa92a1..01270823b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get_source.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.get_source.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.health_report.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.health_report.go index 5c463d04b..9c4b439da 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.health_report.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.health_report.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.index.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.index.go index ed2787d91..0cce98b3e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.index.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.index.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.add_block.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.add_block.go index b84727389..ca4aa76cb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.add_block.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.add_block.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.analyze.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.analyze.go index bf6ea452e..7473b563b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.analyze.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.analyze.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.clear_cache.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.clear_cache.go index 6c9a8e4b8..0333278b3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.clear_cache.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.clear_cache.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.clone.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.clone.go index 50923bb7d..6e197bac3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.clone.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.clone.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.close.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.close.go index d6d7d0d4e..0d3d4195e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.close.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.close.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.create.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.create.go index 56cae9e1f..2b7b8a24d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.create.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.create.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete.go index 849556ca6..3208e24b6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete_alias.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete_alias.go index 8c87ad82f..eb9336e50 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete_alias.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete_data_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete_data_lifecycle.go new file mode 100644 index 000000000..f7670e735 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete_data_lifecycle.go @@ -0,0 +1,239 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strings" + "time" +) + +func newIndicesDeleteDataLifecycleFunc(t Transport) IndicesDeleteDataLifecycle { + return func(name []string, o ...func(*IndicesDeleteDataLifecycleRequest)) (*Response, error) { + var r = IndicesDeleteDataLifecycleRequest{Name: name} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesDeleteDataLifecycle deletes the data stream lifecycle of the selected data streams. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-delete-lifecycle.html. +type IndicesDeleteDataLifecycle func(name []string, o ...func(*IndicesDeleteDataLifecycleRequest)) (*Response, error) + +// IndicesDeleteDataLifecycleRequest configures the Indices Delete Data Lifecycle API request. +type IndicesDeleteDataLifecycleRequest struct { + Name []string + + ExpandWildcards string + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r IndicesDeleteDataLifecycleRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "DELETE" + + if len(r.Name) == 0 { + return nil, errors.New("name is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_data_stream") + 1 + len(strings.Join(r.Name, ",")) + 1 + len("_lifecycle")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + path.WriteString("/") + path.WriteString("_lifecycle") + + params = make(map[string]string) + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f IndicesDeleteDataLifecycle) WithContext(v context.Context) func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + r.ctx = v + } +} + +// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). +func (f IndicesDeleteDataLifecycle) WithExpandWildcards(v string) func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + r.ExpandWildcards = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesDeleteDataLifecycle) WithMasterTimeout(v time.Duration) func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit timestamp for the document. +func (f IndicesDeleteDataLifecycle) WithTimeout(v time.Duration) func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesDeleteDataLifecycle) WithPretty() func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesDeleteDataLifecycle) WithHuman() func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesDeleteDataLifecycle) WithErrorTrace() func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f IndicesDeleteDataLifecycle) WithFilterPath(v ...string) func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesDeleteDataLifecycle) WithHeader(h map[string]string) func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesDeleteDataLifecycle) WithOpaqueID(s string) func(*IndicesDeleteDataLifecycleRequest) { + return func(r *IndicesDeleteDataLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete_index_template.go index 8bf9e54a4..1b5bf94ab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete_index_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete_index_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -40,7 +40,7 @@ func newIndicesDeleteIndexTemplateFunc(t Transport) IndicesDeleteIndexTemplate { // IndicesDeleteIndexTemplate deletes an index template. // -// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html. +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-template.html. 
type IndicesDeleteIndexTemplate func(name string, o ...func(*IndicesDeleteIndexTemplateRequest)) (*Response, error) // IndicesDeleteIndexTemplateRequest configures the Indices Delete Index Template API request. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete_template.go index 009ba48fc..60897f74a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.delete_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -40,7 +40,7 @@ func newIndicesDeleteTemplateFunc(t Transport) IndicesDeleteTemplate { // IndicesDeleteTemplate deletes an index template. // -// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html. +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-template-v1.html. type IndicesDeleteTemplate func(name string, o ...func(*IndicesDeleteTemplateRequest)) (*Response, error) // IndicesDeleteTemplateRequest configures the Indices Delete Template API request. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.disk_usage.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.disk_usage.go index bb22f91bf..c27f0cdc9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.disk_usage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.disk_usage.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.downsample.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.downsample.go index 694fb0f39..fd3026c15 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.downsample.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.downsample.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.exists.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.exists.go index c9f7322c7..b8ff71a0b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.exists.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.exists.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.exists_alias.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.exists_alias.go index 1492a399b..d24827d8a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.exists_alias.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.exists_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.exists_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.exists_index_template.go index 533e5daa3..e6edc0bca 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.exists_index_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.exists_index_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -41,7 +41,7 @@ func newIndicesExistsIndexTemplateFunc(t Transport) IndicesExistsIndexTemplate { // IndicesExistsIndexTemplate returns information about whether a particular index template exists. // -// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html. +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/index-templates.html. type IndicesExistsIndexTemplate func(name string, o ...func(*IndicesExistsIndexTemplateRequest)) (*Response, error) // IndicesExistsIndexTemplateRequest configures the Indices Exists Index Template API request. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.exists_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.exists_template.go index a5427075f..ab4b6d5db 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.exists_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.exists_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -42,7 +42,7 @@ func newIndicesExistsTemplateFunc(t Transport) IndicesExistsTemplate { // IndicesExistsTemplate returns information about whether a particular index template exists. // -// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html. +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-template-exists-v1.html. type IndicesExistsTemplate func(name []string, o ...func(*IndicesExistsTemplateRequest)) (*Response, error) // IndicesExistsTemplateRequest configures the Indices Exists Template API request. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.explain_data_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.explain_data_lifecycle.go new file mode 100644 index 000000000..5f816e06a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.explain_data_lifecycle.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" +) + +func newIndicesExplainDataLifecycleFunc(t Transport) IndicesExplainDataLifecycle { + return func(index string, o ...func(*IndicesExplainDataLifecycleRequest)) (*Response, error) { + var r = IndicesExplainDataLifecycleRequest{Index: index} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesExplainDataLifecycle retrieves information about the index's current data stream lifecycle, such as any potential encountered error, time since creation etc. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-explain-lifecycle.html. +type IndicesExplainDataLifecycle func(index string, o ...func(*IndicesExplainDataLifecycleRequest)) (*Response, error) + +// IndicesExplainDataLifecycleRequest configures the Indices Explain Data Lifecycle API request. +type IndicesExplainDataLifecycleRequest struct { + Index string + + IncludeDefaults *bool + MasterTimeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r IndicesExplainDataLifecycleRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + path.Grow(7 + 1 + len(r.Index) + 1 + len("_lifecycle") + 1 + len("explain")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString(r.Index) + path.WriteString("/") + path.WriteString("_lifecycle") + path.WriteString("/") + path.WriteString("explain") + + params = make(map[string]string) + + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f IndicesExplainDataLifecycle) WithContext(v context.Context) func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + r.ctx = v + } +} + +// WithIncludeDefaults - indicates if the api should return the default values the system uses for the index's lifecycle. +func (f IndicesExplainDataLifecycle) WithIncludeDefaults(v bool) func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + r.IncludeDefaults = &v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesExplainDataLifecycle) WithMasterTimeout(v time.Duration) func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesExplainDataLifecycle) WithPretty() func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesExplainDataLifecycle) WithHuman() func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesExplainDataLifecycle) WithErrorTrace() func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesExplainDataLifecycle) WithFilterPath(v ...string) func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f IndicesExplainDataLifecycle) WithHeader(h map[string]string) func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesExplainDataLifecycle) WithOpaqueID(s string) func(*IndicesExplainDataLifecycleRequest) { + return func(r *IndicesExplainDataLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.field_usage_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.field_usage_stats.go index a523771f0..9b6f44117 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.field_usage_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.field_usage_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.flush.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.flush.go index a3214fc74..bef965c22 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.flush.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.flush.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.forcemerge.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.forcemerge.go index df98d1516..c97c2f833 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.forcemerge.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.forcemerge.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get.go index 4a14502f9..d9e41095e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_alias.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_alias.go index b1be843b2..9af6e63b6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_alias.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_data_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_data_lifecycle.go new file mode 100644 index 000000000..e99aa414c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_data_lifecycle.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "net/http" + "strconv" + "strings" +) + +func newIndicesGetDataLifecycleFunc(t Transport) IndicesGetDataLifecycle { + return func(name []string, o ...func(*IndicesGetDataLifecycleRequest)) (*Response, error) { + var r = IndicesGetDataLifecycleRequest{Name: name} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesGetDataLifecycle returns the data stream lifecycle of the selected data streams. +// +// This API is experimental. 
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle.html. +type IndicesGetDataLifecycle func(name []string, o ...func(*IndicesGetDataLifecycleRequest)) (*Response, error) + +// IndicesGetDataLifecycleRequest configures the Indices Get Data Lifecycle API request. +type IndicesGetDataLifecycleRequest struct { + Name []string + + ExpandWildcards string + IncludeDefaults *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. +func (r IndicesGetDataLifecycleRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + if len(r.Name) == 0 { + return nil, errors.New("name is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_data_stream") + 1 + len(strings.Join(r.Name, ",")) + 1 + len("_lifecycle")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + path.WriteString("/") + path.WriteString("_lifecycle") + + params = make(map[string]string) + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if 
len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f IndicesGetDataLifecycle) WithContext(v context.Context) func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + r.ctx = v + } +} + +// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). +func (f IndicesGetDataLifecycle) WithExpandWildcards(v string) func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + r.ExpandWildcards = v + } +} + +// WithIncludeDefaults - return all relevant default configurations for the data stream (default: false). +func (f IndicesGetDataLifecycle) WithIncludeDefaults(v bool) func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + r.IncludeDefaults = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesGetDataLifecycle) WithPretty() func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesGetDataLifecycle) WithHuman() func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f IndicesGetDataLifecycle) WithErrorTrace() func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. 
+func (f IndicesGetDataLifecycle) WithFilterPath(v ...string) func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesGetDataLifecycle) WithHeader(h map[string]string) func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesGetDataLifecycle) WithOpaqueID(s string) func(*IndicesGetDataLifecycleRequest) { + return func(r *IndicesGetDataLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_field_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_field_mapping.go index c52438b39..5b6553d82 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_field_mapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_field_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_index_template.go index 73444d7bf..f0cb5689d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_index_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_index_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -41,16 +41,17 @@ func newIndicesGetIndexTemplateFunc(t Transport) IndicesGetIndexTemplate { // IndicesGetIndexTemplate returns an index template. // -// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html. +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-template.html. type IndicesGetIndexTemplate func(o ...func(*IndicesGetIndexTemplateRequest)) (*Response, error) // IndicesGetIndexTemplateRequest configures the Indices Get Index Template API request. type IndicesGetIndexTemplateRequest struct { Name string - FlatSettings *bool - Local *bool - MasterTimeout time.Duration + FlatSettings *bool + IncludeDefaults *bool + Local *bool + MasterTimeout time.Duration Pretty bool Human bool @@ -87,6 +88,10 @@ func (r IndicesGetIndexTemplateRequest) Do(ctx context.Context, transport Transp params["flat_settings"] = strconv.FormatBool(*r.FlatSettings) } + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + if r.Local != nil { params["local"] = strconv.FormatBool(*r.Local) } @@ -175,6 +180,13 @@ func (f IndicesGetIndexTemplate) WithFlatSettings(v bool) func(*IndicesGetIndexT } } +// WithIncludeDefaults - return all relevant default configurations for the index template (default: false). +func (f IndicesGetIndexTemplate) WithIncludeDefaults(v bool) func(*IndicesGetIndexTemplateRequest) { + return func(r *IndicesGetIndexTemplateRequest) { + r.IncludeDefaults = &v + } +} + // WithLocal - return local information, do not retrieve the state from master node (default: false). 
func (f IndicesGetIndexTemplate) WithLocal(v bool) func(*IndicesGetIndexTemplateRequest) { return func(r *IndicesGetIndexTemplateRequest) { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_mapping.go index e17e9693b..e700c5717 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_mapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_settings.go index a459bb537..cd0f73f0e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_settings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_template.go index 6a22df21d..0b7d9bcf4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.get_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -41,7 +41,7 @@ func newIndicesGetTemplateFunc(t Transport) IndicesGetTemplate { // IndicesGetTemplate returns an index template. // -// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html. +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-template-v1.html. type IndicesGetTemplate func(o ...func(*IndicesGetTemplateRequest)) (*Response, error) // IndicesGetTemplateRequest configures the Indices Get Template API request. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.modify_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.modify_data_stream.go index 155559949..04c2faacf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.modify_data_stream.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.modify_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.open.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.open.go index a008cddf4..bf73f4ae8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.open.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.open.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_alias.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_alias.go index 57398486b..170cfaec0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_alias.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_data_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_data_lifecycle.go new file mode 100644 index 000000000..50b281239 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_data_lifecycle.go @@ -0,0 +1,253 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "errors" + "io" + "net/http" + "strings" + "time" +) + +func newIndicesPutDataLifecycleFunc(t Transport) IndicesPutDataLifecycle { + return func(name []string, o ...func(*IndicesPutDataLifecycleRequest)) (*Response, error) { + var r = IndicesPutDataLifecycleRequest{Name: name} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// IndicesPutDataLifecycle updates the data stream lifecycle of the selected data streams. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-put-lifecycle.html. +type IndicesPutDataLifecycle func(name []string, o ...func(*IndicesPutDataLifecycleRequest)) (*Response, error) + +// IndicesPutDataLifecycleRequest configures the Indices Put Data Lifecycle API request. +type IndicesPutDataLifecycleRequest struct { + Body io.Reader + + Name []string + + ExpandWildcards string + MasterTimeout time.Duration + Timeout time.Duration + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r IndicesPutDataLifecycleRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "PUT" + + if len(r.Name) == 0 { + return nil, errors.New("name is required and cannot be nil or empty") + } + + path.Grow(7 + 1 + len("_data_stream") + 1 + len(strings.Join(r.Name, ",")) + 1 + len("_lifecycle")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + path.WriteString("/") + path.WriteString("_lifecycle") + + params = make(map[string]string) + + if r.ExpandWildcards != "" { + params["expand_wildcards"] = r.ExpandWildcards + } + + if r.MasterTimeout != 0 { + params["master_timeout"] = formatDuration(r.MasterTimeout) + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the 
request context. +func (f IndicesPutDataLifecycle) WithContext(v context.Context) func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.ctx = v + } +} + +// WithBody - The data stream lifecycle configuration that consist of the data retention. +func (f IndicesPutDataLifecycle) WithBody(v io.Reader) func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.Body = v + } +} + +// WithExpandWildcards - whether wildcard expressions should get expanded to open or closed indices (default: open). +func (f IndicesPutDataLifecycle) WithExpandWildcards(v string) func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.ExpandWildcards = v + } +} + +// WithMasterTimeout - specify timeout for connection to master. +func (f IndicesPutDataLifecycle) WithMasterTimeout(v time.Duration) func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.MasterTimeout = v + } +} + +// WithTimeout - explicit timestamp for the document. +func (f IndicesPutDataLifecycle) WithTimeout(v time.Duration) func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.Timeout = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f IndicesPutDataLifecycle) WithPretty() func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f IndicesPutDataLifecycle) WithHuman() func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f IndicesPutDataLifecycle) WithErrorTrace() func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f IndicesPutDataLifecycle) WithFilterPath(v ...string) func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f IndicesPutDataLifecycle) WithHeader(h map[string]string) func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f IndicesPutDataLifecycle) WithOpaqueID(s string) func(*IndicesPutDataLifecycleRequest) { + return func(r *IndicesPutDataLifecycleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_index_template.go index 5270cfd9b..b7232c7e6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_index_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_index_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -42,7 +42,7 @@ func newIndicesPutIndexTemplateFunc(t Transport) IndicesPutIndexTemplate { // IndicesPutIndexTemplate creates or updates an index template. // -// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html. 
+// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-template.html. type IndicesPutIndexTemplate func(name string, body io.Reader, o ...func(*IndicesPutIndexTemplateRequest)) (*Response, error) // IndicesPutIndexTemplateRequest configures the Indices Put Index Template API request. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_mapping.go index e8541b874..30077ba25 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_mapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_settings.go index 4dcf0dfc0..9df13fcc3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_settings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_template.go index f2d92abe3..e87a80af7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.put_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -42,7 +42,7 @@ func newIndicesPutTemplateFunc(t Transport) IndicesPutTemplate { // IndicesPutTemplate creates or updates an index template. // -// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html. +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates-v1.html. type IndicesPutTemplate func(name string, body io.Reader, o ...func(*IndicesPutTemplateRequest)) (*Response, error) // IndicesPutTemplateRequest configures the Indices Put Template API request. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.recovery.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.recovery.go index f2bb275a2..09dfc88b3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.recovery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.recovery.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.refresh.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.refresh.go index 1746879c9..21d4f78c6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.refresh.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.refresh.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.resolve_index.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.resolve_index.go index c22c713dd..aba410930 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.resolve_index.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.resolve_index.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.rollover.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.rollover.go index e61b742c5..d436938bf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.rollover.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.rollover.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.segments.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.segments.go index 883c4e66c..26b461346 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.segments.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.segments.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.shard_stores.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.shard_stores.go index 2e46c0122..ac28971fa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.shard_stores.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.shard_stores.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.shrink.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.shrink.go index d49dc3139..72a2ffa89 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.shrink.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.shrink.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.simulate_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.simulate_index_template.go index 8b1ebc6f3..de7812883 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.simulate_index_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.simulate_index_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -42,7 +42,7 @@ func newIndicesSimulateIndexTemplateFunc(t Transport) IndicesSimulateIndexTempla // IndicesSimulateIndexTemplate simulate matching the given index name against the index templates in the system // -// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html. +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-index.html. type IndicesSimulateIndexTemplate func(name string, o ...func(*IndicesSimulateIndexTemplateRequest)) (*Response, error) // IndicesSimulateIndexTemplateRequest configures the Indices Simulate Index Template API request. 
@@ -51,9 +51,10 @@ type IndicesSimulateIndexTemplateRequest struct { Name string - Cause string - Create *bool - MasterTimeout time.Duration + Cause string + Create *bool + IncludeDefaults *bool + MasterTimeout time.Duration Pretty bool Human bool @@ -94,6 +95,10 @@ func (r IndicesSimulateIndexTemplateRequest) Do(ctx context.Context, transport T params["create"] = strconv.FormatBool(*r.Create) } + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + if r.MasterTimeout != 0 { params["master_timeout"] = formatDuration(r.MasterTimeout) } @@ -189,6 +194,13 @@ func (f IndicesSimulateIndexTemplate) WithCreate(v bool) func(*IndicesSimulateIn } } +// WithIncludeDefaults - return all relevant default configurations for this index template simulation (default: false). +func (f IndicesSimulateIndexTemplate) WithIncludeDefaults(v bool) func(*IndicesSimulateIndexTemplateRequest) { + return func(r *IndicesSimulateIndexTemplateRequest) { + r.IncludeDefaults = &v + } +} + // WithMasterTimeout - specify timeout for connection to master. func (f IndicesSimulateIndexTemplate) WithMasterTimeout(v time.Duration) func(*IndicesSimulateIndexTemplateRequest) { return func(r *IndicesSimulateIndexTemplateRequest) { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.simulate_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.simulate_template.go index e24ef0201..80b0ce27c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.simulate_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.simulate_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -42,7 +42,7 @@ func newIndicesSimulateTemplateFunc(t Transport) IndicesSimulateTemplate { // IndicesSimulateTemplate simulate resolving the given template name or body // -// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html. +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-template.html. type IndicesSimulateTemplate func(o ...func(*IndicesSimulateTemplateRequest)) (*Response, error) // IndicesSimulateTemplateRequest configures the Indices Simulate Template API request. @@ -51,9 +51,10 @@ type IndicesSimulateTemplateRequest struct { Name string - Cause string - Create *bool - MasterTimeout time.Duration + Cause string + Create *bool + IncludeDefaults *bool + MasterTimeout time.Duration Pretty bool Human bool @@ -96,6 +97,10 @@ func (r IndicesSimulateTemplateRequest) Do(ctx context.Context, transport Transp params["create"] = strconv.FormatBool(*r.Create) } + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + if r.MasterTimeout != 0 { params["master_timeout"] = formatDuration(r.MasterTimeout) } @@ -198,6 +203,13 @@ func (f IndicesSimulateTemplate) WithCreate(v bool) func(*IndicesSimulateTemplat } } +// WithIncludeDefaults - return all relevant default configurations for this template simulation (default: false). +func (f IndicesSimulateTemplate) WithIncludeDefaults(v bool) func(*IndicesSimulateTemplateRequest) { + return func(r *IndicesSimulateTemplateRequest) { + r.IncludeDefaults = &v + } +} + // WithMasterTimeout - specify timeout for connection to master. 
func (f IndicesSimulateTemplate) WithMasterTimeout(v time.Duration) func(*IndicesSimulateTemplateRequest) { return func(r *IndicesSimulateTemplateRequest) { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.split.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.split.go index 74c73ab19..476b6cce4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.split.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.split.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.stats.go index 206af44b5..a84b99d79 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.update_aliases.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.update_aliases.go index 455a4e2cf..51419ae2b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.update_aliases.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.update_aliases.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.validate_query.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.validate_query.go index 4b976d812..57a5c744f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.validate_query.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.indices.validate_query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.inference.delete_model.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.inference.delete_model.go new file mode 100644 index 000000000..3210092cb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.inference.delete_model.go @@ -0,0 +1,197 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newInferenceDeleteModelFunc(t Transport) InferenceDeleteModel { + return func(model_id string, task_type string, o ...func(*InferenceDeleteModelRequest)) (*Response, error) { + var r = InferenceDeleteModelRequest{ModelID: model_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceDeleteModel delete model in the Inference API +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-inference-api.html. +type InferenceDeleteModel func(model_id string, task_type string, o ...func(*InferenceDeleteModelRequest)) (*Response, error) + +// InferenceDeleteModelRequest configures the Inference Delete Model API request. +type InferenceDeleteModelRequest struct { + ModelID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r InferenceDeleteModelRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "DELETE" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.ModelID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + path.WriteString("/") + path.WriteString(r.ModelID) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceDeleteModel) WithContext(v context.Context) func(*InferenceDeleteModelRequest) { + return func(r *InferenceDeleteModelRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceDeleteModel) WithPretty() func(*InferenceDeleteModelRequest) { + return func(r *InferenceDeleteModelRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f InferenceDeleteModel) WithHuman() func(*InferenceDeleteModelRequest) { + return func(r *InferenceDeleteModelRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceDeleteModel) WithErrorTrace() func(*InferenceDeleteModelRequest) { + return func(r *InferenceDeleteModelRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceDeleteModel) WithFilterPath(v ...string) func(*InferenceDeleteModelRequest) { + return func(r *InferenceDeleteModelRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceDeleteModel) WithHeader(h map[string]string) func(*InferenceDeleteModelRequest) { + return func(r *InferenceDeleteModelRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceDeleteModel) WithOpaqueID(s string) func(*InferenceDeleteModelRequest) { + return func(r *InferenceDeleteModelRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.inference.get_model.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.inference.get_model.go new file mode 100644 index 000000000..2fd7e665e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.inference.get_model.go @@ -0,0 +1,197 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newInferenceGetModelFunc(t Transport) InferenceGetModel { + return func(model_id string, task_type string, o ...func(*InferenceGetModelRequest)) (*Response, error) { + var r = InferenceGetModelRequest{ModelID: model_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceGetModel get a model in the Inference API +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-inference-api.html. +type InferenceGetModel func(model_id string, task_type string, o ...func(*InferenceGetModelRequest)) (*Response, error) + +// InferenceGetModelRequest configures the Inference Get Model API request. +type InferenceGetModelRequest struct { + ModelID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r InferenceGetModelRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.ModelID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + path.WriteString("/") + path.WriteString(r.ModelID) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceGetModel) WithContext(v context.Context) func(*InferenceGetModelRequest) { + return func(r *InferenceGetModelRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceGetModel) WithPretty() func(*InferenceGetModelRequest) { + return func(r *InferenceGetModelRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f InferenceGetModel) WithHuman() func(*InferenceGetModelRequest) { + return func(r *InferenceGetModelRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceGetModel) WithErrorTrace() func(*InferenceGetModelRequest) { + return func(r *InferenceGetModelRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceGetModel) WithFilterPath(v ...string) func(*InferenceGetModelRequest) { + return func(r *InferenceGetModelRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceGetModel) WithHeader(h map[string]string) func(*InferenceGetModelRequest) { + return func(r *InferenceGetModelRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f InferenceGetModel) WithOpaqueID(s string) func(*InferenceGetModelRequest) { + return func(r *InferenceGetModelRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.inference.inference.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.inference.inference.go new file mode 100644 index 000000000..82e17f5b5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.inference.inference.go @@ -0,0 +1,211 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferenceInferenceFunc(t Transport) InferenceInference { + return func(model_id string, task_type string, o ...func(*InferenceInferenceRequest)) (*Response, error) { + var r = InferenceInferenceRequest{ModelID: model_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferenceInference perform inference on a model +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html. +type InferenceInference func(model_id string, task_type string, o ...func(*InferenceInferenceRequest)) (*Response, error) + +// InferenceInferenceRequest configures the Inference Inference API request. +type InferenceInferenceRequest struct { + Body io.Reader + + ModelID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r InferenceInferenceRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "POST" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.ModelID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + path.WriteString("/") + path.WriteString(r.ModelID) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferenceInference) WithContext(v context.Context) func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + r.ctx = v + } +} + +// WithBody - The inference payload. 
+func (f InferenceInference) WithBody(v io.Reader) func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferenceInference) WithPretty() func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferenceInference) WithHuman() func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferenceInference) WithErrorTrace() func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferenceInference) WithFilterPath(v ...string) func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferenceInference) WithHeader(h map[string]string) func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f InferenceInference) WithOpaqueID(s string) func(*InferenceInferenceRequest) { + return func(r *InferenceInferenceRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.inference.put_model.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.inference.put_model.go new file mode 100644 index 000000000..7e5cc99f9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.inference.put_model.go @@ -0,0 +1,211 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newInferencePutModelFunc(t Transport) InferencePutModel { + return func(model_id string, task_type string, o ...func(*InferencePutModelRequest)) (*Response, error) { + var r = InferencePutModelRequest{ModelID: model_id, TaskType: task_type} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// InferencePutModel configure a model for use in the Inference API +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-inference-api.html. +type InferencePutModel func(model_id string, task_type string, o ...func(*InferencePutModelRequest)) (*Response, error) + +// InferencePutModelRequest configures the Inference Put Model API request. +type InferencePutModelRequest struct { + Body io.Reader + + ModelID string + TaskType string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r InferencePutModelRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "PUT" + + path.Grow(7 + 1 + len("_inference") + 1 + len(r.TaskType) + 1 + len(r.ModelID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_inference") + path.WriteString("/") + path.WriteString(r.TaskType) + path.WriteString("/") + path.WriteString(r.ModelID) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f InferencePutModel) WithContext(v context.Context) func(*InferencePutModelRequest) { + return func(r *InferencePutModelRequest) { + r.ctx = v + } +} + +// WithBody - The model's task and service settings. 
+func (f InferencePutModel) WithBody(v io.Reader) func(*InferencePutModelRequest) { + return func(r *InferencePutModelRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f InferencePutModel) WithPretty() func(*InferencePutModelRequest) { + return func(r *InferencePutModelRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f InferencePutModel) WithHuman() func(*InferencePutModelRequest) { + return func(r *InferencePutModelRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f InferencePutModel) WithErrorTrace() func(*InferencePutModelRequest) { + return func(r *InferencePutModelRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f InferencePutModel) WithFilterPath(v ...string) func(*InferencePutModelRequest) { + return func(r *InferencePutModelRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f InferencePutModel) WithHeader(h map[string]string) func(*InferencePutModelRequest) { + return func(r *InferencePutModelRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f InferencePutModel) WithOpaqueID(s string) func(*InferencePutModelRequest) { + return func(r *InferencePutModelRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.info.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.info.go index c6ae08324..d24d44302 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.info.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.delete_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.delete_pipeline.go index b2d597cbd..26c22c3c9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.delete_pipeline.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.delete_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.geo_ip_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.geo_ip_stats.go index 4184f36c2..04072692a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.geo_ip_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.geo_ip_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.get_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.get_pipeline.go index 95e491228..6b58c5de4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.get_pipeline.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.get_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.processor_grok.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.processor_grok.go index 4051694e4..63217bf8c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.processor_grok.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.processor_grok.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.put_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.put_pipeline.go index 7ea34961a..c291accf3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.put_pipeline.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.put_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.simulate.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.simulate.go index 6a3d4ddf1..3b5f3ee0f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.simulate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ingest.simulate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.knn_search.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.knn_search.go index 43f99fe1c..a70e6e5a1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.knn_search.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.knn_search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.mget.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.mget.go index d18150a13..eb6054ac1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.mget.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.mget.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.msearch.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.msearch.go index 2502af42e..77ca3de34 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.msearch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.msearch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.msearch_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.msearch_template.go index 8deef4219..414d6d130 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.msearch_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.msearch_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.mtermvectors.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.mtermvectors.go index f24c3ed5c..3df6b2d24 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.mtermvectors.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.mtermvectors.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.clear_repositories_metering_archive.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.clear_repositories_metering_archive.go index 3c56209c9..43d8a6c83 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.clear_repositories_metering_archive.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.clear_repositories_metering_archive.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.get_repositories_metering_info.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.get_repositories_metering_info.go index 530b7295a..53b965d25 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.get_repositories_metering_info.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.get_repositories_metering_info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.hot_threads.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.hot_threads.go index 9c7979786..5904193c2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.hot_threads.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.hot_threads.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.info.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.info.go index 9586e28a8..354c9f7ae 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.info.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.reload_secure_settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.reload_secure_settings.go index c80ed4625..7f5312d7f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.reload_secure_settings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.reload_secure_settings.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.stats.go index 74e1d1672..73e4860ec 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.usage.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.usage.go index c2dd5ab08..720411919 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.usage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.nodes.usage.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ping.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ping.go index 176765cf4..4b8039673 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.ping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.put_script.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.put_script.go index d146bee32..887124393 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.put_script.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.put_script.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.query_ruleset.delete.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.query_ruleset.delete.go new file mode 100644 index 000000000..72fcbaf9d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.query_ruleset.delete.go @@ -0,0 +1,194 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newQueryRulesetDeleteFunc(t Transport) QueryRulesetDelete { + return func(ruleset_id string, o ...func(*QueryRulesetDeleteRequest)) (*Response, error) { + var r = QueryRulesetDeleteRequest{RulesetID: ruleset_id} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// QueryRulesetDelete deletes a query ruleset. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-ruleset.html. +type QueryRulesetDelete func(ruleset_id string, o ...func(*QueryRulesetDeleteRequest)) (*Response, error) + +// QueryRulesetDeleteRequest configures the Query Ruleset Delete API request. +type QueryRulesetDeleteRequest struct { + RulesetID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r QueryRulesetDeleteRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "DELETE" + + path.Grow(7 + 1 + len("_query_rules") + 1 + len(r.RulesetID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + path.WriteString(r.RulesetID) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f QueryRulesetDelete) WithContext(v context.Context) func(*QueryRulesetDeleteRequest) { + return func(r *QueryRulesetDeleteRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f QueryRulesetDelete) WithPretty() func(*QueryRulesetDeleteRequest) { + return func(r *QueryRulesetDeleteRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f QueryRulesetDelete) WithHuman() func(*QueryRulesetDeleteRequest) { + return func(r *QueryRulesetDeleteRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f QueryRulesetDelete) WithErrorTrace() func(*QueryRulesetDeleteRequest) { + return func(r *QueryRulesetDeleteRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f QueryRulesetDelete) WithFilterPath(v ...string) func(*QueryRulesetDeleteRequest) { + return func(r *QueryRulesetDeleteRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f QueryRulesetDelete) WithHeader(h map[string]string) func(*QueryRulesetDeleteRequest) { + return func(r *QueryRulesetDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f QueryRulesetDelete) WithOpaqueID(s string) func(*QueryRulesetDeleteRequest) { + return func(r *QueryRulesetDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.query_ruleset.get.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.query_ruleset.get.go new file mode 100644 index 000000000..7ac8ed363 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.query_ruleset.get.go @@ -0,0 +1,194 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newQueryRulesetGetFunc(t Transport) QueryRulesetGet { + return func(ruleset_id string, o ...func(*QueryRulesetGetRequest)) (*Response, error) { + var r = QueryRulesetGetRequest{RulesetID: ruleset_id} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// QueryRulesetGet returns the details about a query ruleset. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-query-ruleset.html. +type QueryRulesetGet func(ruleset_id string, o ...func(*QueryRulesetGetRequest)) (*Response, error) + +// QueryRulesetGetRequest configures the Query Ruleset Get API request. +type QueryRulesetGetRequest struct { + RulesetID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r QueryRulesetGetRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + path.Grow(7 + 1 + len("_query_rules") + 1 + len(r.RulesetID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + path.WriteString(r.RulesetID) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f QueryRulesetGet) WithContext(v context.Context) func(*QueryRulesetGetRequest) { + return func(r *QueryRulesetGetRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f QueryRulesetGet) WithPretty() func(*QueryRulesetGetRequest) { + return func(r *QueryRulesetGetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f QueryRulesetGet) WithHuman() func(*QueryRulesetGetRequest) { + return func(r *QueryRulesetGetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f QueryRulesetGet) WithErrorTrace() func(*QueryRulesetGetRequest) { + return func(r *QueryRulesetGetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f QueryRulesetGet) WithFilterPath(v ...string) func(*QueryRulesetGetRequest) { + return func(r *QueryRulesetGetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f QueryRulesetGet) WithHeader(h map[string]string) func(*QueryRulesetGetRequest) { + return func(r *QueryRulesetGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f QueryRulesetGet) WithOpaqueID(s string) func(*QueryRulesetGetRequest) { + return func(r *QueryRulesetGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.query_ruleset.list.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.query_ruleset.list.go new file mode 100644 index 000000000..cc75849ce --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.query_ruleset.list.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newQueryRulesetListFunc(t Transport) QueryRulesetList { + return func(o ...func(*QueryRulesetListRequest)) (*Response, error) { + var r = QueryRulesetListRequest{} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// QueryRulesetList lists query rulesets. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/list-query-rulesets.html. +type QueryRulesetList func(o ...func(*QueryRulesetListRequest)) (*Response, error) + +// QueryRulesetListRequest configures the Query Ruleset List API request. +type QueryRulesetListRequest struct { + From *int + Size *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r QueryRulesetListRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + path.Grow(7 + len("/_query_rules")) + path.WriteString("http://") + path.WriteString("/_query_rules") + + params = make(map[string]string) + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f QueryRulesetList) WithContext(v context.Context) func(*QueryRulesetListRequest) { + return func(r *QueryRulesetListRequest) { + r.ctx = v + } +} + +// WithFrom - starting offset (default: 0). +func (f QueryRulesetList) WithFrom(v int) func(*QueryRulesetListRequest) { + return func(r *QueryRulesetListRequest) { + r.From = &v + } +} + +// WithSize - specifies a max number of results to get (default: 100). 
+func (f QueryRulesetList) WithSize(v int) func(*QueryRulesetListRequest) { + return func(r *QueryRulesetListRequest) { + r.Size = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f QueryRulesetList) WithPretty() func(*QueryRulesetListRequest) { + return func(r *QueryRulesetListRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f QueryRulesetList) WithHuman() func(*QueryRulesetListRequest) { + return func(r *QueryRulesetListRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f QueryRulesetList) WithErrorTrace() func(*QueryRulesetListRequest) { + return func(r *QueryRulesetListRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f QueryRulesetList) WithFilterPath(v ...string) func(*QueryRulesetListRequest) { + return func(r *QueryRulesetListRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f QueryRulesetList) WithHeader(h map[string]string) func(*QueryRulesetListRequest) { + return func(r *QueryRulesetListRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f QueryRulesetList) WithOpaqueID(s string) func(*QueryRulesetListRequest) { + return func(r *QueryRulesetListRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.query_ruleset.put.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.query_ruleset.put.go new file mode 100644 index 000000000..c201a832f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.query_ruleset.put.go @@ -0,0 +1,201 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newQueryRulesetPutFunc(t Transport) QueryRulesetPut { + return func(body io.Reader, ruleset_id string, o ...func(*QueryRulesetPutRequest)) (*Response, error) { + var r = QueryRulesetPutRequest{Body: body, RulesetID: ruleset_id} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// QueryRulesetPut creates or updates a query ruleset. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-ruleset.html. +type QueryRulesetPut func(body io.Reader, ruleset_id string, o ...func(*QueryRulesetPutRequest)) (*Response, error) + +// QueryRulesetPutRequest configures the Query Ruleset Put API request. +type QueryRulesetPutRequest struct { + Body io.Reader + + RulesetID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r QueryRulesetPutRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "PUT" + + path.Grow(7 + 1 + len("_query_rules") + 1 + len(r.RulesetID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + path.WriteString(r.RulesetID) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f QueryRulesetPut) WithContext(v context.Context) func(*QueryRulesetPutRequest) { + return func(r *QueryRulesetPutRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f QueryRulesetPut) WithPretty() func(*QueryRulesetPutRequest) { + return func(r *QueryRulesetPutRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f QueryRulesetPut) WithHuman() func(*QueryRulesetPutRequest) { + return func(r *QueryRulesetPutRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f QueryRulesetPut) WithErrorTrace() func(*QueryRulesetPutRequest) { + return func(r *QueryRulesetPutRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f QueryRulesetPut) WithFilterPath(v ...string) func(*QueryRulesetPutRequest) { + return func(r *QueryRulesetPutRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f QueryRulesetPut) WithHeader(h map[string]string) func(*QueryRulesetPutRequest) { + return func(r *QueryRulesetPutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f QueryRulesetPut) WithOpaqueID(s string) func(*QueryRulesetPutRequest) { + return func(r *QueryRulesetPutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.rank_eval.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.rank_eval.go index 6901956e7..5242a226c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.rank_eval.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.rank_eval.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.reindex.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.reindex.go index 52c6f3b53..68c250882 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.reindex.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.reindex.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.reindex_rethrottle.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.reindex_rethrottle.go index 404616d9f..80b442f58 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.reindex_rethrottle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.reindex_rethrottle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.render_search_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.render_search_template.go index a0f499967..cf120cb0e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.render_search_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.render_search_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.scripts_painless_execute.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.scripts_painless_execute.go index 472be3e3e..8d4b88a84 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.scripts_painless_execute.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.scripts_painless_execute.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.scroll.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.scroll.go index fbbba2430..cc21f5a7a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.scroll.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.scroll.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search.go index 2b69de30b..e641c9d0a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -67,6 +67,7 @@ type SearchRequest struct { From *int IgnoreThrottled *bool IgnoreUnavailable *bool + IncludeNamedQueriesScore *bool Lenient *bool MaxConcurrentShardRequests *int MinCompatibleShardNode string @@ -188,6 +189,10 @@ func (r SearchRequest) Do(ctx context.Context, transport Transport) (*Response, params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) } + if r.IncludeNamedQueriesScore != nil { + params["include_named_queries_score"] = strconv.FormatBool(*r.IncludeNamedQueriesScore) + } + if r.Lenient != nil { params["lenient"] = strconv.FormatBool(*r.Lenient) } @@ -493,6 +498,13 @@ func (f Search) WithIgnoreUnavailable(v bool) func(*SearchRequest) { } } +// WithIncludeNamedQueriesScore - indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false). +func (f Search) WithIncludeNamedQueriesScore(v bool) func(*SearchRequest) { + return func(r *SearchRequest) { + r.IncludeNamedQueriesScore = &v + } +} + // WithLenient - specify whether format-based query failures (such as providing text to a numeric field) should be ignored. func (f Search) WithLenient(v bool) func(*SearchRequest) { return func(r *SearchRequest) { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.delete.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.delete.go new file mode 100644 index 000000000..b56695a9b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.delete.go @@ -0,0 +1,196 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSearchApplicationDeleteFunc(t Transport) SearchApplicationDelete { + return func(name string, o ...func(*SearchApplicationDeleteRequest)) (*Response, error) { + var r = SearchApplicationDeleteRequest{Name: name} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationDelete deletes a search application. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-search-application.html. +type SearchApplicationDelete func(name string, o ...func(*SearchApplicationDeleteRequest)) (*Response, error) + +// SearchApplicationDeleteRequest configures the Search Application Delete API request. +type SearchApplicationDeleteRequest struct { + Name string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SearchApplicationDeleteRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "DELETE" + + path.Grow(7 + 1 + len("_application") + 1 + len("search_application") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + path.WriteString(r.Name) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationDelete) WithContext(v context.Context) func(*SearchApplicationDeleteRequest) { + return func(r *SearchApplicationDeleteRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationDelete) WithPretty() func(*SearchApplicationDeleteRequest) { + return func(r *SearchApplicationDeleteRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SearchApplicationDelete) WithHuman() func(*SearchApplicationDeleteRequest) { + return func(r *SearchApplicationDeleteRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationDelete) WithErrorTrace() func(*SearchApplicationDeleteRequest) { + return func(r *SearchApplicationDeleteRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationDelete) WithFilterPath(v ...string) func(*SearchApplicationDeleteRequest) { + return func(r *SearchApplicationDeleteRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationDelete) WithHeader(h map[string]string) func(*SearchApplicationDeleteRequest) { + return func(r *SearchApplicationDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchApplicationDelete) WithOpaqueID(s string) func(*SearchApplicationDeleteRequest) { + return func(r *SearchApplicationDeleteRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.delete_behavioral_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.delete_behavioral_analytics.go new file mode 100644 index 000000000..5f309e49a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.delete_behavioral_analytics.go @@ -0,0 +1,196 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSearchApplicationDeleteBehavioralAnalyticsFunc(t Transport) SearchApplicationDeleteBehavioralAnalytics { + return func(name string, o ...func(*SearchApplicationDeleteBehavioralAnalyticsRequest)) (*Response, error) { + var r = SearchApplicationDeleteBehavioralAnalyticsRequest{Name: name} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationDeleteBehavioralAnalytics delete a behavioral analytics collection. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-analytics-collection.html. +type SearchApplicationDeleteBehavioralAnalytics func(name string, o ...func(*SearchApplicationDeleteBehavioralAnalyticsRequest)) (*Response, error) + +// SearchApplicationDeleteBehavioralAnalyticsRequest configures the Search Application Delete Behavioral Analytics API request. +type SearchApplicationDeleteBehavioralAnalyticsRequest struct { + Name string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SearchApplicationDeleteBehavioralAnalyticsRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "DELETE" + + path.Grow(7 + 1 + len("_application") + 1 + len("analytics") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + path.WriteString(r.Name) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationDeleteBehavioralAnalytics) WithContext(v context.Context) func(*SearchApplicationDeleteBehavioralAnalyticsRequest) { + return func(r *SearchApplicationDeleteBehavioralAnalyticsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f SearchApplicationDeleteBehavioralAnalytics) WithPretty() func(*SearchApplicationDeleteBehavioralAnalyticsRequest) { + return func(r *SearchApplicationDeleteBehavioralAnalyticsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchApplicationDeleteBehavioralAnalytics) WithHuman() func(*SearchApplicationDeleteBehavioralAnalyticsRequest) { + return func(r *SearchApplicationDeleteBehavioralAnalyticsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationDeleteBehavioralAnalytics) WithErrorTrace() func(*SearchApplicationDeleteBehavioralAnalyticsRequest) { + return func(r *SearchApplicationDeleteBehavioralAnalyticsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationDeleteBehavioralAnalytics) WithFilterPath(v ...string) func(*SearchApplicationDeleteBehavioralAnalyticsRequest) { + return func(r *SearchApplicationDeleteBehavioralAnalyticsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationDeleteBehavioralAnalytics) WithHeader(h map[string]string) func(*SearchApplicationDeleteBehavioralAnalyticsRequest) { + return func(r *SearchApplicationDeleteBehavioralAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SearchApplicationDeleteBehavioralAnalytics) WithOpaqueID(s string) func(*SearchApplicationDeleteBehavioralAnalyticsRequest) { + return func(r *SearchApplicationDeleteBehavioralAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.get.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.get.go new file mode 100644 index 000000000..8d5d70890 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.get.go @@ -0,0 +1,196 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSearchApplicationGetFunc(t Transport) SearchApplicationGet { + return func(name string, o ...func(*SearchApplicationGetRequest)) (*Response, error) { + var r = SearchApplicationGetRequest{Name: name} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationGet returns the details about a search application. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-search-application.html. +type SearchApplicationGet func(name string, o ...func(*SearchApplicationGetRequest)) (*Response, error) + +// SearchApplicationGetRequest configures the Search Application Get API request. +type SearchApplicationGetRequest struct { + Name string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SearchApplicationGetRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + path.Grow(7 + 1 + len("_application") + 1 + len("search_application") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + path.WriteString(r.Name) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationGet) WithContext(v context.Context) func(*SearchApplicationGetRequest) { + return func(r *SearchApplicationGetRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationGet) WithPretty() func(*SearchApplicationGetRequest) { + return func(r *SearchApplicationGetRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SearchApplicationGet) WithHuman() func(*SearchApplicationGetRequest) { + return func(r *SearchApplicationGetRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationGet) WithErrorTrace() func(*SearchApplicationGetRequest) { + return func(r *SearchApplicationGetRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationGet) WithFilterPath(v ...string) func(*SearchApplicationGetRequest) { + return func(r *SearchApplicationGetRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationGet) WithHeader(h map[string]string) func(*SearchApplicationGetRequest) { + return func(r *SearchApplicationGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchApplicationGet) WithOpaqueID(s string) func(*SearchApplicationGetRequest) { + return func(r *SearchApplicationGetRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.get_behavioral_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.get_behavioral_analytics.go new file mode 100644 index 000000000..294756fd5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.get_behavioral_analytics.go @@ -0,0 +1,205 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSearchApplicationGetBehavioralAnalyticsFunc(t Transport) SearchApplicationGetBehavioralAnalytics { + return func(o ...func(*SearchApplicationGetBehavioralAnalyticsRequest)) (*Response, error) { + var r = SearchApplicationGetBehavioralAnalyticsRequest{} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationGetBehavioralAnalytics returns the existing behavioral analytics collections. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/list-analytics-collection.html. +type SearchApplicationGetBehavioralAnalytics func(o ...func(*SearchApplicationGetBehavioralAnalyticsRequest)) (*Response, error) + +// SearchApplicationGetBehavioralAnalyticsRequest configures the Search Application Get Behavioral Analytics API request. +type SearchApplicationGetBehavioralAnalyticsRequest struct { + Name []string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SearchApplicationGetBehavioralAnalyticsRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + path.Grow(7 + 1 + len("_application") + 1 + len("analytics") + 1 + len(strings.Join(r.Name, ","))) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + if len(r.Name) > 0 { + path.WriteString("/") + path.WriteString(strings.Join(r.Name, ",")) + } + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationGetBehavioralAnalytics) WithContext(v context.Context) func(*SearchApplicationGetBehavioralAnalyticsRequest) { + return func(r *SearchApplicationGetBehavioralAnalyticsRequest) { + r.ctx = v + } +} + +// WithName - a list of analytics collections to limit the returned information. 
+func (f SearchApplicationGetBehavioralAnalytics) WithName(v ...string) func(*SearchApplicationGetBehavioralAnalyticsRequest) { + return func(r *SearchApplicationGetBehavioralAnalyticsRequest) { + r.Name = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationGetBehavioralAnalytics) WithPretty() func(*SearchApplicationGetBehavioralAnalyticsRequest) { + return func(r *SearchApplicationGetBehavioralAnalyticsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchApplicationGetBehavioralAnalytics) WithHuman() func(*SearchApplicationGetBehavioralAnalyticsRequest) { + return func(r *SearchApplicationGetBehavioralAnalyticsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationGetBehavioralAnalytics) WithErrorTrace() func(*SearchApplicationGetBehavioralAnalyticsRequest) { + return func(r *SearchApplicationGetBehavioralAnalyticsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationGetBehavioralAnalytics) WithFilterPath(v ...string) func(*SearchApplicationGetBehavioralAnalyticsRequest) { + return func(r *SearchApplicationGetBehavioralAnalyticsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationGetBehavioralAnalytics) WithHeader(h map[string]string) func(*SearchApplicationGetBehavioralAnalyticsRequest) { + return func(r *SearchApplicationGetBehavioralAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SearchApplicationGetBehavioralAnalytics) WithOpaqueID(s string) func(*SearchApplicationGetBehavioralAnalyticsRequest) { + return func(r *SearchApplicationGetBehavioralAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.list.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.list.go new file mode 100644 index 000000000..40638bc61 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.list.go @@ -0,0 +1,227 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newSearchApplicationListFunc(t Transport) SearchApplicationList { + return func(o ...func(*SearchApplicationListRequest)) (*Response, error) { + var r = SearchApplicationListRequest{} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationList returns the existing search applications. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/list-search-applications.html. +type SearchApplicationList func(o ...func(*SearchApplicationListRequest)) (*Response, error) + +// SearchApplicationListRequest configures the Search Application List API request. +type SearchApplicationListRequest struct { + From *int + Query string + Size *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SearchApplicationListRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + path.Grow(7 + len("/_application/search_application")) + path.WriteString("http://") + path.WriteString("/_application/search_application") + + params = make(map[string]string) + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Query != "" { + params["q"] = r.Query + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationList) WithContext(v context.Context) func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + r.ctx = v + } +} + +// WithFrom - starting offset (default: 0). +func (f SearchApplicationList) WithFrom(v int) func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + r.From = &v + } +} + +// WithQuery - query in the lucene query string syntax. 
+func (f SearchApplicationList) WithQuery(v string) func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + r.Query = v + } +} + +// WithSize - specifies a max number of results to get. +func (f SearchApplicationList) WithSize(v int) func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + r.Size = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationList) WithPretty() func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchApplicationList) WithHuman() func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationList) WithErrorTrace() func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationList) WithFilterPath(v ...string) func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationList) WithHeader(h map[string]string) func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SearchApplicationList) WithOpaqueID(s string) func(*SearchApplicationListRequest) { + return func(r *SearchApplicationListRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.post_behavioral_analytics_event.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.post_behavioral_analytics_event.go new file mode 100644 index 000000000..5117067bf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.post_behavioral_analytics_event.go @@ -0,0 +1,222 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newSearchApplicationPostBehavioralAnalyticsEventFunc(t Transport) SearchApplicationPostBehavioralAnalyticsEvent { + return func(body io.Reader, collection_name string, event_type string, o ...func(*SearchApplicationPostBehavioralAnalyticsEventRequest)) (*Response, error) { + var r = SearchApplicationPostBehavioralAnalyticsEventRequest{Body: body, CollectionName: collection_name, EventType: event_type} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationPostBehavioralAnalyticsEvent creates a behavioral analytics event for existing collection. +// +// This API is experimental. +// +// See full documentation at http://todo.com/tbd. +type SearchApplicationPostBehavioralAnalyticsEvent func(body io.Reader, collection_name string, event_type string, o ...func(*SearchApplicationPostBehavioralAnalyticsEventRequest)) (*Response, error) + +// SearchApplicationPostBehavioralAnalyticsEventRequest configures the Search Application Post Behavioral Analytics Event API request. +type SearchApplicationPostBehavioralAnalyticsEventRequest struct { + Body io.Reader + + CollectionName string + EventType string + + Debug *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SearchApplicationPostBehavioralAnalyticsEventRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "POST" + + path.Grow(7 + 1 + len("_application") + 1 + len("analytics") + 1 + len(r.CollectionName) + 1 + len("event") + 1 + len(r.EventType)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + path.WriteString(r.CollectionName) + path.WriteString("/") + path.WriteString("event") + path.WriteString("/") + path.WriteString(r.EventType) + + params = make(map[string]string) + + if r.Debug != nil { + params["debug"] = strconv.FormatBool(*r.Debug) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f SearchApplicationPostBehavioralAnalyticsEvent) WithContext(v context.Context) func(*SearchApplicationPostBehavioralAnalyticsEventRequest) { + return func(r *SearchApplicationPostBehavioralAnalyticsEventRequest) { + r.ctx = v + } +} + +// WithDebug - if true, returns event information that will be stored. +func (f SearchApplicationPostBehavioralAnalyticsEvent) WithDebug(v bool) func(*SearchApplicationPostBehavioralAnalyticsEventRequest) { + return func(r *SearchApplicationPostBehavioralAnalyticsEventRequest) { + r.Debug = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationPostBehavioralAnalyticsEvent) WithPretty() func(*SearchApplicationPostBehavioralAnalyticsEventRequest) { + return func(r *SearchApplicationPostBehavioralAnalyticsEventRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchApplicationPostBehavioralAnalyticsEvent) WithHuman() func(*SearchApplicationPostBehavioralAnalyticsEventRequest) { + return func(r *SearchApplicationPostBehavioralAnalyticsEventRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationPostBehavioralAnalyticsEvent) WithErrorTrace() func(*SearchApplicationPostBehavioralAnalyticsEventRequest) { + return func(r *SearchApplicationPostBehavioralAnalyticsEventRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationPostBehavioralAnalyticsEvent) WithFilterPath(v ...string) func(*SearchApplicationPostBehavioralAnalyticsEventRequest) { + return func(r *SearchApplicationPostBehavioralAnalyticsEventRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. 
+func (f SearchApplicationPostBehavioralAnalyticsEvent) WithHeader(h map[string]string) func(*SearchApplicationPostBehavioralAnalyticsEventRequest) { + return func(r *SearchApplicationPostBehavioralAnalyticsEventRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SearchApplicationPostBehavioralAnalyticsEvent) WithOpaqueID(s string) func(*SearchApplicationPostBehavioralAnalyticsEventRequest) { + return func(r *SearchApplicationPostBehavioralAnalyticsEventRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.put.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.put.go new file mode 100644 index 000000000..bf1600d0b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.put.go @@ -0,0 +1,217 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strconv" + "strings" +) + +func newSearchApplicationPutFunc(t Transport) SearchApplicationPut { + return func(name string, body io.Reader, o ...func(*SearchApplicationPutRequest)) (*Response, error) { + var r = SearchApplicationPutRequest{Name: name, Body: body} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationPut creates or updates a search application. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-search-application.html. +type SearchApplicationPut func(name string, body io.Reader, o ...func(*SearchApplicationPutRequest)) (*Response, error) + +// SearchApplicationPutRequest configures the Search Application Put API request. +type SearchApplicationPutRequest struct { + Body io.Reader + + Name string + + Create *bool + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SearchApplicationPutRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "PUT" + + path.Grow(7 + 1 + len("_application") + 1 + len("search_application") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + path.WriteString(r.Name) + + params = make(map[string]string) + + if r.Create != nil { + params["create"] = strconv.FormatBool(*r.Create) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationPut) WithContext(v context.Context) func(*SearchApplicationPutRequest) { + return func(r *SearchApplicationPutRequest) { + r.ctx = v + } +} + +// WithCreate - if true, requires that a search application with the specified resource_id does not already exist. 
(default: false). +func (f SearchApplicationPut) WithCreate(v bool) func(*SearchApplicationPutRequest) { + return func(r *SearchApplicationPutRequest) { + r.Create = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationPut) WithPretty() func(*SearchApplicationPutRequest) { + return func(r *SearchApplicationPutRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchApplicationPut) WithHuman() func(*SearchApplicationPutRequest) { + return func(r *SearchApplicationPutRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationPut) WithErrorTrace() func(*SearchApplicationPutRequest) { + return func(r *SearchApplicationPutRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationPut) WithFilterPath(v ...string) func(*SearchApplicationPutRequest) { + return func(r *SearchApplicationPutRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationPut) WithHeader(h map[string]string) func(*SearchApplicationPutRequest) { + return func(r *SearchApplicationPutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SearchApplicationPut) WithOpaqueID(s string) func(*SearchApplicationPutRequest) { + return func(r *SearchApplicationPutRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.put_behavioral_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.put_behavioral_analytics.go new file mode 100644 index 000000000..8a6671e81 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.put_behavioral_analytics.go @@ -0,0 +1,196 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSearchApplicationPutBehavioralAnalyticsFunc(t Transport) SearchApplicationPutBehavioralAnalytics { + return func(name string, o ...func(*SearchApplicationPutBehavioralAnalyticsRequest)) (*Response, error) { + var r = SearchApplicationPutBehavioralAnalyticsRequest{Name: name} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationPutBehavioralAnalytics creates a behavioral analytics collection. +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-analytics-collection.html. +type SearchApplicationPutBehavioralAnalytics func(name string, o ...func(*SearchApplicationPutBehavioralAnalyticsRequest)) (*Response, error) + +// SearchApplicationPutBehavioralAnalyticsRequest configures the Search Application Put Behavioral Analytics API request. +type SearchApplicationPutBehavioralAnalyticsRequest struct { + Name string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SearchApplicationPutBehavioralAnalyticsRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "PUT" + + path.Grow(7 + 1 + len("_application") + 1 + len("analytics") + 1 + len(r.Name)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + path.WriteString(r.Name) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationPutBehavioralAnalytics) WithContext(v context.Context) func(*SearchApplicationPutBehavioralAnalyticsRequest) { + return func(r *SearchApplicationPutBehavioralAnalyticsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f SearchApplicationPutBehavioralAnalytics) WithPretty() func(*SearchApplicationPutBehavioralAnalyticsRequest) { + return func(r *SearchApplicationPutBehavioralAnalyticsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchApplicationPutBehavioralAnalytics) WithHuman() func(*SearchApplicationPutBehavioralAnalyticsRequest) { + return func(r *SearchApplicationPutBehavioralAnalyticsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationPutBehavioralAnalytics) WithErrorTrace() func(*SearchApplicationPutBehavioralAnalyticsRequest) { + return func(r *SearchApplicationPutBehavioralAnalyticsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationPutBehavioralAnalytics) WithFilterPath(v ...string) func(*SearchApplicationPutBehavioralAnalyticsRequest) { + return func(r *SearchApplicationPutBehavioralAnalyticsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationPutBehavioralAnalytics) WithHeader(h map[string]string) func(*SearchApplicationPutBehavioralAnalyticsRequest) { + return func(r *SearchApplicationPutBehavioralAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SearchApplicationPutBehavioralAnalytics) WithOpaqueID(s string) func(*SearchApplicationPutBehavioralAnalyticsRequest) { + return func(r *SearchApplicationPutBehavioralAnalyticsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.render_query.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.render_query.go new file mode 100644 index 000000000..27cd75d64 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.render_query.go @@ -0,0 +1,212 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSearchApplicationRenderQueryFunc(t Transport) SearchApplicationRenderQuery { + return func(name string, o ...func(*SearchApplicationRenderQueryRequest)) (*Response, error) { + var r = SearchApplicationRenderQueryRequest{Name: name} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationRenderQuery renders a query for given search application search parameters +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-render-query.html. +type SearchApplicationRenderQuery func(name string, o ...func(*SearchApplicationRenderQueryRequest)) (*Response, error) + +// SearchApplicationRenderQueryRequest configures the Search Application Render Query API request. +type SearchApplicationRenderQueryRequest struct { + Body io.Reader + + Name string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SearchApplicationRenderQueryRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "POST" + + path.Grow(7 + 1 + len("_application") + 1 + len("search_application") + 1 + len(r.Name) + 1 + len("_render_query")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + path.WriteString(r.Name) + path.WriteString("/") + path.WriteString("_render_query") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f SearchApplicationRenderQuery) WithContext(v context.Context) func(*SearchApplicationRenderQueryRequest) { + return func(r *SearchApplicationRenderQueryRequest) { + r.ctx = v + } +} + +// WithBody - Search parameters, which will override any default search parameters defined in the search application template. +func (f SearchApplicationRenderQuery) WithBody(v io.Reader) func(*SearchApplicationRenderQueryRequest) { + return func(r *SearchApplicationRenderQueryRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationRenderQuery) WithPretty() func(*SearchApplicationRenderQueryRequest) { + return func(r *SearchApplicationRenderQueryRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchApplicationRenderQuery) WithHuman() func(*SearchApplicationRenderQueryRequest) { + return func(r *SearchApplicationRenderQueryRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationRenderQuery) WithErrorTrace() func(*SearchApplicationRenderQueryRequest) { + return func(r *SearchApplicationRenderQueryRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationRenderQuery) WithFilterPath(v ...string) func(*SearchApplicationRenderQueryRequest) { + return func(r *SearchApplicationRenderQueryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationRenderQuery) WithHeader(h map[string]string) func(*SearchApplicationRenderQueryRequest) { + return func(r *SearchApplicationRenderQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SearchApplicationRenderQuery) WithOpaqueID(s string) func(*SearchApplicationRenderQueryRequest) { + return func(r *SearchApplicationRenderQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.search.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.search.go new file mode 100644 index 000000000..39b232401 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_application.search.go @@ -0,0 +1,212 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSearchApplicationSearchFunc(t Transport) SearchApplicationSearch { + return func(name string, o ...func(*SearchApplicationSearchRequest)) (*Response, error) { + var r = SearchApplicationSearchRequest{Name: name} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SearchApplicationSearch perform a search against a search application +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-search.html. +type SearchApplicationSearch func(name string, o ...func(*SearchApplicationSearchRequest)) (*Response, error) + +// SearchApplicationSearchRequest configures the Search Application Search API request. +type SearchApplicationSearchRequest struct { + Body io.Reader + + Name string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SearchApplicationSearchRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "POST" + + path.Grow(7 + 1 + len("_application") + 1 + len("search_application") + 1 + len(r.Name) + 1 + len("_search")) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + path.WriteString(r.Name) + path.WriteString("/") + path.WriteString("_search") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SearchApplicationSearch) WithContext(v context.Context) func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + r.ctx = v + } +} + +// WithBody - Search parameters, including template parameters that override defaults. 
+func (f SearchApplicationSearch) WithBody(v io.Reader) func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + r.Body = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SearchApplicationSearch) WithPretty() func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SearchApplicationSearch) WithHuman() func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SearchApplicationSearch) WithErrorTrace() func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SearchApplicationSearch) WithFilterPath(v ...string) func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SearchApplicationSearch) WithHeader(h map[string]string) func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SearchApplicationSearch) WithOpaqueID(s string) func(*SearchApplicationSearchRequest) { + return func(r *SearchApplicationSearchRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_mvt.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_mvt.go index 93531f10b..4b9891fd8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_mvt.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_mvt.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_shards.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_shards.go index f06d7f80a..2a437f341 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_shards.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_shards.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_template.go index 4f77d9724..de7ae32af 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.search_template.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.shutdown.delete_node.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.shutdown.delete_node.go index 82bdd20e3..a32e31d8b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.shutdown.delete_node.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.shutdown.delete_node.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.shutdown.get_node.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.shutdown.get_node.go index fb610dba2..2e721e447 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.shutdown.get_node.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.shutdown.get_node.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.shutdown.put_node.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.shutdown.put_node.go index fa3a29509..d1a147ee1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.shutdown.put_node.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.shutdown.put_node.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.cleanup_repository.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.cleanup_repository.go index 215f0ce37..72a549766 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.cleanup_repository.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.cleanup_repository.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.clone.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.clone.go index acfd00bfb..a1b02c44d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.clone.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.clone.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.create.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.create.go index 85371ba4b..40d445ef5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.create.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.create.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.create_repository.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.create_repository.go index 384aab918..8baaaff7b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.create_repository.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.create_repository.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.delete.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.delete.go index 61647e1bc..28eed17be 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.delete.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.delete_repository.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.delete_repository.go index 85dc0c24d..534447c20 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.delete_repository.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.delete_repository.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.get.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.get.go index 3c855af27..9f8bb4071 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.get.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.get_repository.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.get_repository.go index e3b4ba4db..f17b98003 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.get_repository.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.get_repository.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.repository_analyze.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.repository_analyze.go index df3f23ca7..25ecd1684 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.repository_analyze.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.repository_analyze.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.restore.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.restore.go index c26f37640..a8bb58ef9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.restore.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.restore.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.status.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.status.go index 7cef5ebfc..81e127078 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.verify_repository.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.verify_repository.go index 1643082bb..91d3230b6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.verify_repository.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.snapshot.verify_repository.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.delete_synonym.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.delete_synonym.go new file mode 100644 index 000000000..b190523dc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.delete_synonym.go @@ -0,0 +1,194 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSynonymsDeleteSynonymFunc(t Transport) SynonymsDeleteSynonym { + return func(id string, o ...func(*SynonymsDeleteSynonymRequest)) (*Response, error) { + var r = SynonymsDeleteSynonymRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SynonymsDeleteSynonym deletes a synonym set +// +// This API is experimental. 
+// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-synonyms-set.html. +type SynonymsDeleteSynonym func(id string, o ...func(*SynonymsDeleteSynonymRequest)) (*Response, error) + +// SynonymsDeleteSynonymRequest configures the Synonyms Delete Synonym API request. +type SynonymsDeleteSynonymRequest struct { + DocumentID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. +func (r SynonymsDeleteSynonymRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "DELETE" + + path.Grow(7 + 1 + len("_synonyms") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + path.WriteString(r.DocumentID) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. 
+func (f SynonymsDeleteSynonym) WithContext(v context.Context) func(*SynonymsDeleteSynonymRequest) { + return func(r *SynonymsDeleteSynonymRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SynonymsDeleteSynonym) WithPretty() func(*SynonymsDeleteSynonymRequest) { + return func(r *SynonymsDeleteSynonymRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SynonymsDeleteSynonym) WithHuman() func(*SynonymsDeleteSynonymRequest) { + return func(r *SynonymsDeleteSynonymRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SynonymsDeleteSynonym) WithErrorTrace() func(*SynonymsDeleteSynonymRequest) { + return func(r *SynonymsDeleteSynonymRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SynonymsDeleteSynonym) WithFilterPath(v ...string) func(*SynonymsDeleteSynonymRequest) { + return func(r *SynonymsDeleteSynonymRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SynonymsDeleteSynonym) WithHeader(h map[string]string) func(*SynonymsDeleteSynonymRequest) { + return func(r *SynonymsDeleteSynonymRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SynonymsDeleteSynonym) WithOpaqueID(s string) func(*SynonymsDeleteSynonymRequest) { + return func(r *SynonymsDeleteSynonymRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.delete_synonym_rule.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.delete_synonym_rule.go new file mode 100644 index 000000000..1b99a6301 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.delete_synonym_rule.go @@ -0,0 +1,197 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSynonymsDeleteSynonymRuleFunc(t Transport) SynonymsDeleteSynonymRule { + return func(rule_id string, set_id string, o ...func(*SynonymsDeleteSynonymRuleRequest)) (*Response, error) { + var r = SynonymsDeleteSynonymRuleRequest{RuleID: rule_id, SetID: set_id} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SynonymsDeleteSynonymRule deletes a synonym rule in a synonym set +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-synonym-rule.html. +type SynonymsDeleteSynonymRule func(rule_id string, set_id string, o ...func(*SynonymsDeleteSynonymRuleRequest)) (*Response, error) + +// SynonymsDeleteSynonymRuleRequest configures the Synonyms Delete Synonym Rule API request. +type SynonymsDeleteSynonymRuleRequest struct { + RuleID string + SetID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SynonymsDeleteSynonymRuleRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "DELETE" + + path.Grow(7 + 1 + len("_synonyms") + 1 + len(r.SetID) + 1 + len(r.RuleID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + path.WriteString(r.SetID) + path.WriteString("/") + path.WriteString(r.RuleID) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SynonymsDeleteSynonymRule) WithContext(v context.Context) func(*SynonymsDeleteSynonymRuleRequest) { + return func(r *SynonymsDeleteSynonymRuleRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SynonymsDeleteSynonymRule) WithPretty() func(*SynonymsDeleteSynonymRuleRequest) { + return func(r *SynonymsDeleteSynonymRuleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SynonymsDeleteSynonymRule) WithHuman() func(*SynonymsDeleteSynonymRuleRequest) { + return func(r *SynonymsDeleteSynonymRuleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SynonymsDeleteSynonymRule) WithErrorTrace() func(*SynonymsDeleteSynonymRuleRequest) { + return func(r *SynonymsDeleteSynonymRuleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SynonymsDeleteSynonymRule) WithFilterPath(v ...string) func(*SynonymsDeleteSynonymRuleRequest) { + return func(r *SynonymsDeleteSynonymRuleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SynonymsDeleteSynonymRule) WithHeader(h map[string]string) func(*SynonymsDeleteSynonymRuleRequest) { + return func(r *SynonymsDeleteSynonymRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SynonymsDeleteSynonymRule) WithOpaqueID(s string) func(*SynonymsDeleteSynonymRuleRequest) { + return func(r *SynonymsDeleteSynonymRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.get_synonym.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.get_synonym.go new file mode 100644 index 000000000..e0b8a163c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.get_synonym.go @@ -0,0 +1,220 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newSynonymsGetSynonymFunc(t Transport) SynonymsGetSynonym { + return func(id string, o ...func(*SynonymsGetSynonymRequest)) (*Response, error) { + var r = SynonymsGetSynonymRequest{DocumentID: id} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SynonymsGetSynonym retrieves a synonym set +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonyms-set.html. +type SynonymsGetSynonym func(id string, o ...func(*SynonymsGetSynonymRequest)) (*Response, error) + +// SynonymsGetSynonymRequest configures the Synonyms Get Synonym API request. +type SynonymsGetSynonymRequest struct { + DocumentID string + + From *int + Size *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SynonymsGetSynonymRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + path.Grow(7 + 1 + len("_synonyms") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + path.WriteString(r.DocumentID) + + params = make(map[string]string) + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SynonymsGetSynonym) WithContext(v context.Context) func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + r.ctx = v + } +} + +// WithFrom - starting offset. +func (f SynonymsGetSynonym) WithFrom(v int) func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + r.From = &v + } +} + +// WithSize - specifies a max number of results to get. 
+func (f SynonymsGetSynonym) WithSize(v int) func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + r.Size = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SynonymsGetSynonym) WithPretty() func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SynonymsGetSynonym) WithHuman() func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SynonymsGetSynonym) WithErrorTrace() func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SynonymsGetSynonym) WithFilterPath(v ...string) func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SynonymsGetSynonym) WithHeader(h map[string]string) func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SynonymsGetSynonym) WithOpaqueID(s string) func(*SynonymsGetSynonymRequest) { + return func(r *SynonymsGetSynonymRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.get_synonym_rule.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.get_synonym_rule.go new file mode 100644 index 000000000..0c55fa948 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.get_synonym_rule.go @@ -0,0 +1,197 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSynonymsGetSynonymRuleFunc(t Transport) SynonymsGetSynonymRule { + return func(rule_id string, set_id string, o ...func(*SynonymsGetSynonymRuleRequest)) (*Response, error) { + var r = SynonymsGetSynonymRuleRequest{RuleID: rule_id, SetID: set_id} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SynonymsGetSynonymRule retrieves a synonym rule from a synonym set +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonym-rule.html. +type SynonymsGetSynonymRule func(rule_id string, set_id string, o ...func(*SynonymsGetSynonymRuleRequest)) (*Response, error) + +// SynonymsGetSynonymRuleRequest configures the Synonyms Get Synonym Rule API request. +type SynonymsGetSynonymRuleRequest struct { + RuleID string + SetID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SynonymsGetSynonymRuleRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + path.Grow(7 + 1 + len("_synonyms") + 1 + len(r.SetID) + 1 + len(r.RuleID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + path.WriteString(r.SetID) + path.WriteString("/") + path.WriteString(r.RuleID) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SynonymsGetSynonymRule) WithContext(v context.Context) func(*SynonymsGetSynonymRuleRequest) { + return func(r *SynonymsGetSynonymRuleRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SynonymsGetSynonymRule) WithPretty() func(*SynonymsGetSynonymRuleRequest) { + return func(r *SynonymsGetSynonymRuleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SynonymsGetSynonymRule) WithHuman() func(*SynonymsGetSynonymRuleRequest) { + return func(r *SynonymsGetSynonymRuleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SynonymsGetSynonymRule) WithErrorTrace() func(*SynonymsGetSynonymRuleRequest) { + return func(r *SynonymsGetSynonymRuleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SynonymsGetSynonymRule) WithFilterPath(v ...string) func(*SynonymsGetSynonymRuleRequest) { + return func(r *SynonymsGetSynonymRuleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SynonymsGetSynonymRule) WithHeader(h map[string]string) func(*SynonymsGetSynonymRuleRequest) { + return func(r *SynonymsGetSynonymRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SynonymsGetSynonymRule) WithOpaqueID(s string) func(*SynonymsGetSynonymRuleRequest) { + return func(r *SynonymsGetSynonymRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.get_synonyms_sets.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.get_synonyms_sets.go new file mode 100644 index 000000000..881e168b7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.get_synonyms_sets.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strconv" + "strings" +) + +func newSynonymsGetSynonymsSetsFunc(t Transport) SynonymsGetSynonymsSets { + return func(o ...func(*SynonymsGetSynonymsSetsRequest)) (*Response, error) { + var r = SynonymsGetSynonymsSetsRequest{} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SynonymsGetSynonymsSets retrieves a summary of all defined synonym sets +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/list-synonyms-sets.html. +type SynonymsGetSynonymsSets func(o ...func(*SynonymsGetSynonymsSetsRequest)) (*Response, error) + +// SynonymsGetSynonymsSetsRequest configures the Synonyms Get Synonyms Sets API request. +type SynonymsGetSynonymsSetsRequest struct { + From *int + Size *int + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SynonymsGetSynonymsSetsRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + path.Grow(7 + len("/_synonyms")) + path.WriteString("http://") + path.WriteString("/_synonyms") + + params = make(map[string]string) + + if r.From != nil { + params["from"] = strconv.FormatInt(int64(*r.From), 10) + } + + if r.Size != nil { + params["size"] = strconv.FormatInt(int64(*r.Size), 10) + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SynonymsGetSynonymsSets) WithContext(v context.Context) func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + r.ctx = v + } +} + +// WithFrom - starting offset. +func (f SynonymsGetSynonymsSets) WithFrom(v int) func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + r.From = &v + } +} + +// WithSize - specifies a max number of results to get. 
+func (f SynonymsGetSynonymsSets) WithSize(v int) func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + r.Size = &v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SynonymsGetSynonymsSets) WithPretty() func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SynonymsGetSynonymsSets) WithHuman() func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SynonymsGetSynonymsSets) WithErrorTrace() func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SynonymsGetSynonymsSets) WithFilterPath(v ...string) func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SynonymsGetSynonymsSets) WithHeader(h map[string]string) func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SynonymsGetSynonymsSets) WithOpaqueID(s string) func(*SynonymsGetSynonymsSetsRequest) { + return func(r *SynonymsGetSynonymsSetsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.put_synonym.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.put_synonym.go new file mode 100644 index 000000000..b580e58a8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.put_synonym.go @@ -0,0 +1,201 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSynonymsPutSynonymFunc(t Transport) SynonymsPutSynonym { + return func(id string, body io.Reader, o ...func(*SynonymsPutSynonymRequest)) (*Response, error) { + var r = SynonymsPutSynonymRequest{DocumentID: id, Body: body} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SynonymsPutSynonym creates or updates a synonyms set +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonyms-set.html. +type SynonymsPutSynonym func(id string, body io.Reader, o ...func(*SynonymsPutSynonymRequest)) (*Response, error) + +// SynonymsPutSynonymRequest configures the Synonyms Put Synonym API request. +type SynonymsPutSynonymRequest struct { + DocumentID string + + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SynonymsPutSynonymRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "PUT" + + path.Grow(7 + 1 + len("_synonyms") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + path.WriteString(r.DocumentID) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SynonymsPutSynonym) WithContext(v context.Context) func(*SynonymsPutSynonymRequest) { + return func(r *SynonymsPutSynonymRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SynonymsPutSynonym) WithPretty() func(*SynonymsPutSynonymRequest) { + return func(r *SynonymsPutSynonymRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SynonymsPutSynonym) WithHuman() func(*SynonymsPutSynonymRequest) { + return func(r *SynonymsPutSynonymRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SynonymsPutSynonym) WithErrorTrace() func(*SynonymsPutSynonymRequest) { + return func(r *SynonymsPutSynonymRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SynonymsPutSynonym) WithFilterPath(v ...string) func(*SynonymsPutSynonymRequest) { + return func(r *SynonymsPutSynonymRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SynonymsPutSynonym) WithHeader(h map[string]string) func(*SynonymsPutSynonymRequest) { + return func(r *SynonymsPutSynonymRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SynonymsPutSynonym) WithOpaqueID(s string) func(*SynonymsPutSynonymRequest) { + return func(r *SynonymsPutSynonymRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.put_synonym_rule.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.put_synonym_rule.go new file mode 100644 index 000000000..bedfdec7f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.synonyms.put_synonym_rule.go @@ -0,0 +1,204 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSynonymsPutSynonymRuleFunc(t Transport) SynonymsPutSynonymRule { + return func(body io.Reader, rule_id string, set_id string, o ...func(*SynonymsPutSynonymRuleRequest)) (*Response, error) { + var r = SynonymsPutSynonymRuleRequest{Body: body, RuleID: rule_id, SetID: set_id} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SynonymsPutSynonymRule creates or updates a synonym rule in a synonym set +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonym-rule.html. +type SynonymsPutSynonymRule func(body io.Reader, rule_id string, set_id string, o ...func(*SynonymsPutSynonymRuleRequest)) (*Response, error) + +// SynonymsPutSynonymRuleRequest configures the Synonyms Put Synonym Rule API request. +type SynonymsPutSynonymRuleRequest struct { + Body io.Reader + + RuleID string + SetID string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SynonymsPutSynonymRuleRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "PUT" + + path.Grow(7 + 1 + len("_synonyms") + 1 + len(r.SetID) + 1 + len(r.RuleID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + path.WriteString(r.SetID) + path.WriteString("/") + path.WriteString(r.RuleID) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SynonymsPutSynonymRule) WithContext(v context.Context) func(*SynonymsPutSynonymRuleRequest) { + return func(r *SynonymsPutSynonymRuleRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f SynonymsPutSynonymRule) WithPretty() func(*SynonymsPutSynonymRuleRequest) { + return func(r *SynonymsPutSynonymRuleRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SynonymsPutSynonymRule) WithHuman() func(*SynonymsPutSynonymRuleRequest) { + return func(r *SynonymsPutSynonymRuleRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SynonymsPutSynonymRule) WithErrorTrace() func(*SynonymsPutSynonymRuleRequest) { + return func(r *SynonymsPutSynonymRuleRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SynonymsPutSynonymRule) WithFilterPath(v ...string) func(*SynonymsPutSynonymRuleRequest) { + return func(r *SynonymsPutSynonymRuleRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SynonymsPutSynonymRule) WithHeader(h map[string]string) func(*SynonymsPutSynonymRuleRequest) { + return func(r *SynonymsPutSynonymRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SynonymsPutSynonymRule) WithOpaqueID(s string) func(*SynonymsPutSynonymRuleRequest) { + return func(r *SynonymsPutSynonymRuleRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.tasks.cancel.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.tasks.cancel.go index a02330494..7fe76ae12 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.tasks.cancel.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.tasks.cancel.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.tasks.get.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.tasks.get.go index 6335238a3..b235708ca 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.tasks.get.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.tasks.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.tasks.list.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.tasks.list.go index 6be6fdfcc..87335b25d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.tasks.list.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.tasks.list.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.terms_enum.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.terms_enum.go index 69189f68d..b4aa4d30d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.terms_enum.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.terms_enum.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.termvectors.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.termvectors.go index eafc6c69c..28398de1b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.termvectors.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.termvectors.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.update.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.update.go index fbe3db82d..6d2a89641 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.update.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.update.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.update_by_query.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.update_by_query.go index 0bfcfb3bd..aca32af78 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.update_by_query.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.update_by_query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.update_by_query_rethrottle.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.update_by_query_rethrottle.go index 46aa92ac3..4a9b2eb04 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.update_by_query_rethrottle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.update_by_query_rethrottle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.async_search.delete.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.async_search.delete.go index e07c58814..614608420 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.async_search.delete.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.async_search.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.async_search.get.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.async_search.get.go index 917aed70f..4b0955cfb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.async_search.get.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.async_search.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.async_search.status.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.async_search.status.go index 95fe01606..b3ef4c8b9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.async_search.status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.async_search.status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.async_search.submit.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.async_search.submit.go index 07d20a0e2..491da6533 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.async_search.submit.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.async_search.submit.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.autoscaling.delete_autoscaling_policy.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.autoscaling.delete_autoscaling_policy.go index d586223b2..17c8cd193 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.autoscaling.delete_autoscaling_policy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.autoscaling.delete_autoscaling_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.autoscaling.get_autoscaling_capacity.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.autoscaling.get_autoscaling_capacity.go index ff065cc6d..ddc350f43 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.autoscaling.get_autoscaling_capacity.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.autoscaling.get_autoscaling_capacity.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.autoscaling.get_autoscaling_policy.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.autoscaling.get_autoscaling_policy.go index 4191466c6..a14ab56f6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.autoscaling.get_autoscaling_policy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.autoscaling.get_autoscaling_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.autoscaling.put_autoscaling_policy.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.autoscaling.put_autoscaling_policy.go index 2f884744b..bfa2812b6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.autoscaling.put_autoscaling_policy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.autoscaling.put_autoscaling_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.ml_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.ml_data_frame_analytics.go index 55ea95233..17f3657d8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.ml_data_frame_analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.ml_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.ml_datafeeds.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.ml_datafeeds.go index 1b57d9979..b07385714 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.ml_datafeeds.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.ml_datafeeds.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.ml_jobs.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.ml_jobs.go index 50f3f23bb..2e22252a9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.ml_jobs.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.ml_jobs.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.ml_trained_models.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.ml_trained_models.go index 10c168cbc..65535d124 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.ml_trained_models.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.ml_trained_models.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.transforms.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.transforms.go index 9d2d62a1a..45f9f87e0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.transforms.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.cat.transforms.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.delete_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.delete_auto_follow_pattern.go index 2094ee6f2..4b4c85e4b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.delete_auto_follow_pattern.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.delete_auto_follow_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.follow.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.follow.go index e1d6bebac..206240c3b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.follow.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.follow.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.follow_info.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.follow_info.go index 7a1e61391..522d09018 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.follow_info.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.follow_info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.follow_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.follow_stats.go index bebb0ca16..565835c89 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.follow_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.follow_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.forget_follower.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.forget_follower.go index a3ef7fc4b..4ce10aa87 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.forget_follower.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.forget_follower.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.get_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.get_auto_follow_pattern.go index 2bc89df2b..e70f88758 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.get_auto_follow_pattern.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.get_auto_follow_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.pause_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.pause_auto_follow_pattern.go index e49662349..40605bf6b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.pause_auto_follow_pattern.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.pause_auto_follow_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.pause_follow.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.pause_follow.go index 3a031a2ce..a6bae1f59 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.pause_follow.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.pause_follow.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.put_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.put_auto_follow_pattern.go index e85347177..919067093 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.put_auto_follow_pattern.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.put_auto_follow_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.resume_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.resume_auto_follow_pattern.go index 2913a9318..e9986073e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.resume_auto_follow_pattern.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.resume_auto_follow_pattern.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.resume_follow.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.resume_follow.go index dc58c9aab..298f34c36 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.resume_follow.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.resume_follow.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.stats.go index 90b32d563..8695a490f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.unfollow.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.unfollow.go index 59091bf27..1197b4f72 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.unfollow.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ccr.unfollow.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.close_point_in_time.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.close_point_in_time.go index f5fdc17d4..c9436762a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.close_point_in_time.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.close_point_in_time.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.delete_policy.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.delete_policy.go index 594be499d..984f01986 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.delete_policy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.delete_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.execute_policy.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.execute_policy.go index d871f00f9..0dc8d0c1b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.execute_policy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.execute_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.get_policy.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.get_policy.go index f1ef1dd89..1cea3da9c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.get_policy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.get_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.put_policy.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.put_policy.go index 84435c270..ea9baf071 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.put_policy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.put_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.stats.go index c67ca1963..468af4c1b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.enrich.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.eql.delete.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.eql.delete.go index 4b76e9685..ea1631821 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.eql.delete.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.eql.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.eql.get.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.eql.get.go index e404624e8..24c51618b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.eql.get.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.eql.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.eql.get_status.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.eql.get_status.go index 75d16b881..01724d0ea 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.eql.get_status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.eql.get_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.eql.search.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.eql.search.go index 179122b6c..cf2137412 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.eql.search.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.eql.search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.esql.query.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.esql.query.go new file mode 100644 index 000000000..4b7a4f730 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.esql.query.go @@ -0,0 +1,221 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newEsqlQueryFunc(t Transport) EsqlQuery { + return func(body io.Reader, o ...func(*EsqlQueryRequest)) (*Response, error) { + var r = EsqlQueryRequest{Body: body} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// EsqlQuery - Executes an ESQL request +// +// This API is experimental. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/esql-query-api.html. +type EsqlQuery func(body io.Reader, o ...func(*EsqlQueryRequest)) (*Response, error) + +// EsqlQueryRequest configures the Esql Query API request. +type EsqlQueryRequest struct { + Body io.Reader + + Delimiter string + Format string + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r EsqlQueryRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "POST" + + path.Grow(7 + len("/_query")) + path.WriteString("http://") + path.WriteString("/_query") + + params = make(map[string]string) + + if r.Delimiter != "" { + params["delimiter"] = r.Delimiter + } + + if r.Format != "" { + params["format"] = r.Format + } + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f EsqlQuery) WithContext(v context.Context) func(*EsqlQueryRequest) { + return func(r *EsqlQueryRequest) { + r.ctx = v + } +} + +// WithDelimiter - the character to use between values within a csv row. only valid for the csv format.. +func (f EsqlQuery) WithDelimiter(v string) func(*EsqlQueryRequest) { + return func(r *EsqlQueryRequest) { + r.Delimiter = v + } +} + +// WithFormat - a short version of the accept header, e.g. json, yaml. 
+func (f EsqlQuery) WithFormat(v string) func(*EsqlQueryRequest) { + return func(r *EsqlQueryRequest) { + r.Format = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f EsqlQuery) WithPretty() func(*EsqlQueryRequest) { + return func(r *EsqlQueryRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f EsqlQuery) WithHuman() func(*EsqlQueryRequest) { + return func(r *EsqlQueryRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f EsqlQuery) WithErrorTrace() func(*EsqlQueryRequest) { + return func(r *EsqlQueryRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f EsqlQuery) WithFilterPath(v ...string) func(*EsqlQueryRequest) { + return func(r *EsqlQueryRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f EsqlQuery) WithHeader(h map[string]string) func(*EsqlQueryRequest) { + return func(r *EsqlQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f EsqlQuery) WithOpaqueID(s string) func(*EsqlQueryRequest) { + return func(r *EsqlQueryRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.graph.explore.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.graph.explore.go index 9bf3069cd..d2f7ae871 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.graph.explore.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.graph.explore.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.delete_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.delete_lifecycle.go index 061b60b00..fb3b75f44 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.delete_lifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.delete_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.explain_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.explain_lifecycle.go index e280a1790..7b2bee2d8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.explain_lifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.explain_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.get_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.get_lifecycle.go index 01ed864e6..2032a1b08 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.get_lifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.get_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.get_status.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.get_status.go index 41dd77633..5affd0f85 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.get_status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.get_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.migrate_to_data_tiers.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.migrate_to_data_tiers.go index d4feffc90..07615a067 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.migrate_to_data_tiers.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.migrate_to_data_tiers.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.move_to_step.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.move_to_step.go index bbd1648a2..5342d0988 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.move_to_step.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.move_to_step.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.put_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.put_lifecycle.go index 4d66a9b0d..e485697bf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.put_lifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.put_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.remove_policy.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.remove_policy.go index 263f5641f..1ebf50c46 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.remove_policy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.remove_policy.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.retry.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.retry.go index 5f23ce18d..e274176ce 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.retry.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.retry.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.start.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.start.go index 37726a5fd..aa68dee6d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.start.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.start.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.stop.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.stop.go index 51d38eb85..65c0d343e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.stop.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ilm.stop.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.create_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.create_data_stream.go index 580cb64df..9bbe3825f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.create_data_stream.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.create_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.data_streams_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.data_streams_stats.go index 489327b8c..7e006383e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.data_streams_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.data_streams_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.delete_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.delete_data_stream.go index f73e61cc7..b0df2ffb2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.delete_data_stream.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.delete_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.get_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.get_data_stream.go index e2d16c32a..062feedca 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.get_data_stream.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.get_data_stream.go @@ -15,13 +15,14 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi import ( "context" "net/http" + "strconv" "strings" ) @@ -47,6 +48,7 @@ type IndicesGetDataStreamRequest struct { Name []string ExpandWildcards string + IncludeDefaults *bool Pretty bool Human bool @@ -83,6 +85,10 @@ func (r IndicesGetDataStreamRequest) Do(ctx context.Context, transport Transport params["expand_wildcards"] = r.ExpandWildcards } + if r.IncludeDefaults != nil { + params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) + } + if r.Pretty { params["pretty"] = "true" } @@ -163,6 +169,13 @@ func (f IndicesGetDataStream) WithExpandWildcards(v string) func(*IndicesGetData } } +// WithIncludeDefaults - return all relevant default configurations for the data stream (default: false). +func (f IndicesGetDataStream) WithIncludeDefaults(v bool) func(*IndicesGetDataStreamRequest) { + return func(r *IndicesGetDataStreamRequest) { + r.IncludeDefaults = &v + } +} + // WithPretty makes the response body pretty-printed. 
func (f IndicesGetDataStream) WithPretty() func(*IndicesGetDataStreamRequest) { return func(r *IndicesGetDataStreamRequest) { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.migrate_to_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.migrate_to_data_stream.go index 7c41edff0..20996caae 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.migrate_to_data_stream.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.migrate_to_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.promote_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.promote_data_stream.go index 6aa90703e..0c4f5e9fa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.promote_data_stream.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.promote_data_stream.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.reload_search_analyzers.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.reload_search_analyzers.go index 09994ea1e..b7bda8e40 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.reload_search_analyzers.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.reload_search_analyzers.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -51,6 +51,7 @@ type IndicesReloadSearchAnalyzersRequest struct { AllowNoIndices *bool ExpandWildcards string IgnoreUnavailable *bool + Resource string Pretty bool Human bool @@ -97,6 +98,10 @@ func (r IndicesReloadSearchAnalyzersRequest) Do(ctx context.Context, transport T params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) } + if r.Resource != "" { + params["resource"] = r.Resource + } + if r.Pretty { params["pretty"] = "true" } @@ -184,6 +189,13 @@ func (f IndicesReloadSearchAnalyzers) WithIgnoreUnavailable(v bool) func(*Indice } } +// WithResource - changed resource to reload analyzers from if applicable. +func (f IndicesReloadSearchAnalyzers) WithResource(v string) func(*IndicesReloadSearchAnalyzersRequest) { + return func(r *IndicesReloadSearchAnalyzersRequest) { + r.Resource = v + } +} + // WithPretty makes the response body pretty-printed. 
func (f IndicesReloadSearchAnalyzers) WithPretty() func(*IndicesReloadSearchAnalyzersRequest) { return func(r *IndicesReloadSearchAnalyzersRequest) { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.unfreeze.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.unfreeze.go index 013013733..0cde389ac 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.unfreeze.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.indices.unfreeze.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.delete.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.delete.go index 98dab485f..66a3a2d4f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.delete.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.delete.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.get.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.get.go index 047282145..c1564940b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.get.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.get.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.get_basic_status.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.get_basic_status.go index f86c38318..91c1a29b5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.get_basic_status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.get_basic_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.get_trial_status.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.get_trial_status.go index a2c55f978..4512c99dc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.get_trial_status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.get_trial_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.post.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.post.go index bfdb5eb06..3afb36bb2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.post.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.post.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.post_start_basic.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.post_start_basic.go index 5ef4e4a0f..7003ffe5e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.post_start_basic.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.post_start_basic.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.post_start_trial.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.post_start_trial.go index 4bbc238d0..7af010461 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.post_start_trial.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.license.post_start_trial.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.logstash.delete_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.logstash.delete_pipeline.go index 5f57bda82..2ed67ea01 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.logstash.delete_pipeline.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.logstash.delete_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.logstash.get_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.logstash.get_pipeline.go index 492e477d6..f945d2ca9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.logstash.get_pipeline.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.logstash.get_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -26,8 +26,8 @@ import ( ) func newLogstashGetPipelineFunc(t Transport) LogstashGetPipeline { - return func(id string, o ...func(*LogstashGetPipelineRequest)) (*Response, error) { - var r = LogstashGetPipelineRequest{DocumentID: id} + return func(o ...func(*LogstashGetPipelineRequest)) (*Response, error) { + var r = LogstashGetPipelineRequest{} for _, f := range o { f(&r) } @@ -40,7 +40,7 @@ func newLogstashGetPipelineFunc(t Transport) LogstashGetPipeline { // LogstashGetPipeline - Retrieves Logstash Pipelines used by Central Management // // See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-get-pipeline.html. -type LogstashGetPipeline func(id string, o ...func(*LogstashGetPipelineRequest)) (*Response, error) +type LogstashGetPipeline func(o ...func(*LogstashGetPipelineRequest)) (*Response, error) // LogstashGetPipelineRequest configures the Logstash Get Pipeline API request. type LogstashGetPipelineRequest struct { @@ -72,8 +72,10 @@ func (r LogstashGetPipelineRequest) Do(ctx context.Context, transport Transport) path.WriteString("_logstash") path.WriteString("/") path.WriteString("pipeline") - path.WriteString("/") - path.WriteString(r.DocumentID) + if r.DocumentID != "" { + path.WriteString("/") + path.WriteString(r.DocumentID) + } params = make(map[string]string) @@ -143,6 +145,13 @@ func (f LogstashGetPipeline) WithContext(v context.Context) func(*LogstashGetPip } } +// WithDocumentID - a list of pipeline ids. +func (f LogstashGetPipeline) WithDocumentID(v string) func(*LogstashGetPipelineRequest) { + return func(r *LogstashGetPipelineRequest) { + r.DocumentID = v + } +} + // WithPretty makes the response body pretty-printed. 
func (f LogstashGetPipeline) WithPretty() func(*LogstashGetPipelineRequest) { return func(r *LogstashGetPipelineRequest) { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.logstash.put_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.logstash.put_pipeline.go index 024e9c78f..29b13baa4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.logstash.put_pipeline.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.logstash.put_pipeline.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.migration.deprecations.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.migration.deprecations.go index fc9ef52ec..763498273 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.migration.deprecations.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.migration.deprecations.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.migration.get_feature_upgrade_status.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.migration.get_feature_upgrade_status.go index d56ce9d24..ed2a9ebaa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.migration.get_feature_upgrade_status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.migration.get_feature_upgrade_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.migration.post_feature_upgrade.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.migration.post_feature_upgrade.go index ea6eed8e0..19c68fd62 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.migration.post_feature_upgrade.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.migration.post_feature_upgrade.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.clear_trained_model_deployment_cache.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.clear_trained_model_deployment_cache.go index 3493a0840..011cc974a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.clear_trained_model_deployment_cache.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.clear_trained_model_deployment_cache.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.close_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.close_job.go index 487474125..eeef7f444 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.close_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.close_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_calendar.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_calendar.go index 70f0736f1..a3dceb79f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_calendar.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_calendar.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_calendar_event.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_calendar_event.go index 2ba406edc..7152b9348 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_calendar_event.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_calendar_event.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_calendar_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_calendar_job.go index 978367913..46fdda9bd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_calendar_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_calendar_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_data_frame_analytics.go index 8085aa61e..ed4d927e8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_data_frame_analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_datafeed.go index c2facb93d..3c0b46ffd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_datafeed.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_expired_data.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_expired_data.go index 2caf3d2ce..2ff710352 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_expired_data.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_expired_data.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_filter.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_filter.go index 55a66b4f5..790c25ab5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_filter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_filter.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_forecast.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_forecast.go index b2080cc2e..8820d559d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_forecast.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_forecast.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_job.go index 82b7305eb..5180253db 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_model_snapshot.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_model_snapshot.go index ed6994974..55c1a6993 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_model_snapshot.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_model_snapshot.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_trained_model.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_trained_model.go index 6e6197d0b..44fb58ff8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_trained_model.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_trained_model.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_trained_model_alias.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_trained_model_alias.go index b6e64917f..ccbbdc7c3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_trained_model_alias.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.delete_trained_model_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.estimate_model_memory.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.estimate_model_memory.go index a657db2a5..d47f3a07e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.estimate_model_memory.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.estimate_model_memory.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.evaluate_data_frame.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.evaluate_data_frame.go index 3b368cee5..5cb640266 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.evaluate_data_frame.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.evaluate_data_frame.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.explain_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.explain_data_frame_analytics.go index 56ce67fa5..1eac7dc28 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.explain_data_frame_analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.explain_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.flush_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.flush_job.go index 23feeab21..ce7252a07 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.flush_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.flush_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.forecast.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.forecast.go index 7b3192fa1..b0efc7208 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.forecast.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.forecast.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_buckets.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_buckets.go index 24a7cde2c..30d34fd64 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_buckets.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_buckets.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_calendar_events.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_calendar_events.go index 4590484be..1f200e2d0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_calendar_events.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_calendar_events.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_calendars.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_calendars.go index 7b46e298d..f4692bfb0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_calendars.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_calendars.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_categories.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_categories.go index 1fb578787..b9586b218 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_categories.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_categories.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_data_frame_analytics.go index 3b6f15622..39bda0a7e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_data_frame_analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_data_frame_analytics_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_data_frame_analytics_stats.go index c6919057a..d7d96a458 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_data_frame_analytics_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_data_frame_analytics_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_datafeed_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_datafeed_stats.go index 7d02a6dcb..663c6fb48 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_datafeed_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_datafeed_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_datafeeds.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_datafeeds.go index f5e51b7a7..08abcc18a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_datafeeds.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_datafeeds.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_filters.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_filters.go index 9b6e61d84..2028809fd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_filters.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_filters.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_influencers.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_influencers.go index 5b5ba6887..a9e2dd3f7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_influencers.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_influencers.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_job_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_job_stats.go index 175a17efa..0c0be6fa3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_job_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_job_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_jobs.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_jobs.go index e7abff141..f2af6d2a3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_jobs.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_jobs.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_memory_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_memory_stats.go index 5c83af597..827379de3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_memory_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_memory_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_model_snapshot_upgrade_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_model_snapshot_upgrade_stats.go index 5fd545902..0cd1ed946 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_model_snapshot_upgrade_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_model_snapshot_upgrade_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_model_snapshots.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_model_snapshots.go index 6bda0be61..3478bdfda 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_model_snapshots.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_model_snapshots.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_overall_buckets.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_overall_buckets.go index 6348a8ea3..e392d3883 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_overall_buckets.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_overall_buckets.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_records.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_records.go index cd5c9049c..83fad6221 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_records.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_records.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_trained_models.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_trained_models.go index 6eadae005..c80de65ab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_trained_models.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_trained_models.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_trained_models_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_trained_models_stats.go index 9b92ecf5c..c85217995 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_trained_models_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.get_trained_models_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.infer_trained_model.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.infer_trained_model.go index 0e6496bd9..f2e386d56 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.infer_trained_model.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.infer_trained_model.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.info.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.info.go index bc91b9736..b880c2aea 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.info.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.open_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.open_job.go index b6c04d6fd..b7e77607d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.open_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.open_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.post_calendar_events.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.post_calendar_events.go index 7e6471fa5..b6e92dbf8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.post_calendar_events.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.post_calendar_events.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.post_data.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.post_data.go index c9834a822..db5cf20e8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.post_data.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.post_data.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.preview_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.preview_data_frame_analytics.go index 7b4f406d9..7f76fad7b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.preview_data_frame_analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.preview_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.preview_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.preview_datafeed.go index 6329769ba..e3aee6e51 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.preview_datafeed.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.preview_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_calendar.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_calendar.go index 3669f6e62..601a32d2b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_calendar.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_calendar.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_calendar_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_calendar_job.go index fb7ff81f3..f84737fad 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_calendar_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_calendar_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_data_frame_analytics.go index e24b9de6b..91c0e6501 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_data_frame_analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_datafeed.go index 57b691e9d..e6943675e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_datafeed.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_filter.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_filter.go index aea1c4296..5e2598b1b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_filter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_filter.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_job.go index 0c41e47aa..b6a82a714 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_trained_model.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_trained_model.go index ae4739e6d..f19eb6cd5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_trained_model.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_trained_model.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -51,6 +51,7 @@ type MLPutTrainedModelRequest struct { ModelID string DeferDefinitionDecompression *bool + WaitForCompletion *bool Pretty bool Human bool @@ -87,6 +88,10 @@ func (r MLPutTrainedModelRequest) Do(ctx context.Context, transport Transport) ( params["defer_definition_decompression"] = strconv.FormatBool(*r.DeferDefinitionDecompression) } + if r.WaitForCompletion != nil { + params["wait_for_completion"] = strconv.FormatBool(*r.WaitForCompletion) + } + if r.Pretty { params["pretty"] = "true" } @@ -164,6 +169,13 @@ func (f MLPutTrainedModel) WithDeferDefinitionDecompression(v bool) func(*MLPutT } } +// WithWaitForCompletion - whether to wait for all child operations(e.g. model download) to complete, before returning or not. default to false. +func (f MLPutTrainedModel) WithWaitForCompletion(v bool) func(*MLPutTrainedModelRequest) { + return func(r *MLPutTrainedModelRequest) { + r.WaitForCompletion = &v + } +} + // WithPretty makes the response body pretty-printed. 
func (f MLPutTrainedModel) WithPretty() func(*MLPutTrainedModelRequest) { return func(r *MLPutTrainedModelRequest) { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_trained_model_alias.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_trained_model_alias.go index e2b4797b3..245502df9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_trained_model_alias.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_trained_model_alias.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_trained_model_definition_part.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_trained_model_definition_part.go index 8af2ea555..4f3fe11f0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_trained_model_definition_part.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_trained_model_definition_part.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_trained_model_vocabulary.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_trained_model_vocabulary.go index bfb2eed24..6c5b4aa09 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_trained_model_vocabulary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.put_trained_model_vocabulary.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.reset_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.reset_job.go index 325c64bcb..8d308c0e7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.reset_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.reset_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.revert_model_snapshot.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.revert_model_snapshot.go index d0e4a1850..80a348a85 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.revert_model_snapshot.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.revert_model_snapshot.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.set_upgrade_mode.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.set_upgrade_mode.go index 8ba508577..58564690b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.set_upgrade_mode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.set_upgrade_mode.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.start_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.start_data_frame_analytics.go index d6e9ff4d1..c17510d9b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.start_data_frame_analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.start_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.start_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.start_datafeed.go index d452df052..5f751babb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.start_datafeed.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.start_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.start_trained_model_deployment.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.start_trained_model_deployment.go index fd70a4d64..801285515 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.start_trained_model_deployment.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.start_trained_model_deployment.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -49,6 +49,7 @@ type MLStartTrainedModelDeploymentRequest struct { ModelID string CacheSize string + DeploymentID string NumberOfAllocations *int Priority string QueueCapacity *int @@ -95,6 +96,10 @@ func (r MLStartTrainedModelDeploymentRequest) Do(ctx context.Context, transport params["cache_size"] = r.CacheSize } + if r.DeploymentID != "" { + params["deployment_id"] = r.DeploymentID + } + if r.NumberOfAllocations != nil { params["number_of_allocations"] = strconv.FormatInt(int64(*r.NumberOfAllocations), 10) } @@ -192,6 +197,13 @@ func (f MLStartTrainedModelDeployment) WithCacheSize(v string) func(*MLStartTrai } } +// WithDeploymentID - the ID of the new deployment. defaults to the model_id if not set.. +func (f MLStartTrainedModelDeployment) WithDeploymentID(v string) func(*MLStartTrainedModelDeploymentRequest) { + return func(r *MLStartTrainedModelDeploymentRequest) { + r.DeploymentID = v + } +} + // WithNumberOfAllocations - the total number of allocations this model is assigned across machine learning nodes.. 
func (f MLStartTrainedModelDeployment) WithNumberOfAllocations(v int) func(*MLStartTrainedModelDeploymentRequest) { return func(r *MLStartTrainedModelDeploymentRequest) { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.stop_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.stop_data_frame_analytics.go index 73b667705..f77733010 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.stop_data_frame_analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.stop_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.stop_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.stop_datafeed.go index e3e82265c..d2f5d655a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.stop_datafeed.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.stop_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.stop_trained_model_deployment.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.stop_trained_model_deployment.go index 16e726194..20fa2ad40 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.stop_trained_model_deployment.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.stop_trained_model_deployment.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_data_frame_analytics.go index 1126e9933..1fcfb05eb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_data_frame_analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_data_frame_analytics.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_datafeed.go index 32a33ec4c..e70b2ac81 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_datafeed.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_datafeed.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_filter.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_filter.go index 2337997d6..634aacdf6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_filter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_filter.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_job.go index aeb616e4c..f348f1961 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_model_snapshot.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_model_snapshot.go index 55856fe68..c3684783b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_model_snapshot.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_model_snapshot.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_trained_model_deployment.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_trained_model_deployment.go index d33984c0e..33220332b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_trained_model_deployment.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.update_trained_model_deployment.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.upgrade_job_snapshot.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.upgrade_job_snapshot.go index f9e7bfad6..f838126a1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.upgrade_job_snapshot.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.upgrade_job_snapshot.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.validate.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.validate.go index 220ab797f..061d0cd16 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.validate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.validate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.validate_detector.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.validate_detector.go index 79103e27e..385cfd44d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.validate_detector.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ml.validate_detector.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.monitoring.bulk.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.monitoring.bulk.go index 5f92d2353..b5865d6c7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.monitoring.bulk.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.monitoring.bulk.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.open_point_in_time.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.open_point_in_time.go index 1cf0de077..6a1f89ee8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.open_point_in_time.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.open_point_in_time.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.delete_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.delete_job.go index 57eeb5a44..9b3873edc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.delete_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.delete_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.get_jobs.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.get_jobs.go index 3f73cf6e2..f9ae24a6c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.get_jobs.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.get_jobs.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.get_rollup_caps.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.get_rollup_caps.go index fc2b941f5..007da8648 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.get_rollup_caps.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.get_rollup_caps.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.get_rollup_index_caps.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.get_rollup_index_caps.go index 36f6fdf9a..0eecbd8ef 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.get_rollup_index_caps.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.get_rollup_index_caps.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.put_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.put_job.go index 4f2ff63ab..44e530731 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.put_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.put_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.rollup_search.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.rollup_search.go index c4c54ce4e..846606720 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.rollup_search.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.rollup_search.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.start_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.start_job.go index c4bf13428..b259b4b7b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.start_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.start_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.stop_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.stop_job.go index 61a2e93df..b21421a80 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.stop_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.rollup.stop_job.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.searchable_snapshots.cache_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.searchable_snapshots.cache_stats.go index 349b40c2c..67181cc6d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.searchable_snapshots.cache_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.searchable_snapshots.cache_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.searchable_snapshots.clear_cache.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.searchable_snapshots.clear_cache.go index 039266b42..e29f55242 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.searchable_snapshots.clear_cache.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.searchable_snapshots.clear_cache.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.searchable_snapshots.mount.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.searchable_snapshots.mount.go index 8ab57087d..6d479566b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.searchable_snapshots.mount.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.searchable_snapshots.mount.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.searchable_snapshots.stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.searchable_snapshots.stats.go index 0e9855d3f..4932f04df 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.searchable_snapshots.stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.searchable_snapshots.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.activate_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.activate_user_profile.go index d8191de08..d28656f0e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.activate_user_profile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.activate_user_profile.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.authenticate.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.authenticate.go index aa2907157..c5291d623 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.authenticate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.authenticate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.bulk_update_api_keys.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.bulk_update_api_keys.go index eb8cd5c90..f4b7053cf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.bulk_update_api_keys.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.bulk_update_api_keys.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.change_password.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.change_password.go index e5129998d..e628edb48 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.change_password.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.change_password.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_api_key_cache.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_api_key_cache.go index 37d4a1acd..c27329233 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_api_key_cache.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_api_key_cache.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_cached_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_cached_privileges.go index 681ecf203..b8d34a86a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_cached_privileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_cached_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_cached_realms.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_cached_realms.go index ac45d546d..ee665a0f5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_cached_realms.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_cached_realms.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_cached_roles.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_cached_roles.go index 2fa9a4708..bef38b65a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_cached_roles.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_cached_roles.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_cached_service_tokens.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_cached_service_tokens.go index 8d4b5b53e..49cca223b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_cached_service_tokens.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.clear_cached_service_tokens.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.create_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.create_api_key.go index 0b1045300..f8226aac9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.create_api_key.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.create_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.create_cross_cluster_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.create_cross_cluster_api_key.go new file mode 100644 index 000000000..69a587a64 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.create_cross_cluster_api_key.go @@ -0,0 +1,196 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityCreateCrossClusterAPIKeyFunc(t Transport) SecurityCreateCrossClusterAPIKey { + return func(body io.Reader, o ...func(*SecurityCreateCrossClusterAPIKeyRequest)) (*Response, error) { + var r = SecurityCreateCrossClusterAPIKeyRequest{Body: body} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityCreateCrossClusterAPIKey - Creates a cross-cluster API key for API key based remote cluster access. +// +// This API is beta. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-cross-cluster-api-key.html. +type SecurityCreateCrossClusterAPIKey func(body io.Reader, o ...func(*SecurityCreateCrossClusterAPIKeyRequest)) (*Response, error) + +// SecurityCreateCrossClusterAPIKeyRequest configures the Security Create Cross ClusterAPI Key API request. +type SecurityCreateCrossClusterAPIKeyRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SecurityCreateCrossClusterAPIKeyRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "POST" + + path.Grow(7 + len("/_security/cross_cluster/api_key")) + path.WriteString("http://") + path.WriteString("/_security/cross_cluster/api_key") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityCreateCrossClusterAPIKey) WithContext(v context.Context) func(*SecurityCreateCrossClusterAPIKeyRequest) { + return func(r *SecurityCreateCrossClusterAPIKeyRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityCreateCrossClusterAPIKey) WithPretty() func(*SecurityCreateCrossClusterAPIKeyRequest) { + return func(r *SecurityCreateCrossClusterAPIKeyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SecurityCreateCrossClusterAPIKey) WithHuman() func(*SecurityCreateCrossClusterAPIKeyRequest) { + return func(r *SecurityCreateCrossClusterAPIKeyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityCreateCrossClusterAPIKey) WithErrorTrace() func(*SecurityCreateCrossClusterAPIKeyRequest) { + return func(r *SecurityCreateCrossClusterAPIKeyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityCreateCrossClusterAPIKey) WithFilterPath(v ...string) func(*SecurityCreateCrossClusterAPIKeyRequest) { + return func(r *SecurityCreateCrossClusterAPIKeyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityCreateCrossClusterAPIKey) WithHeader(h map[string]string) func(*SecurityCreateCrossClusterAPIKeyRequest) { + return func(r *SecurityCreateCrossClusterAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityCreateCrossClusterAPIKey) WithOpaqueID(s string) func(*SecurityCreateCrossClusterAPIKeyRequest) { + return func(r *SecurityCreateCrossClusterAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.create_service_token.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.create_service_token.go index 85c5eaf9d..dccc0b8aa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.create_service_token.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.create_service_token.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_privileges.go index d8255ec8c..e0b26b2b3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_privileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_role.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_role.go index 250e8291c..5438981ec 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_role.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_role.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_role_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_role_mapping.go index 3fe898a54..324a3bdd8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_role_mapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_role_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_service_token.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_service_token.go index f3d1cbca0..fe0c3baf5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_service_token.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_service_token.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_user.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_user.go index d9d437eba..7ada7230b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_user.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.delete_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.disable_user.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.disable_user.go index 6564a66b1..8989aef02 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.disable_user.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.disable_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.disable_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.disable_user_profile.go index 1b215d502..4fd124acc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.disable_user_profile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.disable_user_profile.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.enable_user.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.enable_user.go index 5f9c5897b..f642caa1e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.enable_user.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.enable_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.enable_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.enable_user_profile.go index c8c7335d3..31933071c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.enable_user_profile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.enable_user_profile.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.enroll_kibana.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.enroll_kibana.go index d1b61c6ca..3995e0ef2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.enroll_kibana.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.enroll_kibana.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.enroll_node.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.enroll_node.go index 46d8a9044..2e7345928 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.enroll_node.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.enroll_node.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_api_key.go index 4c32f3a94..afd93807a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_api_key.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_builtin_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_builtin_privileges.go index 2d10a6455..5b03cbea6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_builtin_privileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_builtin_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_privileges.go index 551163467..5e3d78963 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_privileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_role.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_role.go index 3953f4905..fbf53b7da 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_role.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_role.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_role_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_role_mapping.go index 4d5b7405b..aa2b3d7d7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_role_mapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_role_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_service_accounts.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_service_accounts.go index d81f14508..a687a1864 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_service_accounts.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_service_accounts.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_service_credentials.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_service_credentials.go index 48c9fe56a..bee63161d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_service_credentials.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_service_credentials.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_settings.go new file mode 100644 index 000000000..ef68022fe --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_settings.go @@ -0,0 +1,187 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newSecurityGetSettingsFunc(t Transport) SecurityGetSettings { + return func(o ...func(*SecurityGetSettingsRequest)) (*Response, error) { + var r = SecurityGetSettingsRequest{} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityGetSettings - Retrieve settings for the security system indices +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-settings.html. +type SecurityGetSettings func(o ...func(*SecurityGetSettingsRequest)) (*Response, error) + +// SecurityGetSettingsRequest configures the Security Get Settings API request. +type SecurityGetSettingsRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SecurityGetSettingsRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + path.Grow(7 + len("/_security/settings")) + path.WriteString("http://") + path.WriteString("/_security/settings") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityGetSettings) WithContext(v context.Context) func(*SecurityGetSettingsRequest) { + return func(r *SecurityGetSettingsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityGetSettings) WithPretty() func(*SecurityGetSettingsRequest) { + return func(r *SecurityGetSettingsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityGetSettings) WithHuman() func(*SecurityGetSettingsRequest) { + return func(r *SecurityGetSettingsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f SecurityGetSettings) WithErrorTrace() func(*SecurityGetSettingsRequest) { + return func(r *SecurityGetSettingsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityGetSettings) WithFilterPath(v ...string) func(*SecurityGetSettingsRequest) { + return func(r *SecurityGetSettingsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityGetSettings) WithHeader(h map[string]string) func(*SecurityGetSettingsRequest) { + return func(r *SecurityGetSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityGetSettings) WithOpaqueID(s string) func(*SecurityGetSettingsRequest) { + return func(r *SecurityGetSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_token.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_token.go index e1d119484..496296880 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_token.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_token.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_user.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_user.go index ddda48f7f..c406baa7b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_user.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_user_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_user_privileges.go index e9d2b5121..aa4a32652 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_user_privileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_user_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_user_profile.go index 9eca27a26..a586016f2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_user_profile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.get_user_profile.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.grant_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.grant_api_key.go index c0b28b026..bb1d76597 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.grant_api_key.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.grant_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.has_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.has_privileges.go index e88751421..33086da4b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.has_privileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.has_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.has_privileges_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.has_privileges_user_profile.go index e6b018f39..6b8dbf6fc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.has_privileges_user_profile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.has_privileges_user_profile.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.invalidate_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.invalidate_api_key.go index 75787ea19..93b6234aa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.invalidate_api_key.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.invalidate_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.invalidate_token.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.invalidate_token.go index 08ed2687d..5875db9d8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.invalidate_token.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.invalidate_token.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.oidc_authenticate.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.oidc_authenticate.go index 7db6ed4f9..1ad0845e3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.oidc_authenticate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.oidc_authenticate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.oidc_logout.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.oidc_logout.go index 36bd90fc4..a10b338bb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.oidc_logout.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.oidc_logout.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.oidc_prepare_authentication.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.oidc_prepare_authentication.go index 86ac31686..ea75d8608 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.oidc_prepare_authentication.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.oidc_prepare_authentication.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.put_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.put_privileges.go index 961d0e66b..31bd6f183 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.put_privileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.put_privileges.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.put_role.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.put_role.go index 69964a8e6..012ad04e0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.put_role.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.put_role.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.put_role_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.put_role_mapping.go index 1d957c43c..464048b3c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.put_role_mapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.put_role_mapping.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.put_user.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.put_user.go index 1b1b57523..0e1c86b78 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.put_user.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.put_user.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.query_api_keys.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.query_api_keys.go index 99236bd8b..edddfb065 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.query_api_keys.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.query_api_keys.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_authenticate.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_authenticate.go index f64e52bce..f8cf51515 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_authenticate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_authenticate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_complete_logout.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_complete_logout.go index 9eb7c8480..df5f0cbd4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_complete_logout.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_complete_logout.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_invalidate.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_invalidate.go index 112e5c826..b0836845b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_invalidate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_invalidate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_logout.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_logout.go index 8c382ed75..c94e4eb6f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_logout.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_logout.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_prepare_authentication.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_prepare_authentication.go index e30fc231e..f9f7d001e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_prepare_authentication.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_prepare_authentication.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_service_provider_metadata.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_service_provider_metadata.go index 1b18a6623..e2f8f3eba 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_service_provider_metadata.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.saml_service_provider_metadata.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.suggest_user_profiles.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.suggest_user_profiles.go index d3d599f13..421286d23 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.suggest_user_profiles.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.suggest_user_profiles.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.update_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.update_api_key.go index 720b11742..cfa35049c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.update_api_key.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.update_api_key.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.update_cross_cluster_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.update_cross_cluster_api_key.go new file mode 100644 index 000000000..a7f52a2bc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.update_cross_cluster_api_key.go @@ -0,0 +1,205 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityUpdateCrossClusterAPIKeyFunc(t Transport) SecurityUpdateCrossClusterAPIKey { + return func(id string, body io.Reader, o ...func(*SecurityUpdateCrossClusterAPIKeyRequest)) (*Response, error) { + var r = SecurityUpdateCrossClusterAPIKeyRequest{DocumentID: id, Body: body} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityUpdateCrossClusterAPIKey - Updates attributes of an existing cross-cluster API key. +// +// This API is beta. +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-cross-cluster-api-key.html. +type SecurityUpdateCrossClusterAPIKey func(id string, body io.Reader, o ...func(*SecurityUpdateCrossClusterAPIKeyRequest)) (*Response, error) + +// SecurityUpdateCrossClusterAPIKeyRequest configures the Security Update Cross ClusterAPI Key API request. +type SecurityUpdateCrossClusterAPIKeyRequest struct { + DocumentID string + + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SecurityUpdateCrossClusterAPIKeyRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "PUT" + + path.Grow(7 + 1 + len("_security") + 1 + len("cross_cluster") + 1 + len("api_key") + 1 + len(r.DocumentID)) + path.WriteString("http://") + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("cross_cluster") + path.WriteString("/") + path.WriteString("api_key") + path.WriteString("/") + path.WriteString(r.DocumentID) + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityUpdateCrossClusterAPIKey) WithContext(v context.Context) func(*SecurityUpdateCrossClusterAPIKeyRequest) { + return func(r *SecurityUpdateCrossClusterAPIKeyRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. 
+func (f SecurityUpdateCrossClusterAPIKey) WithPretty() func(*SecurityUpdateCrossClusterAPIKeyRequest) { + return func(r *SecurityUpdateCrossClusterAPIKeyRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f SecurityUpdateCrossClusterAPIKey) WithHuman() func(*SecurityUpdateCrossClusterAPIKeyRequest) { + return func(r *SecurityUpdateCrossClusterAPIKeyRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityUpdateCrossClusterAPIKey) WithErrorTrace() func(*SecurityUpdateCrossClusterAPIKeyRequest) { + return func(r *SecurityUpdateCrossClusterAPIKeyRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityUpdateCrossClusterAPIKey) WithFilterPath(v ...string) func(*SecurityUpdateCrossClusterAPIKeyRequest) { + return func(r *SecurityUpdateCrossClusterAPIKeyRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityUpdateCrossClusterAPIKey) WithHeader(h map[string]string) func(*SecurityUpdateCrossClusterAPIKeyRequest) { + return func(r *SecurityUpdateCrossClusterAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. 
+func (f SecurityUpdateCrossClusterAPIKey) WithOpaqueID(s string) func(*SecurityUpdateCrossClusterAPIKeyRequest) { + return func(r *SecurityUpdateCrossClusterAPIKeyRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.update_settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.update_settings.go new file mode 100644 index 000000000..11bfdbe9a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.update_settings.go @@ -0,0 +1,194 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newSecurityUpdateSettingsFunc(t Transport) SecurityUpdateSettings { + return func(body io.Reader, o ...func(*SecurityUpdateSettingsRequest)) (*Response, error) { + var r = SecurityUpdateSettingsRequest{Body: body} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// SecurityUpdateSettings - Update settings for the security system index +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-settings.html. +type SecurityUpdateSettings func(body io.Reader, o ...func(*SecurityUpdateSettingsRequest)) (*Response, error) + +// SecurityUpdateSettingsRequest configures the Security Update Settings API request. +type SecurityUpdateSettingsRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r SecurityUpdateSettingsRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "PUT" + + path.Grow(7 + len("/_security/settings")) + path.WriteString("http://") + path.WriteString("/_security/settings") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f SecurityUpdateSettings) WithContext(v context.Context) func(*SecurityUpdateSettingsRequest) { + return func(r *SecurityUpdateSettingsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f SecurityUpdateSettings) WithPretty() func(*SecurityUpdateSettingsRequest) { + return func(r *SecurityUpdateSettingsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f SecurityUpdateSettings) WithHuman() func(*SecurityUpdateSettingsRequest) { + return func(r *SecurityUpdateSettingsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f SecurityUpdateSettings) WithErrorTrace() func(*SecurityUpdateSettingsRequest) { + return func(r *SecurityUpdateSettingsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f SecurityUpdateSettings) WithFilterPath(v ...string) func(*SecurityUpdateSettingsRequest) { + return func(r *SecurityUpdateSettingsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f SecurityUpdateSettings) WithHeader(h map[string]string) func(*SecurityUpdateSettingsRequest) { + return func(r *SecurityUpdateSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f SecurityUpdateSettings) WithOpaqueID(s string) func(*SecurityUpdateSettingsRequest) { + return func(r *SecurityUpdateSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.update_user_profile_data.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.update_user_profile_data.go index 65c08de61..7888bab3c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.update_user_profile_data.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.security.update_user_profile_data.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.delete_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.delete_lifecycle.go index 8c3bcb577..94cb47834 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.delete_lifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.delete_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.execute_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.execute_lifecycle.go index f2da4abb4..92e3a62ab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.execute_lifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.execute_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.execute_retention.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.execute_retention.go index e8d727631..c5d0f090c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.execute_retention.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.execute_retention.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.get_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.get_lifecycle.go index 2abadacbd..0a0e4353b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.get_lifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.get_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.get_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.get_stats.go index 262200e7d..ec2a592c0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.get_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.get_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.get_status.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.get_status.go index d01a714d3..333ed714d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.get_status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.get_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.put_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.put_lifecycle.go index dc22a53b8..d35b4c3fa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.put_lifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.put_lifecycle.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.start.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.start.go index 833ec31c8..37d8923cc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.start.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.start.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.stop.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.stop.go index 72af54f4a..33d898b9d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.stop.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.slm.stop.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.clear_cursor.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.clear_cursor.go index 4d546e5e4..93553d2fc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.clear_cursor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.clear_cursor.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.delete_async.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.delete_async.go index ef2ceb8ac..c8cff7a2d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.delete_async.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.delete_async.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.get_async.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.get_async.go index 676aaf358..88b3f659e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.get_async.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.get_async.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.get_async_status.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.get_async_status.go index 602ed7773..3f96b0bfc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.get_async_status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.get_async_status.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.query.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.query.go index 535e0ecf9..e9a4a3e51 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.query.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.query.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.translate.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.translate.go index 98a1b0b4b..bb775f812 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.translate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.sql.translate.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ssl.certificates.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ssl.certificates.go index bc0babb4a..fae22f625 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ssl.certificates.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.ssl.certificates.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.text_structure.find_structure.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.text_structure.find_structure.go index 0cf8a5207..952ba07d8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.text_structure.find_structure.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.text_structure.find_structure.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.delete_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.delete_transform.go index edfe5df42..07c23f707 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.delete_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.delete_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi @@ -48,8 +48,9 @@ type TransformDeleteTransform func(transform_id string, o ...func(*TransformDele type TransformDeleteTransformRequest struct { TransformID string - Force *bool - Timeout time.Duration + DeleteDestIndex *bool + Force *bool + Timeout time.Duration Pretty bool Human bool @@ -80,6 +81,10 @@ func (r TransformDeleteTransformRequest) Do(ctx context.Context, transport Trans params = make(map[string]string) + if r.DeleteDestIndex != nil { + params["delete_dest_index"] = strconv.FormatBool(*r.DeleteDestIndex) + } + if r.Force != nil { params["force"] = strconv.FormatBool(*r.Force) } @@ -154,6 +159,13 @@ func (f TransformDeleteTransform) WithContext(v context.Context) func(*Transform } } +// WithDeleteDestIndex - when `true`, the destination index is deleted together with the transform. the default value is `false`, meaning that the destination index will not be deleted.. +func (f TransformDeleteTransform) WithDeleteDestIndex(v bool) func(*TransformDeleteTransformRequest) { + return func(r *TransformDeleteTransformRequest) { + r.DeleteDestIndex = &v + } +} + // WithForce - when `true`, the transform is deleted regardless of its current state. the default value is `false`, meaning that the transform must be `stopped` before it can be deleted.. 
func (f TransformDeleteTransform) WithForce(v bool) func(*TransformDeleteTransformRequest) { return func(r *TransformDeleteTransformRequest) { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.get_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.get_transform.go index e747a35a0..bc5fcf363 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.get_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.get_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.get_transform_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.get_transform_stats.go index 01857a0dd..c16142f10 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.get_transform_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.get_transform_stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.preview_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.preview_transform.go index 74093d421..e1520f500 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.preview_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.preview_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.put_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.put_transform.go index fb39d2b5f..aefa41169 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.put_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.put_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.reset_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.reset_transform.go index 4c616a635..16700b549 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.reset_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.reset_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.schedule_now_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.schedule_now_transform.go index 6f2b95688..7ed9961fe 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.schedule_now_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.schedule_now_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.start_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.start_transform.go index c1de62c56..dfca04e28 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.start_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.start_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.stop_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.stop_transform.go index 05ecd4684..29e3342f1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.stop_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.stop_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.update_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.update_transform.go index e8886fec1..e7f0844d2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.update_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.update_transform.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.upgrade_transforms.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.upgrade_transforms.go index ef7359b2b..d45dbca73 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.upgrade_transforms.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.transform.upgrade_transforms.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.ack_watch.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.ack_watch.go index 24515aea2..015b1cd5a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.ack_watch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.ack_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.activate_watch.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.activate_watch.go index cefbfdd01..26694d6f6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.activate_watch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.activate_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.deactivate_watch.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.deactivate_watch.go index 187433911..fd34a24b6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.deactivate_watch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.deactivate_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.delete_watch.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.delete_watch.go index 40ad9a3be..36dcc37c2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.delete_watch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.delete_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.execute_watch.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.execute_watch.go index 8291b3097..43138c00f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.execute_watch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.execute_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.get_settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.get_settings.go new file mode 100644 index 000000000..8844a7f68 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.get_settings.go @@ -0,0 +1,187 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "net/http" + "strings" +) + +func newWatcherGetSettingsFunc(t Transport) WatcherGetSettings { + return func(o ...func(*WatcherGetSettingsRequest)) (*Response, error) { + var r = WatcherGetSettingsRequest{} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// WatcherGetSettings - Retrieve settings for the watcher system index +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-settings.html. +type WatcherGetSettings func(o ...func(*WatcherGetSettingsRequest)) (*Response, error) + +// WatcherGetSettingsRequest configures the Watcher Get Settings API request. +type WatcherGetSettingsRequest struct { + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r WatcherGetSettingsRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + path.Grow(7 + len("/_watcher/settings")) + path.WriteString("http://") + path.WriteString("/_watcher/settings") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f WatcherGetSettings) WithContext(v context.Context) func(*WatcherGetSettingsRequest) { + return func(r *WatcherGetSettingsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f WatcherGetSettings) WithPretty() func(*WatcherGetSettingsRequest) { + return func(r *WatcherGetSettingsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. +func (f WatcherGetSettings) WithHuman() func(*WatcherGetSettingsRequest) { + return func(r *WatcherGetSettingsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. 
+func (f WatcherGetSettings) WithErrorTrace() func(*WatcherGetSettingsRequest) { + return func(r *WatcherGetSettingsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f WatcherGetSettings) WithFilterPath(v ...string) func(*WatcherGetSettingsRequest) { + return func(r *WatcherGetSettingsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f WatcherGetSettings) WithHeader(h map[string]string) func(*WatcherGetSettingsRequest) { + return func(r *WatcherGetSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f WatcherGetSettings) WithOpaqueID(s string) func(*WatcherGetSettingsRequest) { + return func(r *WatcherGetSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.get_watch.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.get_watch.go index f861aefad..3592723ae 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.get_watch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.get_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.put_watch.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.put_watch.go index 09ed46b62..ae6520eba 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.put_watch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.put_watch.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.query_watches.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.query_watches.go index 215343c80..c976dcb59 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.query_watches.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.query_watches.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.start.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.start.go index 2068b16c4..f68786e9b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.start.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.start.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.stats.go index 1a837d050..9c17b89c1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.stats.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.stop.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.stop.go index ae7136720..4a8859fb3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.stop.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.stop.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.update_settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.update_settings.go new file mode 100644 index 000000000..829b46489 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.watcher.update_settings.go @@ -0,0 +1,194 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Code generated from specification version 8.11.0: DO NOT EDIT + +package esapi + +import ( + "context" + "io" + "net/http" + "strings" +) + +func newWatcherUpdateSettingsFunc(t Transport) WatcherUpdateSettings { + return func(body io.Reader, o ...func(*WatcherUpdateSettingsRequest)) (*Response, error) { + var r = WatcherUpdateSettingsRequest{Body: body} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// ----- API Definition ------------------------------------------------------- + +// WatcherUpdateSettings - Update settings for the watcher system index +// +// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-update-settings.html. +type WatcherUpdateSettings func(body io.Reader, o ...func(*WatcherUpdateSettingsRequest)) (*Response, error) + +// WatcherUpdateSettingsRequest configures the Watcher Update Settings API request. +type WatcherUpdateSettingsRequest struct { + Body io.Reader + + Pretty bool + Human bool + ErrorTrace bool + FilterPath []string + + Header http.Header + + ctx context.Context +} + +// Do executes the request and returns response or error. 
+func (r WatcherUpdateSettingsRequest) Do(ctx context.Context, transport Transport) (*Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "PUT" + + path.Grow(7 + len("/_watcher/settings")) + path.WriteString("http://") + path.WriteString("/_watcher/settings") + + params = make(map[string]string) + + if r.Pretty { + params["pretty"] = "true" + } + + if r.Human { + params["human"] = "true" + } + + if r.ErrorTrace { + params["error_trace"] = "true" + } + + if len(r.FilterPath) > 0 { + params["filter_path"] = strings.Join(r.FilterPath, ",") + } + + req, err := newRequest(method, path.String(), r.Body) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if r.Body != nil && req.Header.Get(headerContentType) == "" { + req.Header[headerContentType] = headerContentTypeJSON + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +func (f WatcherUpdateSettings) WithContext(v context.Context) func(*WatcherUpdateSettingsRequest) { + return func(r *WatcherUpdateSettingsRequest) { + r.ctx = v + } +} + +// WithPretty makes the response body pretty-printed. +func (f WatcherUpdateSettings) WithPretty() func(*WatcherUpdateSettingsRequest) { + return func(r *WatcherUpdateSettingsRequest) { + r.Pretty = true + } +} + +// WithHuman makes statistical values human-readable. 
+func (f WatcherUpdateSettings) WithHuman() func(*WatcherUpdateSettingsRequest) { + return func(r *WatcherUpdateSettingsRequest) { + r.Human = true + } +} + +// WithErrorTrace includes the stack trace for errors in the response body. +func (f WatcherUpdateSettings) WithErrorTrace() func(*WatcherUpdateSettingsRequest) { + return func(r *WatcherUpdateSettingsRequest) { + r.ErrorTrace = true + } +} + +// WithFilterPath filters the properties of the response body. +func (f WatcherUpdateSettings) WithFilterPath(v ...string) func(*WatcherUpdateSettingsRequest) { + return func(r *WatcherUpdateSettingsRequest) { + r.FilterPath = v + } +} + +// WithHeader adds the headers to the HTTP request. +func (f WatcherUpdateSettings) WithHeader(h map[string]string) func(*WatcherUpdateSettingsRequest) { + return func(r *WatcherUpdateSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + for k, v := range h { + r.Header.Add(k, v) + } + } +} + +// WithOpaqueID adds the X-Opaque-Id header to the HTTP request. +func (f WatcherUpdateSettings) WithOpaqueID(s string) func(*WatcherUpdateSettingsRequest) { + return func(r *WatcherUpdateSettingsRequest) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set("X-Opaque-Id", s) + } +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.xpack.info.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.xpack.info.go index 61fa69965..b6f8b7411 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.xpack.info.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.xpack.info.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
// -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.xpack.usage.go b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.xpack.usage.go index b3e4b346e..bd3d8b74d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.xpack.usage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/esapi/api.xpack.xpack.usage.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. // -// Code generated from specification version 8.7.0: DO NOT EDIT +// Code generated from specification version 8.11.0: DO NOT EDIT package esapi diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/internal/version/version.go b/vendor/github.com/elastic/go-elasticsearch/v8/internal/version/version.go index eeda88646..964f556d2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/internal/version/version.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/internal/version/version.go @@ -18,4 +18,4 @@ package version // Client returns the client version as a string. -const Client = "8.7.0" +const Client = "8.11.0" diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/api._.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/api._.go index 1811a2a84..123b74db9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/api._.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/api._.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package typedapi @@ -76,6 +76,7 @@ import ( cluster_get_component_template "github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getcomponenttemplate" cluster_get_settings "github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getsettings" cluster_health "github.com/elastic/go-elasticsearch/v8/typedapi/cluster/health" + cluster_info "github.com/elastic/go-elasticsearch/v8/typedapi/cluster/info" cluster_pending_tasks "github.com/elastic/go-elasticsearch/v8/typedapi/cluster/pendingtasks" cluster_post_voting_config_exclusions "github.com/elastic/go-elasticsearch/v8/typedapi/cluster/postvotingconfigexclusions" cluster_put_component_template "github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putcomponenttemplate" @@ -84,6 +85,7 @@ import ( cluster_reroute "github.com/elastic/go-elasticsearch/v8/typedapi/cluster/reroute" cluster_state "github.com/elastic/go-elasticsearch/v8/typedapi/cluster/state" cluster_stats "github.com/elastic/go-elasticsearch/v8/typedapi/cluster/stats" + core_bulk "github.com/elastic/go-elasticsearch/v8/typedapi/core/bulk" core_clear_scroll "github.com/elastic/go-elasticsearch/v8/typedapi/core/clearscroll" core_close_point_in_time "github.com/elastic/go-elasticsearch/v8/typedapi/core/closepointintime" core_count "github.com/elastic/go-elasticsearch/v8/typedapi/core/count" @@ -101,10 +103,13 @@ import ( core_get_script_context "github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptcontext" core_get_script_languages "github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptlanguages" core_get_source "github.com/elastic/go-elasticsearch/v8/typedapi/core/getsource" + core_health_report "github.com/elastic/go-elasticsearch/v8/typedapi/core/healthreport" core_index "github.com/elastic/go-elasticsearch/v8/typedapi/core/index" 
core_info "github.com/elastic/go-elasticsearch/v8/typedapi/core/info" core_knn_search "github.com/elastic/go-elasticsearch/v8/typedapi/core/knnsearch" core_mget "github.com/elastic/go-elasticsearch/v8/typedapi/core/mget" + core_msearch "github.com/elastic/go-elasticsearch/v8/typedapi/core/msearch" + core_msearch_template "github.com/elastic/go-elasticsearch/v8/typedapi/core/msearchtemplate" core_mtermvectors "github.com/elastic/go-elasticsearch/v8/typedapi/core/mtermvectors" core_open_point_in_time "github.com/elastic/go-elasticsearch/v8/typedapi/core/openpointintime" core_ping "github.com/elastic/go-elasticsearch/v8/typedapi/core/ping" @@ -139,6 +144,7 @@ import ( features_get_features "github.com/elastic/go-elasticsearch/v8/typedapi/features/getfeatures" features_reset_features "github.com/elastic/go-elasticsearch/v8/typedapi/features/resetfeatures" fleet_global_checkpoints "github.com/elastic/go-elasticsearch/v8/typedapi/fleet/globalcheckpoints" + fleet_msearch "github.com/elastic/go-elasticsearch/v8/typedapi/fleet/msearch" fleet_search "github.com/elastic/go-elasticsearch/v8/typedapi/fleet/search" graph_explore "github.com/elastic/go-elasticsearch/v8/typedapi/graph/explore" ilm_delete_lifecycle "github.com/elastic/go-elasticsearch/v8/typedapi/ilm/deletelifecycle" @@ -162,6 +168,7 @@ import ( indices_data_streams_stats "github.com/elastic/go-elasticsearch/v8/typedapi/indices/datastreamsstats" indices_delete "github.com/elastic/go-elasticsearch/v8/typedapi/indices/delete" indices_delete_alias "github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletealias" + indices_delete_data_lifecycle "github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatalifecycle" indices_delete_data_stream "github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatastream" indices_delete_index_template "github.com/elastic/go-elasticsearch/v8/typedapi/indices/deleteindextemplate" indices_delete_template 
"github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletetemplate" @@ -171,11 +178,13 @@ import ( indices_exists_alias "github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsalias" indices_exists_index_template "github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsindextemplate" indices_exists_template "github.com/elastic/go-elasticsearch/v8/typedapi/indices/existstemplate" + indices_explain_data_lifecycle "github.com/elastic/go-elasticsearch/v8/typedapi/indices/explaindatalifecycle" indices_field_usage_stats "github.com/elastic/go-elasticsearch/v8/typedapi/indices/fieldusagestats" indices_flush "github.com/elastic/go-elasticsearch/v8/typedapi/indices/flush" indices_forcemerge "github.com/elastic/go-elasticsearch/v8/typedapi/indices/forcemerge" indices_get "github.com/elastic/go-elasticsearch/v8/typedapi/indices/get" indices_get_alias "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getalias" + indices_get_data_lifecycle "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatalifecycle" indices_get_data_stream "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatastream" indices_get_field_mapping "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getfieldmapping" indices_get_index_template "github.com/elastic/go-elasticsearch/v8/typedapi/indices/getindextemplate" @@ -187,6 +196,7 @@ import ( indices_open "github.com/elastic/go-elasticsearch/v8/typedapi/indices/open" indices_promote_data_stream "github.com/elastic/go-elasticsearch/v8/typedapi/indices/promotedatastream" indices_put_alias "github.com/elastic/go-elasticsearch/v8/typedapi/indices/putalias" + indices_put_data_lifecycle "github.com/elastic/go-elasticsearch/v8/typedapi/indices/putdatalifecycle" indices_put_index_template "github.com/elastic/go-elasticsearch/v8/typedapi/indices/putindextemplate" indices_put_mapping "github.com/elastic/go-elasticsearch/v8/typedapi/indices/putmapping" indices_put_settings 
"github.com/elastic/go-elasticsearch/v8/typedapi/indices/putsettings" @@ -267,6 +277,7 @@ import ( ml_info "github.com/elastic/go-elasticsearch/v8/typedapi/ml/info" ml_open_job "github.com/elastic/go-elasticsearch/v8/typedapi/ml/openjob" ml_post_calendar_events "github.com/elastic/go-elasticsearch/v8/typedapi/ml/postcalendarevents" + ml_post_data "github.com/elastic/go-elasticsearch/v8/typedapi/ml/postdata" ml_preview_datafeed "github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdatafeed" ml_preview_data_frame_analytics "github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdataframeanalytics" ml_put_calendar "github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendar" @@ -296,6 +307,7 @@ import ( ml_upgrade_job_snapshot "github.com/elastic/go-elasticsearch/v8/typedapi/ml/upgradejobsnapshot" ml_validate "github.com/elastic/go-elasticsearch/v8/typedapi/ml/validate" ml_validate_detector "github.com/elastic/go-elasticsearch/v8/typedapi/ml/validatedetector" + monitoring_bulk "github.com/elastic/go-elasticsearch/v8/typedapi/monitoring/bulk" nodes_clear_repositories_metering_archive "github.com/elastic/go-elasticsearch/v8/typedapi/nodes/clearrepositoriesmeteringarchive" nodes_get_repositories_metering_info "github.com/elastic/go-elasticsearch/v8/typedapi/nodes/getrepositoriesmeteringinfo" nodes_hot_threads "github.com/elastic/go-elasticsearch/v8/typedapi/nodes/hotthreads" @@ -303,6 +315,10 @@ import ( nodes_reload_secure_settings "github.com/elastic/go-elasticsearch/v8/typedapi/nodes/reloadsecuresettings" nodes_stats "github.com/elastic/go-elasticsearch/v8/typedapi/nodes/stats" nodes_usage "github.com/elastic/go-elasticsearch/v8/typedapi/nodes/usage" + query_ruleset_delete "github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/delete" + query_ruleset_get "github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/get" + query_ruleset_list "github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/list" + query_ruleset_put 
"github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/put" rollup_delete_job "github.com/elastic/go-elasticsearch/v8/typedapi/rollup/deletejob" rollup_get_jobs "github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getjobs" rollup_get_rollup_caps "github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupcaps" @@ -315,6 +331,14 @@ import ( searchable_snapshots_clear_cache "github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/clearcache" searchable_snapshots_mount "github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/mount" searchable_snapshots_stats "github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/stats" + search_application_delete "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/delete" + search_application_delete_behavioral_analytics "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/deletebehavioralanalytics" + search_application_get "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/get" + search_application_get_behavioral_analytics "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/getbehavioralanalytics" + search_application_list "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/list" + search_application_put "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/put" + search_application_put_behavioral_analytics "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/putbehavioralanalytics" + search_application_search "github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/search" security_activate_user_profile "github.com/elastic/go-elasticsearch/v8/typedapi/security/activateuserprofile" security_authenticate "github.com/elastic/go-elasticsearch/v8/typedapi/security/authenticate" security_bulk_update_api_keys "github.com/elastic/go-elasticsearch/v8/typedapi/security/bulkupdateapikeys" @@ -325,6 +349,7 @@ import ( security_clear_cached_roles 
"github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedroles" security_clear_cached_service_tokens "github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedservicetokens" security_create_api_key "github.com/elastic/go-elasticsearch/v8/typedapi/security/createapikey" + security_create_cross_cluster_api_key "github.com/elastic/go-elasticsearch/v8/typedapi/security/createcrossclusterapikey" security_create_service_token "github.com/elastic/go-elasticsearch/v8/typedapi/security/createservicetoken" security_delete_privileges "github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteprivileges" security_delete_role "github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterole" @@ -400,15 +425,24 @@ import ( sql_query "github.com/elastic/go-elasticsearch/v8/typedapi/sql/query" sql_translate "github.com/elastic/go-elasticsearch/v8/typedapi/sql/translate" ssl_certificates "github.com/elastic/go-elasticsearch/v8/typedapi/ssl/certificates" + synonyms_delete_synonym "github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/deletesynonym" + synonyms_delete_synonym_rule "github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/deletesynonymrule" + synonyms_get_synonym "github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonym" + synonyms_get_synonym_rule "github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonymrule" + synonyms_get_synonyms_sets "github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonymssets" + synonyms_put_synonym "github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonym" + synonyms_put_synonym_rule "github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonymrule" tasks_cancel "github.com/elastic/go-elasticsearch/v8/typedapi/tasks/cancel" tasks_get "github.com/elastic/go-elasticsearch/v8/typedapi/tasks/get" tasks_list "github.com/elastic/go-elasticsearch/v8/typedapi/tasks/list" + text_structure_find_structure 
"github.com/elastic/go-elasticsearch/v8/typedapi/textstructure/findstructure" transform_delete_transform "github.com/elastic/go-elasticsearch/v8/typedapi/transform/deletetransform" transform_get_transform "github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransform" transform_get_transform_stats "github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransformstats" transform_preview_transform "github.com/elastic/go-elasticsearch/v8/typedapi/transform/previewtransform" transform_put_transform "github.com/elastic/go-elasticsearch/v8/typedapi/transform/puttransform" transform_reset_transform "github.com/elastic/go-elasticsearch/v8/typedapi/transform/resettransform" + transform_schedule_now_transform "github.com/elastic/go-elasticsearch/v8/typedapi/transform/schedulenowtransform" transform_start_transform "github.com/elastic/go-elasticsearch/v8/typedapi/transform/starttransform" transform_stop_transform "github.com/elastic/go-elasticsearch/v8/typedapi/transform/stoptransform" transform_update_transform "github.com/elastic/go-elasticsearch/v8/typedapi/transform/updatetransform" @@ -418,528 +452,764 @@ import ( watcher_deactivate_watch "github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deactivatewatch" watcher_delete_watch "github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deletewatch" watcher_execute_watch "github.com/elastic/go-elasticsearch/v8/typedapi/watcher/executewatch" + watcher_get_settings "github.com/elastic/go-elasticsearch/v8/typedapi/watcher/getsettings" watcher_get_watch "github.com/elastic/go-elasticsearch/v8/typedapi/watcher/getwatch" watcher_put_watch "github.com/elastic/go-elasticsearch/v8/typedapi/watcher/putwatch" watcher_query_watches "github.com/elastic/go-elasticsearch/v8/typedapi/watcher/querywatches" watcher_start "github.com/elastic/go-elasticsearch/v8/typedapi/watcher/start" watcher_stats "github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stats" watcher_stop 
"github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stop" + watcher_update_settings "github.com/elastic/go-elasticsearch/v8/typedapi/watcher/updatesettings" xpack_info "github.com/elastic/go-elasticsearch/v8/typedapi/xpack/info" xpack_usage "github.com/elastic/go-elasticsearch/v8/typedapi/xpack/usage" ) -type Async struct { +type AsyncSearch struct { // Deletes an async search by ID. If the search is still running, the search // request will be cancelled. Otherwise, the saved search results are deleted. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html Delete async_search_delete.NewDelete // Retrieves the results of a previously submitted async search request given // its ID. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html Get async_search_get.NewGet // Retrieves the status of a previously submitted async search request given its // ID. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html Status async_search_status.NewStatus // Executes a search request asynchronously. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html Submit async_search_submit.NewSubmit } type Autoscaling struct { // Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. // Direct use is not supported. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-delete-autoscaling-policy.html DeleteAutoscalingPolicy autoscaling_delete_autoscaling_policy.NewDeleteAutoscalingPolicy // Gets the current autoscaling capacity based on the configured autoscaling // policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not // supported. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-capacity.html GetAutoscalingCapacity autoscaling_get_autoscaling_capacity.NewGetAutoscalingCapacity // Retrieves an autoscaling policy. 
Designed for indirect use by ECE/ESS and // ECK. Direct use is not supported. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-capacity.html GetAutoscalingPolicy autoscaling_get_autoscaling_policy.NewGetAutoscalingPolicy // Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and // ECK. Direct use is not supported. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-put-autoscaling-policy.html PutAutoscalingPolicy autoscaling_put_autoscaling_policy.NewPutAutoscalingPolicy } type Cat struct { // Shows information about currently configured aliases to indices including // filter and routing infos. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-alias.html Aliases cat_aliases.NewAliases // Provides a snapshot of how many shards are allocated to each data node and // how much disk space they are using. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-allocation.html Allocation cat_allocation.NewAllocation // Returns information about existing component_templates templates. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-component-templates.html ComponentTemplates cat_component_templates.NewComponentTemplates // Provides quick access to the document count of the entire cluster, or // individual indices. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-count.html Count cat_count.NewCount // Shows how much heap memory is currently being used by fielddata on every data // node in the cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-fielddata.html Fielddata cat_fielddata.NewFielddata // Returns a concise representation of the cluster health. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-health.html Health cat_health.NewHealth // Returns help for the Cat APIs. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat.html Help cat_help.NewHelp // Returns information about indices: number of primaries and replicas, document // counts, disk size, ... + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-indices.html Indices cat_indices.NewIndices // Returns information about the master node. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-master.html Master cat_master.NewMaster // Gets configuration and usage information about data frame analytics jobs. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-dfanalytics.html MlDataFrameAnalytics cat_ml_data_frame_analytics.NewMlDataFrameAnalytics // Gets configuration and usage information about datafeeds. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-datafeeds.html MlDatafeeds cat_ml_datafeeds.NewMlDatafeeds // Gets configuration and usage information about anomaly detection jobs. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-anomaly-detectors.html MlJobs cat_ml_jobs.NewMlJobs // Gets configuration and usage information about inference trained models. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-trained-model.html MlTrainedModels cat_ml_trained_models.NewMlTrainedModels // Returns information about custom node attributes. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodeattrs.html Nodeattrs cat_nodeattrs.NewNodeattrs // Returns basic statistics about performance of cluster nodes. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-nodes.html Nodes cat_nodes.NewNodes // Returns a concise representation of the cluster pending tasks. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-pending-tasks.html PendingTasks cat_pending_tasks.NewPendingTasks // Returns information about installed plugins across nodes node. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-plugins.html Plugins cat_plugins.NewPlugins // Returns information about index shard recoveries, both on-going completed. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-recovery.html Recovery cat_recovery.NewRecovery // Returns information about snapshot repositories registered in the cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-repositories.html Repositories cat_repositories.NewRepositories // Provides low-level information about the segments in the shards of an index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-segments.html Segments cat_segments.NewSegments // Provides a detailed view of shard allocation on nodes. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-shards.html Shards cat_shards.NewShards // Returns all snapshots in a specific repository. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-snapshots.html Snapshots cat_snapshots.NewSnapshots // Returns information about the tasks currently executing on one or more nodes // in the cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html Tasks cat_tasks.NewTasks // Returns information about existing templates. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-templates.html Templates cat_templates.NewTemplates // Returns cluster-wide thread pool statistics per node. // By default the active, queue and rejected statistics are returned for all // thread pools. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-thread-pool.html ThreadPool cat_thread_pool.NewThreadPool // Gets configuration and usage information about transforms. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-transforms.html Transforms cat_transforms.NewTransforms } type Ccr struct { // Deletes auto-follow patterns. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-delete-auto-follow-pattern.html DeleteAutoFollowPattern ccr_delete_auto_follow_pattern.NewDeleteAutoFollowPattern // Creates a new follower index configured to follow the referenced leader // index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-follow.html Follow ccr_follow.NewFollow // Retrieves information about all follower indices, including parameters and // status for each follower index + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-info.html FollowInfo ccr_follow_info.NewFollowInfo // Retrieves follower stats. return shard-level stats about the following tasks // associated with each shard for the specified indices. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-stats.html FollowStats ccr_follow_stats.NewFollowStats // Removes the follower retention leases from the leader. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-forget-follower.html ForgetFollower ccr_forget_follower.NewForgetFollower // Gets configured auto-follow patterns. Returns the specified auto-follow // pattern collection. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-auto-follow-pattern.html GetAutoFollowPattern ccr_get_auto_follow_pattern.NewGetAutoFollowPattern // Pauses an auto-follow pattern + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-pause-auto-follow-pattern.html PauseAutoFollowPattern ccr_pause_auto_follow_pattern.NewPauseAutoFollowPattern // Pauses a follower index. The follower index will not fetch any additional // operations from the leader index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-pause-follow.html PauseFollow ccr_pause_follow.NewPauseFollow // Creates a new named collection of auto-follow patterns against a specified // remote cluster. 
Newly created indices on the remote cluster matching any of // the specified patterns will be automatically configured as follower indices. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-auto-follow-pattern.html PutAutoFollowPattern ccr_put_auto_follow_pattern.NewPutAutoFollowPattern // Resumes an auto-follow pattern that has been paused + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-resume-auto-follow-pattern.html ResumeAutoFollowPattern ccr_resume_auto_follow_pattern.NewResumeAutoFollowPattern // Resumes a follower index that has been paused + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-resume-follow.html ResumeFollow ccr_resume_follow.NewResumeFollow // Gets all stats related to cross-cluster replication. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-stats.html Stats ccr_stats.NewStats // Stops the following task associated with a follower index and removes index // metadata and settings associated with cross-cluster replication. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-unfollow.html Unfollow ccr_unfollow.NewUnfollow } type Cluster struct { // Provides explanations for shard allocations in the cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-allocation-explain.html AllocationExplain cluster_allocation_explain.NewAllocationExplain // Deletes a component template + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html DeleteComponentTemplate cluster_delete_component_template.NewDeleteComponentTemplate // Clears cluster voting config exclusions. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/voting-config-exclusions.html DeleteVotingConfigExclusions cluster_delete_voting_config_exclusions.NewDeleteVotingConfigExclusions // Returns information about whether a particular component template exist + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html ExistsComponentTemplate cluster_exists_component_template.NewExistsComponentTemplate // Returns one or more component templates + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html GetComponentTemplate cluster_get_component_template.NewGetComponentTemplate // Returns cluster settings. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-get-settings.html GetSettings cluster_get_settings.NewGetSettings // Returns basic information about the health of the cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html Health cluster_health.NewHealth + // Returns different information about the cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-info.html + Info cluster_info.NewInfo // Returns a list of any cluster-level changes (e.g. create index, update // mapping, // allocate or fail shard) which have not yet been executed. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-pending.html PendingTasks cluster_pending_tasks.NewPendingTasks // Updates the cluster voting config exclusions by node ids or node names. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/voting-config-exclusions.html PostVotingConfigExclusions cluster_post_voting_config_exclusions.NewPostVotingConfigExclusions // Creates or updates a component template + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html PutComponentTemplate cluster_put_component_template.NewPutComponentTemplate // Updates the cluster settings. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html PutSettings cluster_put_settings.NewPutSettings // Returns the information about configured remote clusters. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-remote-info.html RemoteInfo cluster_remote_info.NewRemoteInfo // Allows to manually change the allocation of individual shards in the cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-reroute.html Reroute cluster_reroute.NewReroute // Returns a comprehensive information about the state of the cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html State cluster_state.NewState // Returns high-level overview of cluster statistics. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html Stats cluster_stats.NewStats } type Core struct { + // Allows to perform multiple index/update/delete operations in a single + // request. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html + Bulk core_bulk.NewBulk // Explicitly clears the search context for a scroll. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-scroll-api.html ClearScroll core_clear_scroll.NewClearScroll // Close a point in time + // https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html ClosePointInTime core_close_point_in_time.NewClosePointInTime // Returns number of documents matching a query. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html Count core_count.NewCount // Creates a new document in the index. // // Returns a 409 response when a document with a same ID already exists in the // index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html Create core_create.NewCreate // Removes a document from the index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html Delete core_delete.NewDelete // Deletes documents matching the provided query. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html DeleteByQuery core_delete_by_query.NewDeleteByQuery // Changes the number of requests per second for a particular Delete By Query // operation. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html DeleteByQueryRethrottle core_delete_by_query_rethrottle.NewDeleteByQueryRethrottle // Deletes a script. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html DeleteScript core_delete_script.NewDeleteScript // Returns information about whether a document exists in an index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html Exists core_exists.NewExists // Returns information about whether a document source exists in an index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html ExistsSource core_exists_source.NewExistsSource // Returns information about why a specific matches (or doesn't match) a query. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-explain.html Explain core_explain.NewExplain // Returns the information about the capabilities of fields among multiple // indices. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html FieldCaps core_field_caps.NewFieldCaps // Returns a document. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html Get core_get.NewGet // Returns a script. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html GetScript core_get_script.NewGetScript // Returns all script contexts. + // https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-contexts.html GetScriptContext core_get_script_context.NewGetScriptContext // Returns available script types, languages and contexts + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html GetScriptLanguages core_get_script_languages.NewGetScriptLanguages // Returns the source of a document. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html GetSource core_get_source.NewGetSource + // Returns the health of the cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/health-api.html + HealthReport core_health_report.NewHealthReport // Creates or updates a document in an index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html Index core_index.NewIndex // Returns basic information about the cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html Info core_info.NewInfo // Performs a kNN search. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html KnnSearch core_knn_search.NewKnnSearch // Allows to get multiple documents in one request. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html Mget core_mget.NewMget + // Allows to execute several search operations in one request. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html + Msearch core_msearch.NewMsearch + // Allows to execute several search template operations in one request. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html + MsearchTemplate core_msearch_template.NewMsearchTemplate // Returns multiple termvectors in one request. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-termvectors.html Mtermvectors core_mtermvectors.NewMtermvectors // Open a point in time that can be used in subsequent searches + // https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html OpenPointInTime core_open_point_in_time.NewOpenPointInTime // Returns whether the cluster is running. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html Ping core_ping.NewPing // Creates or updates a script. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html PutScript core_put_script.NewPutScript // Allows to evaluate the quality of ranked search results over a set of typical // search queries + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html RankEval core_rank_eval.NewRankEval // Allows to copy documents from one index to another, optionally filtering the // source // documents by a query, changing the destination index settings, or fetching // the // documents from a remote cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html Reindex core_reindex.NewReindex // Changes the number of requests per second for a particular Reindex operation. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html ReindexRethrottle core_reindex_rethrottle.NewReindexRethrottle // Allows to use the Mustache language to pre-render a search definition. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/render-search-template-api.html RenderSearchTemplate core_render_search_template.NewRenderSearchTemplate // Allows an arbitrary script to be executed and a result to be returned + // https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html ScriptsPainlessExecute core_scripts_painless_execute.NewScriptsPainlessExecute // Allows to retrieve a large numbers of results from a single search request. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html#request-body-search-scroll Scroll core_scroll.NewScroll // Returns results matching a query. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html Search core_search.NewSearch // Searches a vector tile for geospatial values. Returns results as a binary // Mapbox vector tile. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-vector-tile-api.html SearchMvt core_search_mvt.NewSearchMvt // Returns information about the indices and shards that a search request would // be executed against. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-shards.html SearchShards core_search_shards.NewSearchShards // Allows to use the Mustache language to pre-render a search definition. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html SearchTemplate core_search_template.NewSearchTemplate // The terms enum API can be used to discover terms in the index that begin // with the provided string. It is designed for low-latency look-ups used in // auto-complete scenarios. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-terms-enum.html TermsEnum core_terms_enum.NewTermsEnum // Returns information and statistics about terms in the fields of a particular // document. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-termvectors.html Termvectors core_termvectors.NewTermvectors // Updates a document with a script or partial document. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html Update core_update.NewUpdate // Performs an update on every document in the index without changing the // source, // for example to pick up a mapping change. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html UpdateByQuery core_update_by_query.NewUpdateByQuery // Changes the number of requests per second for a particular Update By Query // operation. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html UpdateByQueryRethrottle core_update_by_query_rethrottle.NewUpdateByQueryRethrottle } -type Dangling struct { +type DanglingIndices struct { // Deletes the specified dangling index + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html DeleteDanglingIndex dangling_indices_delete_dangling_index.NewDeleteDanglingIndex // Imports the specified dangling index + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html ImportDanglingIndex dangling_indices_import_dangling_index.NewImportDanglingIndex // Returns all dangling indices. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway-dangling-indices.html ListDanglingIndices dangling_indices_list_dangling_indices.NewListDanglingIndices } type Enrich struct { // Deletes an existing enrich policy and its enrich index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-enrich-policy-api.html DeletePolicy enrich_delete_policy.NewDeletePolicy // Creates the enrich index for an existing enrich policy. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/execute-enrich-policy-api.html ExecutePolicy enrich_execute_policy.NewExecutePolicy // Gets information about an enrich policy. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-enrich-policy-api.html GetPolicy enrich_get_policy.NewGetPolicy // Creates a new enrich policy. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-enrich-policy-api.html PutPolicy enrich_put_policy.NewPutPolicy // Gets enrich coordinator statistics and information about enrich policies that // are currently executing. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/enrich-stats-api.html Stats enrich_stats.NewStats } type Eql struct { // Deletes an async EQL search by ID. If the search is still running, the search // request will be cancelled. Otherwise, the saved search results are deleted. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html Delete eql_delete.NewDelete // Returns async results from previously executed Event Query Language (EQL) // search + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-eql-search-api.html Get eql_get.NewGet // Returns the status of a previously submitted async or stored Event Query // Language (EQL) search + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-eql-status-api.html GetStatus eql_get_status.NewGetStatus // Returns results matching a query expressed in Event Query Language (EQL) + // https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html Search eql_search.NewSearch } type Features struct { // Gets a list of features which can be included in snapshots using the // feature_states field when creating a snapshot + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-features-api.html GetFeatures features_get_features.NewGetFeatures // Resets the internal state of features, usually 
by deleting system indices + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html ResetFeatures features_reset_features.NewResetFeatures } type Fleet struct { // Returns the current global checkpoints for an index. This API is design for // internal use by the fleet server project. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-global-checkpoints.html GlobalCheckpoints fleet_global_checkpoints.NewGlobalCheckpoints + // Multi Search API where the search will only be executed after specified + // checkpoints are available due to a refresh. This API is designed for internal + // use by the fleet server project. + // + Msearch fleet_msearch.NewMsearch // Search API where the search will only be executed after specified checkpoints // are available due to a refresh. This API is designed for internal use by the // fleet server project. + // Search fleet_search.NewSearch } type Graph struct { // Explore extracted and summarized information about the documents and terms in // an index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html Explore graph_explore.NewExplore } type Ilm struct { // Deletes the specified lifecycle policy definition. A currently used policy // cannot be deleted. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete-lifecycle.html DeleteLifecycle ilm_delete_lifecycle.NewDeleteLifecycle // Retrieves information about the index's current lifecycle state, such as the // currently executing phase, action, and step. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-explain-lifecycle.html ExplainLifecycle ilm_explain_lifecycle.NewExplainLifecycle // Returns the specified policy definition. Includes the policy version and last // modified date. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-lifecycle.html GetLifecycle ilm_get_lifecycle.NewGetLifecycle // Retrieves the current index lifecycle management (ILM) status. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-status.html GetStatus ilm_get_status.NewGetStatus // Migrates the indices and ILM policies away from custom node attribute // allocation routing to data tiers routing + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-migrate-to-data-tiers.html MigrateToDataTiers ilm_migrate_to_data_tiers.NewMigrateToDataTiers // Manually moves an index into the specified step and executes that step. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-move-to-step.html MoveToStep ilm_move_to_step.NewMoveToStep // Creates a lifecycle policy + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-put-lifecycle.html PutLifecycle ilm_put_lifecycle.NewPutLifecycle // Removes the assigned lifecycle policy and stops managing the specified index + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-remove-policy.html RemovePolicy ilm_remove_policy.NewRemovePolicy // Retries executing the policy for an index that is in the ERROR step. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-retry-policy.html Retry ilm_retry.NewRetry // Start the index lifecycle management (ILM) plugin. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-start.html Start ilm_start.NewStart // Halts all lifecycle management operations and stops the index lifecycle // management (ILM) plugin + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-stop.html Stop ilm_stop.NewStop } type Indices struct { // Adds a block to an index. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-blocks.html AddBlock indices_add_block.NewAddBlock // Performs the analysis process on a text and return the tokens breakdown of // the text. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-analyze.html Analyze indices_analyze.NewAnalyze // Clears all or specific caches for one or more indices. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clearcache.html ClearCache indices_clear_cache.NewClearCache // Clones an index + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-clone-index.html Clone indices_clone.NewClone // Closes an index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-close.html Close indices_close.NewClose // Creates an index with optional settings and mappings. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html Create indices_create.NewCreate // Creates a data stream + // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html CreateDataStream indices_create_data_stream.NewCreateDataStream // Provides statistics on operations happening in a data stream. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html DataStreamsStats indices_data_streams_stats.NewDataStreamsStats // Deletes an index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html Delete indices_delete.NewDelete // Deletes an alias. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html DeleteAlias indices_delete_alias.NewDeleteAlias + // Deletes the data stream lifecycle of the selected data streams. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-delete-lifecycle.html + DeleteDataLifecycle indices_delete_data_lifecycle.NewDeleteDataLifecycle // Deletes a data stream. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html DeleteDataStream indices_delete_data_stream.NewDeleteDataStream // Deletes an index template. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html DeleteIndexTemplate indices_delete_index_template.NewDeleteIndexTemplate // Deletes an index template. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html DeleteTemplate indices_delete_template.NewDeleteTemplate // Analyzes the disk usage of each field of an index or data stream + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-disk-usage.html DiskUsage indices_disk_usage.NewDiskUsage // Downsample an index + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-downsample-data-stream.html Downsample indices_downsample.NewDownsample // Returns information about whether a particular index exists. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-exists.html Exists indices_exists.NewExists // Returns information about whether a particular alias exists. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html ExistsAlias indices_exists_alias.NewExistsAlias // Returns information about whether a particular index template exists. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html ExistsIndexTemplate indices_exists_index_template.NewExistsIndexTemplate // Returns information about whether a particular index template exists. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html ExistsTemplate indices_exists_template.NewExistsTemplate + // Retrieves information about the index's current data stream lifecycle, such + // as any potential encountered error, time since creation etc. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-explain-lifecycle.html + ExplainDataLifecycle indices_explain_data_lifecycle.NewExplainDataLifecycle // Returns the field usage stats for each field of an index + // https://www.elastic.co/guide/en/elasticsearch/reference/current/field-usage-stats.html FieldUsageStats indices_field_usage_stats.NewFieldUsageStats // Performs the flush operation on one or more indices. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html Flush indices_flush.NewFlush // Performs the force merge operation on one or more indices. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html Forcemerge indices_forcemerge.NewForcemerge // Returns information about one or more indices. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-index.html Get indices_get.NewGet // Returns an alias. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html GetAlias indices_get_alias.NewGetAlias + // Returns the data stream lifecycle of the selected data streams. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-get-lifecycle.html + GetDataLifecycle indices_get_data_lifecycle.NewGetDataLifecycle // Returns data streams. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html GetDataStream indices_get_data_stream.NewGetDataStream // Returns mapping for one or more fields. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-field-mapping.html GetFieldMapping indices_get_field_mapping.NewGetFieldMapping // Returns an index template. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html GetIndexTemplate indices_get_index_template.NewGetIndexTemplate // Returns mappings for one or more indices. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html GetMapping indices_get_mapping.NewGetMapping // Returns settings for one or more indices. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html GetSettings indices_get_settings.NewGetSettings // Returns an index template. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html GetTemplate indices_get_template.NewGetTemplate // Migrates an alias to a data stream + // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html MigrateToDataStream indices_migrate_to_data_stream.NewMigrateToDataStream // Modifies a data stream + // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html ModifyDataStream indices_modify_data_stream.NewModifyDataStream // Opens an index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html Open indices_open.NewOpen // Promotes a data stream from a replicated data stream managed by CCR to a // regular data stream + // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html PromoteDataStream indices_promote_data_stream.NewPromoteDataStream // Creates or updates an alias. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html PutAlias indices_put_alias.NewPutAlias + // Updates the data stream lifecycle of the selected data streams. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-put-lifecycle.html + PutDataLifecycle indices_put_data_lifecycle.NewPutDataLifecycle // Creates or updates an index template. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html PutIndexTemplate indices_put_index_template.NewPutIndexTemplate // Updates the index mappings. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html PutMapping indices_put_mapping.NewPutMapping // Updates the index settings. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html PutSettings indices_put_settings.NewPutSettings // Creates or updates an index template. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html PutTemplate indices_put_template.NewPutTemplate // Returns information about ongoing index shard recoveries. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-recovery.html Recovery indices_recovery.NewRecovery // Performs the refresh operation in one or more indices. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html Refresh indices_refresh.NewRefresh // Reloads an index's search analyzers and their resources. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-reload-analyzers.html ReloadSearchAnalyzers indices_reload_search_analyzers.NewReloadSearchAnalyzers // Returns information about any matching indices, aliases, and data streams + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-resolve-index-api.html ResolveIndex indices_resolve_index.NewResolveIndex // Updates an alias to point to a new index when the existing index // is considered to be too large or too old. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-rollover-index.html Rollover indices_rollover.NewRollover // Provides low-level information about segments in a Lucene index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-segments.html Segments indices_segments.NewSegments // Provides store information for shard copies of indices. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html ShardStores indices_shard_stores.NewShardStores // Allow to shrink an existing index into a new index with fewer primary shards. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html Shrink indices_shrink.NewShrink // Simulate matching the given index name against the index templates in the // system + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html SimulateIndexTemplate indices_simulate_index_template.NewSimulateIndexTemplate // Simulate resolving the given template name or body + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html SimulateTemplate indices_simulate_template.NewSimulateTemplate // Allows you to split an existing index into a new index with more primary // shards. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-split-index.html Split indices_split.NewSplit // Provides statistics on operations happening in an index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html Stats indices_stats.NewStats // Unfreezes an index. When a frozen index is unfrozen, the index goes through // the normal recovery process and becomes writeable again. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/unfreeze-index-api.html Unfreeze indices_unfreeze.NewUnfreeze // Updates index aliases. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html UpdateAliases indices_update_aliases.NewUpdateAliases // Allows a user to validate a potentially expensive query without executing it. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-validate.html ValidateQuery indices_validate_query.NewValidateQuery } type Ingest struct { // Deletes a pipeline. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-pipeline-api.html DeletePipeline ingest_delete_pipeline.NewDeletePipeline // Returns statistical information about geoip databases + // https://www.elastic.co/guide/en/elasticsearch/reference/current/geoip-processor.html GeoIpStats ingest_geo_ip_stats.NewGeoIpStats // Returns a pipeline. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-pipeline-api.html GetPipeline ingest_get_pipeline.NewGetPipeline // Returns a list of the built-in patterns. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/grok-processor.html ProcessorGrok ingest_processor_grok.NewProcessorGrok // Creates or updates a pipeline. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ingest.html PutPipeline ingest_put_pipeline.NewPutPipeline // Allows to simulate a pipeline with example documents. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/simulate-pipeline-api.html Simulate ingest_simulate.NewSimulate } type License struct { // Deletes licensing information for the cluster + // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-license.html Delete license_delete.NewDelete // Retrieves licensing information for the cluster + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-license.html Get license_get.NewGet // Retrieves information about the status of the basic license. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html GetBasicStatus license_get_basic_status.NewGetBasicStatus // Retrieves information about the status of the trial license. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trial-status.html GetTrialStatus license_get_trial_status.NewGetTrialStatus // Updates the license for the cluster. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-license.html Post license_post.NewPost // Starts an indefinite basic license. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-basic.html PostStartBasic license_post_start_basic.NewPostStartBasic // starts a limited time trial license. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-trial.html PostStartTrial license_post_start_trial.NewPostStartTrial } type Logstash struct { // Deletes Logstash Pipelines used by Central Management + // https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-delete-pipeline.html DeletePipeline logstash_delete_pipeline.NewDeletePipeline // Retrieves Logstash Pipelines used by Central Management + // https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-get-pipeline.html GetPipeline logstash_get_pipeline.NewGetPipeline // Adds and updates Logstash Pipelines used for Central Management + // https://www.elastic.co/guide/en/elasticsearch/reference/current/logstash-api-put-pipeline.html PutPipeline logstash_put_pipeline.NewPutPipeline } @@ -947,635 +1217,993 @@ type Migration struct { // Retrieves information about different cluster, node, and index level settings // that use deprecated features that will be removed or changed in the next // major version. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-deprecation.html Deprecations migration_deprecations.NewDeprecations // Find out whether system features need to be upgraded or not + // https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-feature-upgrade.html GetFeatureUpgradeStatus migration_get_feature_upgrade_status.NewGetFeatureUpgradeStatus // Begin upgrades for system features + // https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-feature-upgrade.html PostFeatureUpgrade migration_post_feature_upgrade.NewPostFeatureUpgrade } type Ml struct { // Clear the cached results from a trained model deployment + // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-trained-model-deployment-cache.html ClearTrainedModelDeploymentCache ml_clear_trained_model_deployment_cache.NewClearTrainedModelDeploymentCache // Closes one or more anomaly detection jobs. A job can be opened and closed // multiple times throughout its lifecycle. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html CloseJob ml_close_job.NewCloseJob // Deletes a calendar. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar.html DeleteCalendar ml_delete_calendar.NewDeleteCalendar // Deletes scheduled events from a calendar. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar-event.html DeleteCalendarEvent ml_delete_calendar_event.NewDeleteCalendarEvent // Deletes anomaly detection jobs from a calendar. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar-job.html DeleteCalendarJob ml_delete_calendar_job.NewDeleteCalendarJob // Deletes an existing data frame analytics job. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-dfanalytics.html DeleteDataFrameAnalytics ml_delete_data_frame_analytics.NewDeleteDataFrameAnalytics // Deletes an existing datafeed. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html DeleteDatafeed ml_delete_datafeed.NewDeleteDatafeed // Deletes expired and unused machine learning data. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-expired-data.html DeleteExpiredData ml_delete_expired_data.NewDeleteExpiredData // Deletes a filter. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-filter.html DeleteFilter ml_delete_filter.NewDeleteFilter // Deletes forecasts from a machine learning job. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-forecast.html DeleteForecast ml_delete_forecast.NewDeleteForecast // Deletes an existing anomaly detection job. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html DeleteJob ml_delete_job.NewDeleteJob // Deletes an existing model snapshot. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-snapshot.html DeleteModelSnapshot ml_delete_model_snapshot.NewDeleteModelSnapshot // Deletes an existing trained inference model that is currently not referenced // by an ingest pipeline. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-models.html DeleteTrainedModel ml_delete_trained_model.NewDeleteTrainedModel // Deletes a model alias that refers to the trained model + // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-models-aliases.html DeleteTrainedModelAlias ml_delete_trained_model_alias.NewDeleteTrainedModelAlias // Estimates the model memory + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-apis.html EstimateModelMemory ml_estimate_model_memory.NewEstimateModelMemory // Evaluates the data frame analytics for an annotated index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/evaluate-dfanalytics.html EvaluateDataFrame ml_evaluate_data_frame.NewEvaluateDataFrame // Explains a data frame analytics config. + // http://www.elastic.co/guide/en/elasticsearch/reference/current/explain-dfanalytics.html ExplainDataFrameAnalytics ml_explain_data_frame_analytics.NewExplainDataFrameAnalytics // Forces any buffered data to be processed by the job. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html FlushJob ml_flush_job.NewFlushJob // Predicts the future behavior of a time series by using its historical // behavior. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-forecast.html Forecast ml_forecast.NewForecast // Retrieves anomaly detection job results for one or more buckets. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html GetBuckets ml_get_buckets.NewGetBuckets // Retrieves information about the scheduled events in calendars. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar-event.html GetCalendarEvents ml_get_calendar_events.NewGetCalendarEvents // Retrieves configuration information for calendars. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar.html GetCalendars ml_get_calendars.NewGetCalendars // Retrieves anomaly detection job results for one or more categories. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html GetCategories ml_get_categories.NewGetCategories // Retrieves configuration information for data frame analytics jobs. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics.html GetDataFrameAnalytics ml_get_data_frame_analytics.NewGetDataFrameAnalytics // Retrieves usage information for data frame analytics jobs. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics-stats.html GetDataFrameAnalyticsStats ml_get_data_frame_analytics_stats.NewGetDataFrameAnalyticsStats // Retrieves usage information for datafeeds. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html GetDatafeedStats ml_get_datafeed_stats.NewGetDatafeedStats // Retrieves configuration information for datafeeds. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html GetDatafeeds ml_get_datafeeds.NewGetDatafeeds // Retrieves filters. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-filter.html GetFilters ml_get_filters.NewGetFilters // Retrieves anomaly detection job results for one or more influencers. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-influencer.html GetInfluencers ml_get_influencers.NewGetInfluencers // Retrieves usage information for anomaly detection jobs. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html GetJobStats ml_get_job_stats.NewGetJobStats // Retrieves configuration information for anomaly detection jobs. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html GetJobs ml_get_jobs.NewGetJobs // Returns information on how ML is using memory. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-memory.html GetMemoryStats ml_get_memory_stats.NewGetMemoryStats // Gets stats for anomaly detection job model snapshot upgrades that are in // progress. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-model-snapshot-upgrade-stats.html GetModelSnapshotUpgradeStats ml_get_model_snapshot_upgrade_stats.NewGetModelSnapshotUpgradeStats // Retrieves information about model snapshots. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html GetModelSnapshots ml_get_model_snapshots.NewGetModelSnapshots // Retrieves overall bucket results that summarize the bucket results of // multiple anomaly detection jobs. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-overall-buckets.html GetOverallBuckets ml_get_overall_buckets.NewGetOverallBuckets // Retrieves anomaly records for an anomaly detection job. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-record.html GetRecords ml_get_records.NewGetRecords // Retrieves configuration information for a trained inference model. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models.html GetTrainedModels ml_get_trained_models.NewGetTrainedModels // Retrieves usage information for trained inference models. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models-stats.html GetTrainedModelsStats ml_get_trained_models_stats.NewGetTrainedModelsStats // Evaluate a trained model. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html InferTrainedModel ml_infer_trained_model.NewInferTrainedModel // Returns defaults and limits used by machine learning. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-info.html Info ml_info.NewInfo // Opens one or more anomaly detection jobs. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html OpenJob ml_open_job.NewOpenJob // Posts scheduled events in a calendar. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-calendar-event.html PostCalendarEvents ml_post_calendar_events.NewPostCalendarEvents + // Sends data to an anomaly detection job for analysis. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html + PostData ml_post_data.NewPostData // Previews that will be analyzed given a data frame analytics config. + // http://www.elastic.co/guide/en/elasticsearch/reference/current/preview-dfanalytics.html PreviewDataFrameAnalytics ml_preview_data_frame_analytics.NewPreviewDataFrameAnalytics // Previews a datafeed. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-datafeed.html PreviewDatafeed ml_preview_datafeed.NewPreviewDatafeed // Instantiates a calendar. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar.html PutCalendar ml_put_calendar.NewPutCalendar // Adds an anomaly detection job to a calendar. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar-job.html PutCalendarJob ml_put_calendar_job.NewPutCalendarJob // Instantiates a data frame analytics job. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-dfanalytics.html PutDataFrameAnalytics ml_put_data_frame_analytics.NewPutDataFrameAnalytics // Instantiates a datafeed. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html PutDatafeed ml_put_datafeed.NewPutDatafeed // Instantiates a filter. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-filter.html PutFilter ml_put_filter.NewPutFilter // Instantiates an anomaly detection job. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html PutJob ml_put_job.NewPutJob // Creates an inference trained model. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models.html PutTrainedModel ml_put_trained_model.NewPutTrainedModel // Creates a new model alias (or reassigns an existing one) to refer to the // trained model + // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models-aliases.html PutTrainedModelAlias ml_put_trained_model_alias.NewPutTrainedModelAlias // Creates part of a trained model definition + // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-definition-part.html PutTrainedModelDefinitionPart ml_put_trained_model_definition_part.NewPutTrainedModelDefinitionPart // Creates a trained model vocabulary + // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-vocabulary.html PutTrainedModelVocabulary ml_put_trained_model_vocabulary.NewPutTrainedModelVocabulary // Resets an existing anomaly detection job. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-reset-job.html ResetJob ml_reset_job.NewResetJob // Reverts to a specific snapshot. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-revert-snapshot.html RevertModelSnapshot ml_revert_model_snapshot.NewRevertModelSnapshot // Sets a cluster wide upgrade_mode setting that prepares machine learning // indices for an upgrade. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-set-upgrade-mode.html SetUpgradeMode ml_set_upgrade_mode.NewSetUpgradeMode // Starts a data frame analytics job. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-dfanalytics.html StartDataFrameAnalytics ml_start_data_frame_analytics.NewStartDataFrameAnalytics // Starts one or more datafeeds. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html StartDatafeed ml_start_datafeed.NewStartDatafeed // Start a trained model deployment. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-trained-model-deployment.html StartTrainedModelDeployment ml_start_trained_model_deployment.NewStartTrainedModelDeployment // Stops one or more data frame analytics jobs. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-dfanalytics.html StopDataFrameAnalytics ml_stop_data_frame_analytics.NewStopDataFrameAnalytics // Stops one or more datafeeds. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html StopDatafeed ml_stop_datafeed.NewStopDatafeed // Stop a trained model deployment. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-trained-model-deployment.html StopTrainedModelDeployment ml_stop_trained_model_deployment.NewStopTrainedModelDeployment // Updates certain properties of a data frame analytics job. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-dfanalytics.html UpdateDataFrameAnalytics ml_update_data_frame_analytics.NewUpdateDataFrameAnalytics // Updates certain properties of a datafeed. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafeed.html UpdateDatafeed ml_update_datafeed.NewUpdateDatafeed // Updates the description of a filter, adds items, or removes items. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-filter.html UpdateFilter ml_update_filter.NewUpdateFilter // Updates certain properties of an anomaly detection job. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html UpdateJob ml_update_job.NewUpdateJob // Updates certain properties of a snapshot. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html UpdateModelSnapshot ml_update_model_snapshot.NewUpdateModelSnapshot // Upgrades a given job snapshot to the current major version. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-upgrade-job-model-snapshot.html UpgradeJobSnapshot ml_upgrade_job_snapshot.NewUpgradeJobSnapshot // Validates an anomaly detection job. + // https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html Validate ml_validate.NewValidate // Validates an anomaly detection detector. + // https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html ValidateDetector ml_validate_detector.NewValidateDetector } +type Monitoring struct { + // Used by the monitoring features to send monitoring data. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/monitor-elasticsearch-cluster.html + Bulk monitoring_bulk.NewBulk +} + type Nodes struct { // Removes the archived repositories metering information present in the // cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-repositories-metering-archive-api.html ClearRepositoriesMeteringArchive nodes_clear_repositories_metering_archive.NewClearRepositoriesMeteringArchive // Returns cluster repositories metering information. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html GetRepositoriesMeteringInfo nodes_get_repositories_metering_info.NewGetRepositoriesMeteringInfo // Returns information about hot threads on each node in the cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-hot-threads.html HotThreads nodes_hot_threads.NewHotThreads // Returns information about nodes in the cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-info.html Info nodes_info.NewInfo // Reloads secure settings. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-settings.html#reloadable-secure-settings ReloadSecureSettings nodes_reload_secure_settings.NewReloadSecureSettings // Returns statistical information about nodes in the cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html Stats nodes_stats.NewStats // Returns low-level information about REST actions usage on nodes. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-usage.html Usage nodes_usage.NewUsage } +type QueryRuleset struct { + // Deletes a query ruleset. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-query-ruleset.html + Delete query_ruleset_delete.NewDelete + // Returns the details about a query ruleset. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-query-ruleset.html + Get query_ruleset_get.NewGet + // Lists query rulesets. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-query-rulesets.html + List query_ruleset_list.NewList + // Creates or updates a query ruleset. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-query-ruleset.html + Put query_ruleset_put.NewPut +} + type Rollup struct { // Deletes an existing rollup job. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-delete-job.html DeleteJob rollup_delete_job.NewDeleteJob // Retrieves the configuration, stats, and status of rollup jobs. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-job.html GetJobs rollup_get_jobs.NewGetJobs // Returns the capabilities of any rollup jobs that have been configured for a // specific index or index pattern. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-rollup-caps.html GetRollupCaps rollup_get_rollup_caps.NewGetRollupCaps // Returns the rollup capabilities of all jobs inside of a rollup index (e.g. 
// the index where rollup data is stored). + // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-get-rollup-index-caps.html GetRollupIndexCaps rollup_get_rollup_index_caps.NewGetRollupIndexCaps // Creates a rollup job. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-put-job.html PutJob rollup_put_job.NewPutJob // Enables searching rolled-up data using the standard query DSL. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-search.html RollupSearch rollup_rollup_search.NewRollupSearch // Starts an existing, stopped rollup job. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-start-job.html StartJob rollup_start_job.NewStartJob // Stops an existing, started rollup job. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/rollup-stop-job.html StopJob rollup_stop_job.NewStopJob } -type Searchable struct { +type SearchApplication struct { + // Deletes a search application. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-search-application.html + Delete search_application_delete.NewDelete + // Delete a behavioral analytics collection. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-analytics-collection.html + DeleteBehavioralAnalytics search_application_delete_behavioral_analytics.NewDeleteBehavioralAnalytics + // Returns the details about a search application. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-search-application.html + Get search_application_get.NewGet + // Returns the existing behavioral analytics collections. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-analytics-collection.html + GetBehavioralAnalytics search_application_get_behavioral_analytics.NewGetBehavioralAnalytics + // Returns the existing search applications. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-search-applications.html + List search_application_list.NewList + // Creates or updates a search application. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-search-application.html + Put search_application_put.NewPut + // Creates a behavioral analytics collection. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-analytics-collection.html + PutBehavioralAnalytics search_application_put_behavioral_analytics.NewPutBehavioralAnalytics + // Perform a search against a search application + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-application-search.html + Search search_application_search.NewSearch +} + +type SearchableSnapshots struct { // Retrieve node-level cache statistics about searchable snapshots. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html CacheStats searchable_snapshots_cache_stats.NewCacheStats // Clear the cache of searchable snapshots. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html ClearCache searchable_snapshots_clear_cache.NewClearCache // Mount a snapshot as a searchable index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-api-mount-snapshot.html Mount searchable_snapshots_mount.NewMount // Retrieve shard-level statistics about searchable snapshots. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-apis.html Stats searchable_snapshots_stats.NewStats } type Security struct { // Creates or updates the user profile on behalf of another user. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-activate-user-profile.html ActivateUserProfile security_activate_user_profile.NewActivateUserProfile // Enables authentication as a user and retrieve information about the // authenticated user. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-authenticate.html Authenticate security_authenticate.NewAuthenticate // Updates the attributes of multiple existing API keys. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-bulk-update-api-keys.html BulkUpdateApiKeys security_bulk_update_api_keys.NewBulkUpdateApiKeys // Changes the passwords of users in the native realm and built-in users. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-change-password.html ChangePassword security_change_password.NewChangePassword // Clear a subset or all entries from the API key cache. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-api-key-cache.html ClearApiKeyCache security_clear_api_key_cache.NewClearApiKeyCache // Evicts application privileges from the native application privileges cache. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-privilege-cache.html ClearCachedPrivileges security_clear_cached_privileges.NewClearCachedPrivileges // Evicts users from the user cache. Can completely clear the cache or evict // specific users. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-cache.html ClearCachedRealms security_clear_cached_realms.NewClearCachedRealms // Evicts roles from the native role cache. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-role-cache.html ClearCachedRoles security_clear_cached_roles.NewClearCachedRoles // Evicts tokens from the service account token caches. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-service-token-caches.html ClearCachedServiceTokens security_clear_cached_service_tokens.NewClearCachedServiceTokens // Creates an API key for access without requiring basic authentication. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html CreateApiKey security_create_api_key.NewCreateApiKey + // Creates a cross-cluster API key for API key based remote cluster access. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-cross-cluster-api-key.html + CreateCrossClusterApiKey security_create_cross_cluster_api_key.NewCreateCrossClusterApiKey // Creates a service account token for access without requiring basic // authentication. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-service-token.html CreateServiceToken security_create_service_token.NewCreateServiceToken // Removes application privileges. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-privilege.html DeletePrivileges security_delete_privileges.NewDeletePrivileges // Removes roles in the native realm. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role.html DeleteRole security_delete_role.NewDeleteRole // Removes role mappings. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role-mapping.html DeleteRoleMapping security_delete_role_mapping.NewDeleteRoleMapping // Deletes a service account token. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-service-token.html DeleteServiceToken security_delete_service_token.NewDeleteServiceToken // Deletes users from the native realm. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html DeleteUser security_delete_user.NewDeleteUser // Disables users in the native realm. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user.html DisableUser security_disable_user.NewDisableUser // Disables a user profile so it's not visible in user profile searches. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user-profile.html DisableUserProfile security_disable_user_profile.NewDisableUserProfile // Enables users in the native realm. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user.html EnableUser security_enable_user.NewEnableUser // Enables a user profile so it's visible in user profile searches. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user-profile.html EnableUserProfile security_enable_user_profile.NewEnableUserProfile // Allows a kibana instance to configure itself to communicate with a secured // elasticsearch cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-kibana-enrollment.html EnrollKibana security_enroll_kibana.NewEnrollKibana // Allows a new node to enroll to an existing cluster with security enabled. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-node-enrollment.html EnrollNode security_enroll_node.NewEnrollNode // Retrieves information for one or more API keys. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-api-key.html GetApiKey security_get_api_key.NewGetApiKey // Retrieves the list of cluster privileges and index privileges that are // available in this version of Elasticsearch. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-builtin-privileges.html GetBuiltinPrivileges security_get_builtin_privileges.NewGetBuiltinPrivileges // Retrieves application privileges. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-privileges.html GetPrivileges security_get_privileges.NewGetPrivileges // Retrieves roles in the native realm. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role.html GetRole security_get_role.NewGetRole // Retrieves role mappings. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role-mapping.html GetRoleMapping security_get_role_mapping.NewGetRoleMapping // Retrieves information about service accounts. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-service-accounts.html GetServiceAccounts security_get_service_accounts.NewGetServiceAccounts // Retrieves information of all service credentials for a service account. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-service-credentials.html GetServiceCredentials security_get_service_credentials.NewGetServiceCredentials // Creates a bearer token for access without requiring basic authentication. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-token.html GetToken security_get_token.NewGetToken // Retrieves information about users in the native realm and built-in users. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user.html GetUser security_get_user.NewGetUser // Retrieves security privileges for the logged in user. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-privileges.html GetUserPrivileges security_get_user_privileges.NewGetUserPrivileges // Retrieves user profiles for the given unique ID(s). + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-profile.html GetUserProfile security_get_user_profile.NewGetUserProfile // Creates an API key on behalf of another user. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-grant-api-key.html GrantApiKey security_grant_api_key.NewGrantApiKey // Determines whether the specified user has a specified list of privileges. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges.html HasPrivileges security_has_privileges.NewHasPrivileges // Determines whether the users associated with the specified profile IDs have // all the requested privileges. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges-user-profile.html HasPrivilegesUserProfile security_has_privileges_user_profile.NewHasPrivilegesUserProfile // Invalidates one or more API keys. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-api-key.html InvalidateApiKey security_invalidate_api_key.NewInvalidateApiKey // Invalidates one or more access tokens or refresh tokens. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-token.html InvalidateToken security_invalidate_token.NewInvalidateToken // Exchanges an OpenID Connection authentication response message for an // Elasticsearch access token and refresh token pair + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-authenticate.html OidcAuthenticate security_oidc_authenticate.NewOidcAuthenticate // Invalidates a refresh token and access token that was generated from the // OpenID Connect Authenticate API + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-logout.html OidcLogout security_oidc_logout.NewOidcLogout // Creates an OAuth 2.0 authentication request as a URL string + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-oidc-prepare-authentication.html OidcPrepareAuthentication security_oidc_prepare_authentication.NewOidcPrepareAuthentication // Adds or updates application privileges. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-privileges.html PutPrivileges security_put_privileges.NewPutPrivileges // Adds and updates roles in the native realm. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role.html PutRole security_put_role.NewPutRole // Creates and updates role mappings. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role-mapping.html PutRoleMapping security_put_role_mapping.NewPutRoleMapping // Adds and updates users in the native realm. These users are commonly referred // to as native users. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html PutUser security_put_user.NewPutUser // Retrieves information for API keys using a subset of query DSL + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-query-api-key.html QueryApiKeys security_query_api_keys.NewQueryApiKeys // Exchanges a SAML Response message for an Elasticsearch access token and // refresh token pair + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-authenticate.html SamlAuthenticate security_saml_authenticate.NewSamlAuthenticate // Verifies the logout response sent from the SAML IdP + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-complete-logout.html SamlCompleteLogout security_saml_complete_logout.NewSamlCompleteLogout // Consumes a SAML LogoutRequest + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-invalidate.html SamlInvalidate security_saml_invalidate.NewSamlInvalidate // Invalidates an access token and a refresh token that were generated via the // SAML Authenticate API + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-logout.html SamlLogout security_saml_logout.NewSamlLogout // Creates a SAML authentication request + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-prepare-authentication.html SamlPrepareAuthentication security_saml_prepare_authentication.NewSamlPrepareAuthentication // Generates SAML 
metadata for the Elastic stack SAML 2.0 Service Provider + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-saml-sp-metadata.html SamlServiceProviderMetadata security_saml_service_provider_metadata.NewSamlServiceProviderMetadata // Get suggestions for user profiles that match specified search criteria. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-suggest-user-profile.html SuggestUserProfiles security_suggest_user_profiles.NewSuggestUserProfiles // Updates attributes of an existing API key. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-api-key.html UpdateApiKey security_update_api_key.NewUpdateApiKey // Update application specific data for the user profile of the given unique ID. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-user-profile-data.html UpdateUserProfileData security_update_user_profile_data.NewUpdateUserProfileData } type Shutdown struct { // Removes a node from the shutdown list. Designed for indirect use by ECE/ESS // and ECK. Direct use is not supported. + // https://www.elastic.co/guide/en/elasticsearch/reference/current DeleteNode shutdown_delete_node.NewDeleteNode // Retrieve status of a node or nodes that are currently marked as shutting // down. Designed for indirect use by ECE/ESS and ECK. Direct use is not // supported. + // https://www.elastic.co/guide/en/elasticsearch/reference/current GetNode shutdown_get_node.NewGetNode // Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. // Direct use is not supported. + // https://www.elastic.co/guide/en/elasticsearch/reference/current PutNode shutdown_put_node.NewPutNode } type Slm struct { // Deletes an existing snapshot lifecycle policy. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-delete-policy.html DeleteLifecycle slm_delete_lifecycle.NewDeleteLifecycle // Immediately creates a snapshot according to the lifecycle policy, without // waiting for the scheduled time. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-lifecycle.html ExecuteLifecycle slm_execute_lifecycle.NewExecuteLifecycle // Deletes any snapshots that are expired according to the policy's retention // rules. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-retention.html ExecuteRetention slm_execute_retention.NewExecuteRetention // Retrieves one or more snapshot lifecycle policy definitions and information // about the latest snapshot attempts. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-policy.html GetLifecycle slm_get_lifecycle.NewGetLifecycle // Returns global and policy-level statistics about actions taken by snapshot // lifecycle management. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-stats.html GetStats slm_get_stats.NewGetStats // Retrieves the status of snapshot lifecycle management (SLM). + // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-status.html GetStatus slm_get_status.NewGetStatus // Creates or updates a snapshot lifecycle policy. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-put-policy.html PutLifecycle slm_put_lifecycle.NewPutLifecycle // Turns on snapshot lifecycle management (SLM). + // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-start.html Start slm_start.NewStart // Turns off snapshot lifecycle management (SLM). + // https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-stop.html Stop slm_stop.NewStop } type Snapshot struct { // Removes stale data from repository. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/clean-up-snapshot-repo-api.html CleanupRepository snapshot_cleanup_repository.NewCleanupRepository // Clones indices from one snapshot into another snapshot in the same // repository. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html Clone snapshot_clone.NewClone // Creates a snapshot in a repository. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html Create snapshot_create.NewCreate // Creates a repository. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html CreateRepository snapshot_create_repository.NewCreateRepository // Deletes one or more snapshots. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html Delete snapshot_delete.NewDelete // Deletes a repository. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html DeleteRepository snapshot_delete_repository.NewDeleteRepository // Returns information about a snapshot. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html Get snapshot_get.NewGet // Returns information about a repository. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html GetRepository snapshot_get_repository.NewGetRepository // Restores a snapshot. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html Restore snapshot_restore.NewRestore // Returns information about the status of a snapshot. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html Status snapshot_status.NewStatus // Verifies a repository. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html VerifyRepository snapshot_verify_repository.NewVerifyRepository } type Sql struct { // Clears the SQL cursor + // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-sql-cursor-api.html ClearCursor sql_clear_cursor.NewClearCursor // Deletes an async SQL search or a stored synchronous SQL search. If the search // is still running, the API cancels it. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-async-sql-search-api.html DeleteAsync sql_delete_async.NewDeleteAsync // Returns the current status and available results for an async SQL search or // stored synchronous SQL search + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-sql-search-api.html GetAsync sql_get_async.NewGetAsync // Returns the current status of an async SQL search or a stored synchronous SQL // search + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-async-sql-search-status-api.html GetAsyncStatus sql_get_async_status.NewGetAsyncStatus // Executes a SQL request + // https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-search-api.html Query sql_query.NewQuery // Translates SQL into Elasticsearch queries + // https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-translate-api.html Translate sql_translate.NewTranslate } type Ssl struct { // Retrieves information about the X.509 certificates used to encrypt // communications in the cluster. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-ssl.html Certificates ssl_certificates.NewCertificates } +type Synonyms struct { + // Deletes a synonym set + // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-synonyms-set.html + DeleteSynonym synonyms_delete_synonym.NewDeleteSynonym + // Deletes a synonym rule in a synonym set + // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-synonym-rule.html + DeleteSynonymRule synonyms_delete_synonym_rule.NewDeleteSynonymRule + // Retrieves a synonym set + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-synonyms-set.html + GetSynonym synonyms_get_synonym.NewGetSynonym + // Retrieves a synonym rule from a synonym set + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-synonym-rule.html + GetSynonymRule synonyms_get_synonym_rule.NewGetSynonymRule + // Retrieves a summary of all defined synonym sets + // https://www.elastic.co/guide/en/elasticsearch/reference/current/list-synonyms-sets.html + GetSynonymsSets synonyms_get_synonyms_sets.NewGetSynonymsSets + // Creates or updates a synonyms set + // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-synonyms-set.html + PutSynonym synonyms_put_synonym.NewPutSynonym + // Creates or updates a synonym rule in a synonym set + // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-synonym-rule.html + PutSynonymRule synonyms_put_synonym_rule.NewPutSynonymRule +} + type Tasks struct { // Cancels a task, if it can be cancelled through an API. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html Cancel tasks_cancel.NewCancel // Returns information about a task. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html Get tasks_get.NewGet // Returns a list of tasks. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html List tasks_list.NewList } +type TextStructure struct { + // Finds the structure of a text file. The text file must contain data that is + // suitable to be ingested into Elasticsearch. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.html + FindStructure text_structure_find_structure.NewFindStructure +} + type Transform struct { // Deletes an existing transform. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform.html DeleteTransform transform_delete_transform.NewDeleteTransform // Retrieves configuration information for transforms. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform.html GetTransform transform_get_transform.NewGetTransform // Retrieves usage information for transforms. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-stats.html GetTransformStats transform_get_transform_stats.NewGetTransformStats // Previews a transform. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/preview-transform.html PreviewTransform transform_preview_transform.NewPreviewTransform // Instantiates a transform. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/put-transform.html PutTransform transform_put_transform.NewPutTransform // Resets an existing transform. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-transform.html ResetTransform transform_reset_transform.NewResetTransform + // Schedules now a transform. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/schedule-now-transform.html + ScheduleNowTransform transform_schedule_now_transform.NewScheduleNowTransform // Starts one or more transforms. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/start-transform.html StartTransform transform_start_transform.NewStartTransform // Stops one or more transforms. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-transform.html StopTransform transform_stop_transform.NewStopTransform // Updates certain properties of a transform. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/update-transform.html UpdateTransform transform_update_transform.NewUpdateTransform // Upgrades all transforms. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/upgrade-transforms.html UpgradeTransforms transform_upgrade_transforms.NewUpgradeTransforms } type Watcher struct { // Acknowledges a watch, manually throttling the execution of the watch's // actions. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-ack-watch.html AckWatch watcher_ack_watch.NewAckWatch // Activates a currently inactive watch. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-activate-watch.html ActivateWatch watcher_activate_watch.NewActivateWatch // Deactivates a currently active watch. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-deactivate-watch.html DeactivateWatch watcher_deactivate_watch.NewDeactivateWatch // Removes a watch from Watcher. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-delete-watch.html DeleteWatch watcher_delete_watch.NewDeleteWatch // Forces the execution of a stored watch. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html ExecuteWatch watcher_execute_watch.NewExecuteWatch + // Retrieve settings for the watcher system index + // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-settings.html + GetSettings watcher_get_settings.NewGetSettings // Retrieves a watch by its ID. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-watch.html GetWatch watcher_get_watch.NewGetWatch // Creates a new watch, or updates an existing one. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-put-watch.html PutWatch watcher_put_watch.NewPutWatch // Retrieves stored watches. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-query-watches.html QueryWatches watcher_query_watches.NewQueryWatches // Starts Watcher if it is not already running. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-start.html Start watcher_start.NewStart // Retrieves the current Watcher metrics. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stats.html Stats watcher_stats.NewStats // Stops Watcher if it is running. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stop.html Stop watcher_stop.NewStop + // Update settings for the watcher system index + // https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-update-settings.html + UpdateSettings watcher_update_settings.NewUpdateSettings } type Xpack struct { // Retrieves information about the installed X-Pack features. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html Info xpack_info.NewInfo // Retrieves usage information about the installed X-Pack features. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/usage-api.html Usage xpack_usage.NewUsage } type API struct { - Async Async - Autoscaling Autoscaling - Cat Cat - Ccr Ccr - Cluster Cluster - Core Core - Dangling Dangling - Enrich Enrich - Eql Eql - Features Features - Fleet Fleet - Graph Graph - Ilm Ilm - Indices Indices - Ingest Ingest - License License - Logstash Logstash - Migration Migration - Ml Ml - Nodes Nodes - Rollup Rollup - Searchable Searchable - Security Security - Shutdown Shutdown - Slm Slm - Snapshot Snapshot - Sql Sql - Ssl Ssl - Tasks Tasks - Transform Transform - Watcher Watcher - Xpack Xpack - + AsyncSearch AsyncSearch + Autoscaling Autoscaling + Cat Cat + Ccr Ccr + Cluster Cluster + Core Core + DanglingIndices DanglingIndices + Enrich Enrich + Eql Eql + Features Features + Fleet Fleet + Graph Graph + Ilm Ilm + Indices Indices + Ingest Ingest + License License + Logstash Logstash + Migration Migration + Ml Ml + Monitoring Monitoring + Nodes Nodes + QueryRuleset QueryRuleset + Rollup Rollup + SearchApplication SearchApplication + SearchableSnapshots SearchableSnapshots + Security Security + Shutdown Shutdown + Slm Slm + Snapshot Snapshot + Sql Sql + Ssl Ssl + Synonyms Synonyms + Tasks Tasks + TextStructure TextStructure + Transform Transform + Watcher Watcher + Xpack Xpack + + // Allows to perform multiple index/update/delete operations in a single + // request. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html + Bulk core_bulk.NewBulk // Explicitly clears the search context for a scroll. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-scroll-api.html ClearScroll core_clear_scroll.NewClearScroll // Close a point in time + // https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html ClosePointInTime core_close_point_in_time.NewClosePointInTime // Returns number of documents matching a query. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html Count core_count.NewCount // Creates a new document in the index. // // Returns a 409 response when a document with a same ID already exists in the // index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html Create core_create.NewCreate // Removes a document from the index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html Delete core_delete.NewDelete // Deletes documents matching the provided query. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html DeleteByQuery core_delete_by_query.NewDeleteByQuery // Changes the number of requests per second for a particular Delete By Query // operation. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html DeleteByQueryRethrottle core_delete_by_query_rethrottle.NewDeleteByQueryRethrottle // Deletes a script. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html DeleteScript core_delete_script.NewDeleteScript // Returns information about whether a document exists in an index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html Exists core_exists.NewExists // Returns information about whether a document source exists in an index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html ExistsSource core_exists_source.NewExistsSource // Returns information about why a specific matches (or doesn't match) a query. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-explain.html Explain core_explain.NewExplain // Returns the information about the capabilities of fields among multiple // indices. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-caps.html FieldCaps core_field_caps.NewFieldCaps // Returns a document. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html Get core_get.NewGet // Returns a script. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html GetScript core_get_script.NewGetScript // Returns all script contexts. + // https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-contexts.html GetScriptContext core_get_script_context.NewGetScriptContext // Returns available script types, languages and contexts + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html GetScriptLanguages core_get_script_languages.NewGetScriptLanguages // Returns the source of a document. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html GetSource core_get_source.NewGetSource + // Returns the health of the cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/health-api.html + HealthReport core_health_report.NewHealthReport // Creates or updates a document in an index. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html Index core_index.NewIndex // Returns basic information about the cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html Info core_info.NewInfo // Performs a kNN search. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html KnnSearch core_knn_search.NewKnnSearch // Allows to get multiple documents in one request. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html Mget core_mget.NewMget + // Allows to execute several search operations in one request. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html + Msearch core_msearch.NewMsearch + // Allows to execute several search template operations in one request. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html + MsearchTemplate core_msearch_template.NewMsearchTemplate // Returns multiple termvectors in one request. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-termvectors.html Mtermvectors core_mtermvectors.NewMtermvectors // Open a point in time that can be used in subsequent searches + // https://www.elastic.co/guide/en/elasticsearch/reference/current/point-in-time-api.html OpenPointInTime core_open_point_in_time.NewOpenPointInTime // Returns whether the cluster is running. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html Ping core_ping.NewPing // Creates or updates a script. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html PutScript core_put_script.NewPutScript // Allows to evaluate the quality of ranked search results over a set of typical // search queries + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-rank-eval.html RankEval core_rank_eval.NewRankEval // Allows to copy documents from one index to another, optionally filtering the // source // documents by a query, changing the destination index settings, or fetching // the // documents from a remote cluster. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html Reindex core_reindex.NewReindex // Changes the number of requests per second for a particular Reindex operation. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html ReindexRethrottle core_reindex_rethrottle.NewReindexRethrottle // Allows to use the Mustache language to pre-render a search definition. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/render-search-template-api.html RenderSearchTemplate core_render_search_template.NewRenderSearchTemplate // Allows an arbitrary script to be executed and a result to be returned + // https://www.elastic.co/guide/en/elasticsearch/painless/current/painless-execute-api.html ScriptsPainlessExecute core_scripts_painless_execute.NewScriptsPainlessExecute // Allows to retrieve a large numbers of results from a single search request. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html#request-body-search-scroll Scroll core_scroll.NewScroll // Returns results matching a query. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html Search core_search.NewSearch // Searches a vector tile for geospatial values. Returns results as a binary // Mapbox vector tile. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-vector-tile-api.html SearchMvt core_search_mvt.NewSearchMvt // Returns information about the indices and shards that a search request would // be executed against. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-shards.html SearchShards core_search_shards.NewSearchShards // Allows to use the Mustache language to pre-render a search definition. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html SearchTemplate core_search_template.NewSearchTemplate // The terms enum API can be used to discover terms in the index that begin // with the provided string. It is designed for low-latency look-ups used in // auto-complete scenarios. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-terms-enum.html TermsEnum core_terms_enum.NewTermsEnum // Returns information and statistics about terms in the fields of a particular // document. 
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-termvectors.html Termvectors core_termvectors.NewTermvectors // Updates a document with a script or partial document. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html Update core_update.NewUpdate // Performs an update on every document in the index without changing the // source, // for example to pick up a mapping change. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html UpdateByQuery core_update_by_query.NewUpdateByQuery // Changes the number of requests per second for a particular Update By Query // operation. + // https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html UpdateByQueryRethrottle core_update_by_query_rethrottle.NewUpdateByQueryRethrottle } func New(tp elastictransport.Interface) *API { return &API{ - Async: Async{ + // AsyncSearch + AsyncSearch: AsyncSearch{ Delete: async_search_delete.NewDeleteFunc(tp), Get: async_search_get.NewGetFunc(tp), Status: async_search_status.NewStatusFunc(tp), Submit: async_search_submit.NewSubmitFunc(tp), }, + // Autoscaling Autoscaling: Autoscaling{ DeleteAutoscalingPolicy: autoscaling_delete_autoscaling_policy.NewDeleteAutoscalingPolicyFunc(tp), GetAutoscalingCapacity: autoscaling_get_autoscaling_capacity.NewGetAutoscalingCapacityFunc(tp), @@ -1583,6 +2211,7 @@ func New(tp elastictransport.Interface) *API { PutAutoscalingPolicy: autoscaling_put_autoscaling_policy.NewPutAutoscalingPolicyFunc(tp), }, + // Cat Cat: Cat{ Aliases: cat_aliases.NewAliasesFunc(tp), Allocation: cat_allocation.NewAllocationFunc(tp), @@ -1612,6 +2241,7 @@ func New(tp elastictransport.Interface) *API { Transforms: cat_transforms.NewTransformsFunc(tp), }, + // Ccr Ccr: Ccr{ DeleteAutoFollowPattern: ccr_delete_auto_follow_pattern.NewDeleteAutoFollowPatternFunc(tp), Follow: ccr_follow.NewFollowFunc(tp), @@ -1628,6 +2258,7 @@ func New(tp 
elastictransport.Interface) *API { Unfollow: ccr_unfollow.NewUnfollowFunc(tp), }, + // Cluster Cluster: Cluster{ AllocationExplain: cluster_allocation_explain.NewAllocationExplainFunc(tp), DeleteComponentTemplate: cluster_delete_component_template.NewDeleteComponentTemplateFunc(tp), @@ -1636,6 +2267,7 @@ func New(tp elastictransport.Interface) *API { GetComponentTemplate: cluster_get_component_template.NewGetComponentTemplateFunc(tp), GetSettings: cluster_get_settings.NewGetSettingsFunc(tp), Health: cluster_health.NewHealthFunc(tp), + Info: cluster_info.NewInfoFunc(tp), PendingTasks: cluster_pending_tasks.NewPendingTasksFunc(tp), PostVotingConfigExclusions: cluster_post_voting_config_exclusions.NewPostVotingConfigExclusionsFunc(tp), PutComponentTemplate: cluster_put_component_template.NewPutComponentTemplateFunc(tp), @@ -1646,7 +2278,9 @@ func New(tp elastictransport.Interface) *API { Stats: cluster_stats.NewStatsFunc(tp), }, + // Core Core: Core{ + Bulk: core_bulk.NewBulkFunc(tp), ClearScroll: core_clear_scroll.NewClearScrollFunc(tp), ClosePointInTime: core_close_point_in_time.NewClosePointInTimeFunc(tp), Count: core_count.NewCountFunc(tp), @@ -1664,10 +2298,13 @@ func New(tp elastictransport.Interface) *API { GetScriptContext: core_get_script_context.NewGetScriptContextFunc(tp), GetScriptLanguages: core_get_script_languages.NewGetScriptLanguagesFunc(tp), GetSource: core_get_source.NewGetSourceFunc(tp), + HealthReport: core_health_report.NewHealthReportFunc(tp), Index: core_index.NewIndexFunc(tp), Info: core_info.NewInfoFunc(tp), KnnSearch: core_knn_search.NewKnnSearchFunc(tp), Mget: core_mget.NewMgetFunc(tp), + Msearch: core_msearch.NewMsearchFunc(tp), + MsearchTemplate: core_msearch_template.NewMsearchTemplateFunc(tp), Mtermvectors: core_mtermvectors.NewMtermvectorsFunc(tp), OpenPointInTime: core_open_point_in_time.NewOpenPointInTimeFunc(tp), Ping: core_ping.NewPingFunc(tp), @@ -1689,12 +2326,14 @@ func New(tp elastictransport.Interface) *API { 
UpdateByQueryRethrottle: core_update_by_query_rethrottle.NewUpdateByQueryRethrottleFunc(tp), }, - Dangling: Dangling{ + // DanglingIndices + DanglingIndices: DanglingIndices{ DeleteDanglingIndex: dangling_indices_delete_dangling_index.NewDeleteDanglingIndexFunc(tp), ImportDanglingIndex: dangling_indices_import_dangling_index.NewImportDanglingIndexFunc(tp), ListDanglingIndices: dangling_indices_list_dangling_indices.NewListDanglingIndicesFunc(tp), }, + // Enrich Enrich: Enrich{ DeletePolicy: enrich_delete_policy.NewDeletePolicyFunc(tp), ExecutePolicy: enrich_execute_policy.NewExecutePolicyFunc(tp), @@ -1703,6 +2342,7 @@ func New(tp elastictransport.Interface) *API { Stats: enrich_stats.NewStatsFunc(tp), }, + // Eql Eql: Eql{ Delete: eql_delete.NewDeleteFunc(tp), Get: eql_get.NewGetFunc(tp), @@ -1710,20 +2350,25 @@ func New(tp elastictransport.Interface) *API { Search: eql_search.NewSearchFunc(tp), }, + // Features Features: Features{ GetFeatures: features_get_features.NewGetFeaturesFunc(tp), ResetFeatures: features_reset_features.NewResetFeaturesFunc(tp), }, + // Fleet Fleet: Fleet{ GlobalCheckpoints: fleet_global_checkpoints.NewGlobalCheckpointsFunc(tp), + Msearch: fleet_msearch.NewMsearchFunc(tp), Search: fleet_search.NewSearchFunc(tp), }, + // Graph Graph: Graph{ Explore: graph_explore.NewExploreFunc(tp), }, + // Ilm Ilm: Ilm{ DeleteLifecycle: ilm_delete_lifecycle.NewDeleteLifecycleFunc(tp), ExplainLifecycle: ilm_explain_lifecycle.NewExplainLifecycleFunc(tp), @@ -1738,6 +2383,7 @@ func New(tp elastictransport.Interface) *API { Stop: ilm_stop.NewStopFunc(tp), }, + // Indices Indices: Indices{ AddBlock: indices_add_block.NewAddBlockFunc(tp), Analyze: indices_analyze.NewAnalyzeFunc(tp), @@ -1749,6 +2395,7 @@ func New(tp elastictransport.Interface) *API { DataStreamsStats: indices_data_streams_stats.NewDataStreamsStatsFunc(tp), Delete: indices_delete.NewDeleteFunc(tp), DeleteAlias: indices_delete_alias.NewDeleteAliasFunc(tp), + DeleteDataLifecycle: 
indices_delete_data_lifecycle.NewDeleteDataLifecycleFunc(tp), DeleteDataStream: indices_delete_data_stream.NewDeleteDataStreamFunc(tp), DeleteIndexTemplate: indices_delete_index_template.NewDeleteIndexTemplateFunc(tp), DeleteTemplate: indices_delete_template.NewDeleteTemplateFunc(tp), @@ -1758,11 +2405,13 @@ func New(tp elastictransport.Interface) *API { ExistsAlias: indices_exists_alias.NewExistsAliasFunc(tp), ExistsIndexTemplate: indices_exists_index_template.NewExistsIndexTemplateFunc(tp), ExistsTemplate: indices_exists_template.NewExistsTemplateFunc(tp), + ExplainDataLifecycle: indices_explain_data_lifecycle.NewExplainDataLifecycleFunc(tp), FieldUsageStats: indices_field_usage_stats.NewFieldUsageStatsFunc(tp), Flush: indices_flush.NewFlushFunc(tp), Forcemerge: indices_forcemerge.NewForcemergeFunc(tp), Get: indices_get.NewGetFunc(tp), GetAlias: indices_get_alias.NewGetAliasFunc(tp), + GetDataLifecycle: indices_get_data_lifecycle.NewGetDataLifecycleFunc(tp), GetDataStream: indices_get_data_stream.NewGetDataStreamFunc(tp), GetFieldMapping: indices_get_field_mapping.NewGetFieldMappingFunc(tp), GetIndexTemplate: indices_get_index_template.NewGetIndexTemplateFunc(tp), @@ -1774,6 +2423,7 @@ func New(tp elastictransport.Interface) *API { Open: indices_open.NewOpenFunc(tp), PromoteDataStream: indices_promote_data_stream.NewPromoteDataStreamFunc(tp), PutAlias: indices_put_alias.NewPutAliasFunc(tp), + PutDataLifecycle: indices_put_data_lifecycle.NewPutDataLifecycleFunc(tp), PutIndexTemplate: indices_put_index_template.NewPutIndexTemplateFunc(tp), PutMapping: indices_put_mapping.NewPutMappingFunc(tp), PutSettings: indices_put_settings.NewPutSettingsFunc(tp), @@ -1795,6 +2445,7 @@ func New(tp elastictransport.Interface) *API { ValidateQuery: indices_validate_query.NewValidateQueryFunc(tp), }, + // Ingest Ingest: Ingest{ DeletePipeline: ingest_delete_pipeline.NewDeletePipelineFunc(tp), GeoIpStats: ingest_geo_ip_stats.NewGeoIpStatsFunc(tp), @@ -1804,6 +2455,7 @@ func New(tp 
elastictransport.Interface) *API { Simulate: ingest_simulate.NewSimulateFunc(tp), }, + // License License: License{ Delete: license_delete.NewDeleteFunc(tp), Get: license_get.NewGetFunc(tp), @@ -1814,18 +2466,21 @@ func New(tp elastictransport.Interface) *API { PostStartTrial: license_post_start_trial.NewPostStartTrialFunc(tp), }, + // Logstash Logstash: Logstash{ DeletePipeline: logstash_delete_pipeline.NewDeletePipelineFunc(tp), GetPipeline: logstash_get_pipeline.NewGetPipelineFunc(tp), PutPipeline: logstash_put_pipeline.NewPutPipelineFunc(tp), }, + // Migration Migration: Migration{ Deprecations: migration_deprecations.NewDeprecationsFunc(tp), GetFeatureUpgradeStatus: migration_get_feature_upgrade_status.NewGetFeatureUpgradeStatusFunc(tp), PostFeatureUpgrade: migration_post_feature_upgrade.NewPostFeatureUpgradeFunc(tp), }, + // Ml Ml: Ml{ ClearTrainedModelDeploymentCache: ml_clear_trained_model_deployment_cache.NewClearTrainedModelDeploymentCacheFunc(tp), CloseJob: ml_close_job.NewCloseJobFunc(tp), @@ -1869,6 +2524,7 @@ func New(tp elastictransport.Interface) *API { Info: ml_info.NewInfoFunc(tp), OpenJob: ml_open_job.NewOpenJobFunc(tp), PostCalendarEvents: ml_post_calendar_events.NewPostCalendarEventsFunc(tp), + PostData: ml_post_data.NewPostDataFunc(tp), PreviewDataFrameAnalytics: ml_preview_data_frame_analytics.NewPreviewDataFrameAnalyticsFunc(tp), PreviewDatafeed: ml_preview_datafeed.NewPreviewDatafeedFunc(tp), PutCalendar: ml_put_calendar.NewPutCalendarFunc(tp), @@ -1900,6 +2556,12 @@ func New(tp elastictransport.Interface) *API { ValidateDetector: ml_validate_detector.NewValidateDetectorFunc(tp), }, + // Monitoring + Monitoring: Monitoring{ + Bulk: monitoring_bulk.NewBulkFunc(tp), + }, + + // Nodes Nodes: Nodes{ ClearRepositoriesMeteringArchive: nodes_clear_repositories_metering_archive.NewClearRepositoriesMeteringArchiveFunc(tp), GetRepositoriesMeteringInfo: nodes_get_repositories_metering_info.NewGetRepositoriesMeteringInfoFunc(tp), @@ -1910,6 +2572,15 @@ 
func New(tp elastictransport.Interface) *API { Usage: nodes_usage.NewUsageFunc(tp), }, + // QueryRuleset + QueryRuleset: QueryRuleset{ + Delete: query_ruleset_delete.NewDeleteFunc(tp), + Get: query_ruleset_get.NewGetFunc(tp), + List: query_ruleset_list.NewListFunc(tp), + Put: query_ruleset_put.NewPutFunc(tp), + }, + + // Rollup Rollup: Rollup{ DeleteJob: rollup_delete_job.NewDeleteJobFunc(tp), GetJobs: rollup_get_jobs.NewGetJobsFunc(tp), @@ -1921,13 +2592,27 @@ func New(tp elastictransport.Interface) *API { StopJob: rollup_stop_job.NewStopJobFunc(tp), }, - Searchable: Searchable{ + // SearchApplication + SearchApplication: SearchApplication{ + Delete: search_application_delete.NewDeleteFunc(tp), + DeleteBehavioralAnalytics: search_application_delete_behavioral_analytics.NewDeleteBehavioralAnalyticsFunc(tp), + Get: search_application_get.NewGetFunc(tp), + GetBehavioralAnalytics: search_application_get_behavioral_analytics.NewGetBehavioralAnalyticsFunc(tp), + List: search_application_list.NewListFunc(tp), + Put: search_application_put.NewPutFunc(tp), + PutBehavioralAnalytics: search_application_put_behavioral_analytics.NewPutBehavioralAnalyticsFunc(tp), + Search: search_application_search.NewSearchFunc(tp), + }, + + // SearchableSnapshots + SearchableSnapshots: SearchableSnapshots{ CacheStats: searchable_snapshots_cache_stats.NewCacheStatsFunc(tp), ClearCache: searchable_snapshots_clear_cache.NewClearCacheFunc(tp), Mount: searchable_snapshots_mount.NewMountFunc(tp), Stats: searchable_snapshots_stats.NewStatsFunc(tp), }, + // Security Security: Security{ ActivateUserProfile: security_activate_user_profile.NewActivateUserProfileFunc(tp), Authenticate: security_authenticate.NewAuthenticateFunc(tp), @@ -1939,6 +2624,7 @@ func New(tp elastictransport.Interface) *API { ClearCachedRoles: security_clear_cached_roles.NewClearCachedRolesFunc(tp), ClearCachedServiceTokens: security_clear_cached_service_tokens.NewClearCachedServiceTokensFunc(tp), CreateApiKey: 
security_create_api_key.NewCreateApiKeyFunc(tp), + CreateCrossClusterApiKey: security_create_cross_cluster_api_key.NewCreateCrossClusterApiKeyFunc(tp), CreateServiceToken: security_create_service_token.NewCreateServiceTokenFunc(tp), DeletePrivileges: security_delete_privileges.NewDeletePrivilegesFunc(tp), DeleteRole: security_delete_role.NewDeleteRoleFunc(tp), @@ -1986,12 +2672,14 @@ func New(tp elastictransport.Interface) *API { UpdateUserProfileData: security_update_user_profile_data.NewUpdateUserProfileDataFunc(tp), }, + // Shutdown Shutdown: Shutdown{ DeleteNode: shutdown_delete_node.NewDeleteNodeFunc(tp), GetNode: shutdown_get_node.NewGetNodeFunc(tp), PutNode: shutdown_put_node.NewPutNodeFunc(tp), }, + // Slm Slm: Slm{ DeleteLifecycle: slm_delete_lifecycle.NewDeleteLifecycleFunc(tp), ExecuteLifecycle: slm_execute_lifecycle.NewExecuteLifecycleFunc(tp), @@ -2004,6 +2692,7 @@ func New(tp elastictransport.Interface) *API { Stop: slm_stop.NewStopFunc(tp), }, + // Snapshot Snapshot: Snapshot{ CleanupRepository: snapshot_cleanup_repository.NewCleanupRepositoryFunc(tp), Clone: snapshot_clone.NewCloneFunc(tp), @@ -2018,6 +2707,7 @@ func New(tp elastictransport.Interface) *API { VerifyRepository: snapshot_verify_repository.NewVerifyRepositoryFunc(tp), }, + // Sql Sql: Sql{ ClearCursor: sql_clear_cursor.NewClearCursorFunc(tp), DeleteAsync: sql_delete_async.NewDeleteAsyncFunc(tp), @@ -2027,48 +2717,73 @@ func New(tp elastictransport.Interface) *API { Translate: sql_translate.NewTranslateFunc(tp), }, + // Ssl Ssl: Ssl{ Certificates: ssl_certificates.NewCertificatesFunc(tp), }, + // Synonyms + Synonyms: Synonyms{ + DeleteSynonym: synonyms_delete_synonym.NewDeleteSynonymFunc(tp), + DeleteSynonymRule: synonyms_delete_synonym_rule.NewDeleteSynonymRuleFunc(tp), + GetSynonym: synonyms_get_synonym.NewGetSynonymFunc(tp), + GetSynonymRule: synonyms_get_synonym_rule.NewGetSynonymRuleFunc(tp), + GetSynonymsSets: synonyms_get_synonyms_sets.NewGetSynonymsSetsFunc(tp), + PutSynonym: 
synonyms_put_synonym.NewPutSynonymFunc(tp), + PutSynonymRule: synonyms_put_synonym_rule.NewPutSynonymRuleFunc(tp), + }, + + // Tasks Tasks: Tasks{ Cancel: tasks_cancel.NewCancelFunc(tp), Get: tasks_get.NewGetFunc(tp), List: tasks_list.NewListFunc(tp), }, + // TextStructure + TextStructure: TextStructure{ + FindStructure: text_structure_find_structure.NewFindStructureFunc(tp), + }, + + // Transform Transform: Transform{ - DeleteTransform: transform_delete_transform.NewDeleteTransformFunc(tp), - GetTransform: transform_get_transform.NewGetTransformFunc(tp), - GetTransformStats: transform_get_transform_stats.NewGetTransformStatsFunc(tp), - PreviewTransform: transform_preview_transform.NewPreviewTransformFunc(tp), - PutTransform: transform_put_transform.NewPutTransformFunc(tp), - ResetTransform: transform_reset_transform.NewResetTransformFunc(tp), - StartTransform: transform_start_transform.NewStartTransformFunc(tp), - StopTransform: transform_stop_transform.NewStopTransformFunc(tp), - UpdateTransform: transform_update_transform.NewUpdateTransformFunc(tp), - UpgradeTransforms: transform_upgrade_transforms.NewUpgradeTransformsFunc(tp), + DeleteTransform: transform_delete_transform.NewDeleteTransformFunc(tp), + GetTransform: transform_get_transform.NewGetTransformFunc(tp), + GetTransformStats: transform_get_transform_stats.NewGetTransformStatsFunc(tp), + PreviewTransform: transform_preview_transform.NewPreviewTransformFunc(tp), + PutTransform: transform_put_transform.NewPutTransformFunc(tp), + ResetTransform: transform_reset_transform.NewResetTransformFunc(tp), + ScheduleNowTransform: transform_schedule_now_transform.NewScheduleNowTransformFunc(tp), + StartTransform: transform_start_transform.NewStartTransformFunc(tp), + StopTransform: transform_stop_transform.NewStopTransformFunc(tp), + UpdateTransform: transform_update_transform.NewUpdateTransformFunc(tp), + UpgradeTransforms: transform_upgrade_transforms.NewUpgradeTransformsFunc(tp), }, + // Watcher Watcher: Watcher{ 
AckWatch: watcher_ack_watch.NewAckWatchFunc(tp), ActivateWatch: watcher_activate_watch.NewActivateWatchFunc(tp), DeactivateWatch: watcher_deactivate_watch.NewDeactivateWatchFunc(tp), DeleteWatch: watcher_delete_watch.NewDeleteWatchFunc(tp), ExecuteWatch: watcher_execute_watch.NewExecuteWatchFunc(tp), + GetSettings: watcher_get_settings.NewGetSettingsFunc(tp), GetWatch: watcher_get_watch.NewGetWatchFunc(tp), PutWatch: watcher_put_watch.NewPutWatchFunc(tp), QueryWatches: watcher_query_watches.NewQueryWatchesFunc(tp), Start: watcher_start.NewStartFunc(tp), Stats: watcher_stats.NewStatsFunc(tp), Stop: watcher_stop.NewStopFunc(tp), + UpdateSettings: watcher_update_settings.NewUpdateSettingsFunc(tp), }, + // Xpack Xpack: Xpack{ Info: xpack_info.NewInfoFunc(tp), Usage: xpack_usage.NewUsageFunc(tp), }, + Bulk: core_bulk.NewBulkFunc(tp), ClearScroll: core_clear_scroll.NewClearScrollFunc(tp), ClosePointInTime: core_close_point_in_time.NewClosePointInTimeFunc(tp), Count: core_count.NewCountFunc(tp), @@ -2086,10 +2801,13 @@ func New(tp elastictransport.Interface) *API { GetScriptContext: core_get_script_context.NewGetScriptContextFunc(tp), GetScriptLanguages: core_get_script_languages.NewGetScriptLanguagesFunc(tp), GetSource: core_get_source.NewGetSourceFunc(tp), + HealthReport: core_health_report.NewHealthReportFunc(tp), Index: core_index.NewIndexFunc(tp), Info: core_info.NewInfoFunc(tp), KnnSearch: core_knn_search.NewKnnSearchFunc(tp), Mget: core_mget.NewMgetFunc(tp), + Msearch: core_msearch.NewMsearchFunc(tp), + MsearchTemplate: core_msearch_template.NewMsearchTemplateFunc(tp), Mtermvectors: core_mtermvectors.NewMtermvectorsFunc(tp), OpenPointInTime: core_open_point_in_time.NewOpenPointInTimeFunc(tp), Ping: core_ping.NewPingFunc(tp), diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/delete/delete.go index 0766fc098..c16e6ffb3 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/delete/delete.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/delete/delete.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an async search by ID. If the search is still running, the search // request will be cancelled. Otherwise, the saved search results are deleted. @@ -68,7 +68,7 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { return func(id string) *Delete { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -170,7 +170,6 @@ func (r Delete) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r Delete) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -210,11 +213,11 @@ func (r *Delete) Header(key, value string) *Delete { return r } -// Id The async search ID +// Id A unique identifier for the async search. // API Name: id -func (r *Delete) Id(v string) *Delete { +func (r *Delete) _id(id string) *Delete { r.paramSet |= idMask - r.id = v + r.id = id return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/delete/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/delete/response.go index 25f97df45..5370968db 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/delete/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/delete/response.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/async_search/delete/AsyncSearchDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/async_search/delete/AsyncSearchDeleteResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/get/get.go index c28271155..972b66307 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/get/get.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/get/get.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves the results of a previously submitted async search request given // its ID. 
@@ -69,7 +69,7 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { return func(id string) *Get { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -173,7 +173,6 @@ func (r Get) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -182,6 +181,10 @@ func (r Get) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -213,20 +216,25 @@ func (r *Get) Header(key, value string) *Get { return r } -// Id The async search ID +// Id A unique identifier for the async search. // API Name: id -func (r *Get) Id(v string) *Get { +func (r *Get) _id(id string) *Get { r.paramSet |= idMask - r.id = v + r.id = id return r } -// KeepAlive Specify the time interval in which the results (partial or final) for this -// search will be available +// KeepAlive Specifies how long the async search should be available in the cluster. +// When not specified, the `keep_alive` set with the corresponding submit async +// request will be used. +// Otherwise, it is possible to override the value and extend the validity of +// the request. +// When this period expires, the search, if still running, is cancelled. +// If the search is completed, its saved results are deleted. 
// API name: keep_alive -func (r *Get) KeepAlive(v string) *Get { - r.values.Set("keep_alive", v) +func (r *Get) KeepAlive(duration string) *Get { + r.values.Set("keep_alive", duration) return r } @@ -234,16 +242,22 @@ func (r *Get) KeepAlive(v string) *Get { // TypedKeys Specify whether aggregation and suggester names should be prefixed by their // respective types in the response // API name: typed_keys -func (r *Get) TypedKeys(b bool) *Get { - r.values.Set("typed_keys", strconv.FormatBool(b)) +func (r *Get) TypedKeys(typedkeys bool) *Get { + r.values.Set("typed_keys", strconv.FormatBool(typedkeys)) return r } -// WaitForCompletionTimeout Specify the time that the request should block waiting for the final response +// WaitForCompletionTimeout Specifies to wait for the search to be completed up until the provided +// timeout. +// Final results will be returned if available before the timeout expires, +// otherwise the currently available results will be returned once the timeout +// expires. +// By default no timeout is set meaning that the currently available results +// will be returned without any additional wait. // API name: wait_for_completion_timeout -func (r *Get) WaitForCompletionTimeout(v string) *Get { - r.values.Set("wait_for_completion_timeout", v) +func (r *Get) WaitForCompletionTimeout(duration string) *Get { + r.values.Set("wait_for_completion_timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/get/response.go index 879b9e4f5..14dc3001e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/get/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package get @@ -26,10 +26,30 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/async_search/get/AsyncSearchGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/async_search/get/AsyncSearchGetResponse.ts#L22-L24 type Response struct { - Response types.AsyncSearch `json:"response"` + + // CompletionTime Indicates when the async search completed. Only present + // when the search has completed. + CompletionTime types.DateTime `json:"completion_time,omitempty"` + CompletionTimeInMillis *int64 `json:"completion_time_in_millis,omitempty"` + // ExpirationTime Indicates when the async search will expire. + ExpirationTime types.DateTime `json:"expiration_time,omitempty"` + ExpirationTimeInMillis int64 `json:"expiration_time_in_millis"` + Id *string `json:"id,omitempty"` + // IsPartial When the query is no longer running, this property indicates whether the + // search failed or was successfully completed on all shards. + // While the query is running, `is_partial` is always set to `true`. + IsPartial bool `json:"is_partial"` + // IsRunning Indicates whether the search is still running or has completed. + // NOTE: If the search failed after some shards returned their results or the + // node that is coordinating the async search dies, results may be partial even + // though `is_running` is `false`. 
+ IsRunning bool `json:"is_running"` + Response types.AsyncSearch `json:"response"` + StartTime types.DateTime `json:"start_time,omitempty"` + StartTimeInMillis int64 `json:"start_time_in_millis"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/status/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/status/response.go index 478279bd7..e3f6b768e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/status/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/status/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package status @@ -26,11 +26,39 @@ import ( // Response holds the response body struct for the package status // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/async_search/status/AsyncSearchStatusResponse.ts#L28-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/async_search/status/AsyncSearchStatusResponse.ts#L39-L41 type Response struct { - CompletionStatus *int `json:"completion_status,omitempty"` - Shards_ types.ShardStatistics `json:"_shards"` + + // Clusters_ Metadata about clusters involved in the cross-cluster search. + // Not shown for local-only searches. + Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` + // CompletionStatus If the async search completed, this field shows the status code of the + // search. + // For example, 200 indicates that the async search was successfully completed. + // 503 indicates that the async search was completed with an error. 
+ CompletionStatus *int `json:"completion_status,omitempty"` + // CompletionTime Indicates when the async search completed. Only present + // when the search has completed. + CompletionTime types.DateTime `json:"completion_time,omitempty"` + CompletionTimeInMillis *int64 `json:"completion_time_in_millis,omitempty"` + // ExpirationTime Indicates when the async search will expire. + ExpirationTime types.DateTime `json:"expiration_time,omitempty"` + ExpirationTimeInMillis int64 `json:"expiration_time_in_millis"` + Id *string `json:"id,omitempty"` + // IsPartial When the query is no longer running, this property indicates whether the + // search failed or was successfully completed on all shards. + // While the query is running, `is_partial` is always set to `true`. + IsPartial bool `json:"is_partial"` + // IsRunning Indicates whether the search is still running or has completed. + // NOTE: If the search failed after some shards returned their results or the + // node that is coordinating the async search dies, results may be partial even + // though `is_running` is `false`. + IsRunning bool `json:"is_running"` + // Shards_ Indicates how many shards have run the query so far. + Shards_ types.ShardStatistics `json:"_shards"` + StartTime types.DateTime `json:"start_time,omitempty"` + StartTimeInMillis int64 `json:"start_time_in_millis"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/status/status.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/status/status.go index 3d5c9a914..0a6bfe57c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/status/status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/status/status.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves the status of a previously submitted async search request given its // ID. @@ -68,7 +68,7 @@ func NewStatusFunc(tp elastictransport.Interface) NewStatus { return func(id string) *Status { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -172,7 +172,6 @@ func (r Status) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r Status) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,11 +215,11 @@ func (r *Status) Header(key, value string) *Status { return r } -// Id The async search ID +// Id A unique identifier for the async search. // API Name: id -func (r *Status) Id(v string) *Status { +func (r *Status) _id(id string) *Status { r.paramSet |= idMask - r.id = v + r.id = id return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/submit/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/submit/request.go index cc64b3f71..379c1d4df 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/submit/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/submit/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package submit @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package submit // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/async_search/submit/AsyncSearchSubmitRequest.ts#L55-L255 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/async_search/submit/AsyncSearchSubmitRequest.ts#L55-L286 type Request struct { Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` Collapse *types.FieldCollapse `json:"collapse,omitempty"` @@ -53,7 +53,7 @@ type Request struct { // IndicesBoost Boosts the _score of documents from specified indices. IndicesBoost []map[string]types.Float64 `json:"indices_boost,omitempty"` // Knn Defines the approximate kNN search to run. - Knn *types.KnnQuery `json:"knn,omitempty"` + Knn []types.KnnQuery `json:"knn,omitempty"` // MinScore Minimum _score for matching documents. Documents with a lower _score are // not included in the search results. MinScore *types.Float64 `json:"min_score,omitempty"` @@ -67,7 +67,7 @@ type Request struct { Rescore []types.Rescore `json:"rescore,omitempty"` // RuntimeMappings Defines one or more runtime fields in the search request. These fields take // precedence over mapped fields with the same name. - RuntimeMappings map[string]types.RuntimeField `json:"runtime_mappings,omitempty"` + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` // ScriptFields Retrieve a script evaluation (based on different fields) for each hit. 
ScriptFields map[string]types.ScriptField `json:"script_fields,omitempty"` SearchAfter []types.FieldValue `json:"search_after,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/submit/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/submit/response.go index 3eb1e42d5..9b1fb997f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/submit/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/submit/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package submit @@ -26,10 +26,30 @@ import ( // Response holds the response body struct for the package submit // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/async_search/submit/AsyncSearchSubmitResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/async_search/submit/AsyncSearchSubmitResponse.ts#L22-L24 type Response struct { - Response types.AsyncSearch `json:"response"` + + // CompletionTime Indicates when the async search completed. Only present + // when the search has completed. + CompletionTime types.DateTime `json:"completion_time,omitempty"` + CompletionTimeInMillis *int64 `json:"completion_time_in_millis,omitempty"` + // ExpirationTime Indicates when the async search will expire. 
+ ExpirationTime types.DateTime `json:"expiration_time,omitempty"` + ExpirationTimeInMillis int64 `json:"expiration_time_in_millis"` + Id *string `json:"id,omitempty"` + // IsPartial When the query is no longer running, this property indicates whether the + // search failed or was successfully completed on all shards. + // While the query is running, `is_partial` is always set to `true`. + IsPartial bool `json:"is_partial"` + // IsRunning Indicates whether the search is still running or has completed. + // NOTE: If the search failed after some shards returned their results or the + // node that is coordinating the async search dies, results may be partial even + // though `is_running` is `false`. + IsRunning bool `json:"is_running"` + Response types.AsyncSearch `json:"response"` + StartTime types.DateTime `json:"start_time,omitempty"` + StartTimeInMillis int64 `json:"start_time_in_millis"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/submit/submit.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/submit/submit.go index 4051405e7..c428817ee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/submit/submit.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/asyncsearch/submit/submit.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Executes a search request asynchronously. 
package submit @@ -35,7 +35,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestmode" @@ -57,8 +57,9 @@ type Submit struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -87,6 +88,8 @@ func New(tp elastictransport.Interface) *Submit { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -116,9 +119,19 @@ func (r *Submit) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -126,6 +139,7 @@ func (r *Submit) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -213,7 +227,6 @@ func (r Submit) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -222,6 +235,10 @@ func (r Submit) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -235,35 +252,38 @@ func (r *Submit) Header(key, value string) *Submit { // Index A comma-separated list of index names to search; use `_all` or empty string // to perform the operation on all indices // API Name: index -func (r *Submit) Index(v string) *Submit { +func (r *Submit) Index(index 
string) *Submit { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// WaitForCompletionTimeout Specify the time that the request should block waiting for the final response +// WaitForCompletionTimeout Blocks and waits until the search is completed up to a certain timeout. +// When the async search completes within the timeout, the response won’t +// include the ID as the results are not stored in the cluster. // API name: wait_for_completion_timeout -func (r *Submit) WaitForCompletionTimeout(v string) *Submit { - r.values.Set("wait_for_completion_timeout", v) +func (r *Submit) WaitForCompletionTimeout(duration string) *Submit { + r.values.Set("wait_for_completion_timeout", duration) return r } -// KeepOnCompletion Control whether the response should be stored in the cluster if it completed -// within the provided [wait_for_completion] time (default: false) +// KeepOnCompletion If `true`, results are stored for later retrieval when the search completes +// within the `wait_for_completion_timeout`. // API name: keep_on_completion -func (r *Submit) KeepOnCompletion(b bool) *Submit { - r.values.Set("keep_on_completion", strconv.FormatBool(b)) +func (r *Submit) KeepOnCompletion(keeponcompletion bool) *Submit { + r.values.Set("keep_on_completion", strconv.FormatBool(keeponcompletion)) return r } -// KeepAlive Update the time interval in which the results (partial or final) for this -// search will be available +// KeepAlive Specifies how long the async search needs to be available. +// Ongoing async searches and any saved search results are deleted after this +// period. // API name: keep_alive -func (r *Submit) KeepAlive(v string) *Submit { - r.values.Set("keep_alive", v) +func (r *Submit) KeepAlive(duration string) *Submit { + r.values.Set("keep_alive", duration) return r } @@ -271,8 +291,8 @@ func (r *Submit) KeepAlive(v string) *Submit { // AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete // indices. 
(This includes `_all` string or when no indices have been specified) // API name: allow_no_indices -func (r *Submit) AllowNoIndices(b bool) *Submit { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *Submit) AllowNoIndices(allownoindices bool) *Submit { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } @@ -280,16 +300,16 @@ func (r *Submit) AllowNoIndices(b bool) *Submit { // AllowPartialSearchResults Indicate if an error should be returned if there is a partial search failure // or timeout // API name: allow_partial_search_results -func (r *Submit) AllowPartialSearchResults(b bool) *Submit { - r.values.Set("allow_partial_search_results", strconv.FormatBool(b)) +func (r *Submit) AllowPartialSearchResults(allowpartialsearchresults bool) *Submit { + r.values.Set("allow_partial_search_results", strconv.FormatBool(allowpartialsearchresults)) return r } // Analyzer The analyzer to use for the query string // API name: analyzer -func (r *Submit) Analyzer(v string) *Submit { - r.values.Set("analyzer", v) +func (r *Submit) Analyzer(analyzer string) *Submit { + r.values.Set("analyzer", analyzer) return r } @@ -297,33 +317,35 @@ func (r *Submit) Analyzer(v string) *Submit { // AnalyzeWildcard Specify whether wildcard and prefix queries should be analyzed (default: // false) // API name: analyze_wildcard -func (r *Submit) AnalyzeWildcard(b bool) *Submit { - r.values.Set("analyze_wildcard", strconv.FormatBool(b)) +func (r *Submit) AnalyzeWildcard(analyzewildcard bool) *Submit { + r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) return r } -// BatchedReduceSize The number of shard results that should be reduced at once on the -// coordinating node. This value should be used as the granularity at which -// progress results will be made available. +// BatchedReduceSize Affects how often partial results become available, which happens whenever +// shard results are reduced. 
+// A partial reduction is performed every time the coordinating node has +// received a certain number of new shard responses (5 by default). // API name: batched_reduce_size -func (r *Submit) BatchedReduceSize(v string) *Submit { - r.values.Set("batched_reduce_size", v) +func (r *Submit) BatchedReduceSize(batchedreducesize string) *Submit { + r.values.Set("batched_reduce_size", batchedreducesize) return r } +// CcsMinimizeRoundtrips The default value is the only supported value. // API name: ccs_minimize_roundtrips -func (r *Submit) CcsMinimizeRoundtrips(b bool) *Submit { - r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(b)) +func (r *Submit) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Submit { + r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) return r } // DefaultOperator The default operator for query string query (AND or OR) // API name: default_operator -func (r *Submit) DefaultOperator(enum operator.Operator) *Submit { - r.values.Set("default_operator", enum.String()) +func (r *Submit) DefaultOperator(defaultoperator operator.Operator) *Submit { + r.values.Set("default_operator", defaultoperator.String()) return r } @@ -331,17 +353,8 @@ func (r *Submit) DefaultOperator(enum operator.Operator) *Submit { // Df The field to use as default where no field prefix is given in the query // string // API name: df -func (r *Submit) Df(v string) *Submit { - r.values.Set("df", v) - - return r -} - -// DocvalueFields A comma-separated list of fields to return as the docvalue representation of -// a field for each hit -// API name: docvalue_fields -func (r *Submit) DocvalueFields(v string) *Submit { - r.values.Set("docvalue_fields", v) +func (r *Submit) Df(df string) *Submit { + r.values.Set("df", df) return r } @@ -349,17 +362,12 @@ func (r *Submit) DocvalueFields(v string) *Submit { // ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, // closed or both. 
// API name: expand_wildcards -func (r *Submit) ExpandWildcards(v string) *Submit { - r.values.Set("expand_wildcards", v) - - return r -} - -// Explain Specify whether to return detailed information about score computation as -// part of a hit -// API name: explain -func (r *Submit) Explain(b bool) *Submit { - r.values.Set("explain", strconv.FormatBool(b)) +func (r *Submit) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Submit { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } @@ -367,8 +375,8 @@ func (r *Submit) Explain(b bool) *Submit { // IgnoreThrottled Whether specified concrete, expanded or aliased indices should be ignored // when throttled // API name: ignore_throttled -func (r *Submit) IgnoreThrottled(b bool) *Submit { - r.values.Set("ignore_throttled", strconv.FormatBool(b)) +func (r *Submit) IgnoreThrottled(ignorethrottled bool) *Submit { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) return r } @@ -376,8 +384,8 @@ func (r *Submit) IgnoreThrottled(b bool) *Submit { // IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable // (missing or closed) // API name: ignore_unavailable -func (r *Submit) IgnoreUnavailable(b bool) *Submit { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Submit) IgnoreUnavailable(ignoreunavailable bool) *Submit { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } @@ -385,8 +393,8 @@ func (r *Submit) IgnoreUnavailable(b bool) *Submit { // Lenient Specify whether format-based query failures (such as providing text to a // numeric field) should be ignored // API name: lenient -func (r *Submit) Lenient(b bool) *Submit { - r.values.Set("lenient", strconv.FormatBool(b)) +func (r *Submit) Lenient(lenient bool) *Submit { + r.values.Set("lenient", strconv.FormatBool(lenient)) return r } @@ 
-395,15 +403,15 @@ func (r *Submit) Lenient(b bool) *Submit { // concurrently. This value should be used to limit the impact of the search on // the cluster in order to limit the number of concurrent shard requests // API name: max_concurrent_shard_requests -func (r *Submit) MaxConcurrentShardRequests(v string) *Submit { - r.values.Set("max_concurrent_shard_requests", v) +func (r *Submit) MaxConcurrentShardRequests(maxconcurrentshardrequests string) *Submit { + r.values.Set("max_concurrent_shard_requests", maxconcurrentshardrequests) return r } // API name: min_compatible_shard_node -func (r *Submit) MinCompatibleShardNode(v string) *Submit { - r.values.Set("min_compatible_shard_node", v) +func (r *Submit) MinCompatibleShardNode(versionstring string) *Submit { + r.values.Set("min_compatible_shard_node", versionstring) return r } @@ -411,15 +419,18 @@ func (r *Submit) MinCompatibleShardNode(v string) *Submit { // Preference Specify the node or shard the operation should be performed on (default: // random) // API name: preference -func (r *Submit) Preference(v string) *Submit { - r.values.Set("preference", v) +func (r *Submit) Preference(preference string) *Submit { + r.values.Set("preference", preference) return r } +// PreFilterShardSize The default value cannot be changed, which enforces the execution of a +// pre-filter roundtrip to retrieve statistics from each shard so that the ones +// that surely don’t hold any document matching the query get skipped. 
// API name: pre_filter_shard_size -func (r *Submit) PreFilterShardSize(v string) *Submit { - r.values.Set("pre_filter_shard_size", v) +func (r *Submit) PreFilterShardSize(prefiltershardsize string) *Submit { + r.values.Set("pre_filter_shard_size", prefiltershardsize) return r } @@ -427,204 +438,395 @@ func (r *Submit) PreFilterShardSize(v string) *Submit { // RequestCache Specify if request cache should be used for this request or not, defaults to // true // API name: request_cache -func (r *Submit) RequestCache(b bool) *Submit { - r.values.Set("request_cache", strconv.FormatBool(b)) +func (r *Submit) RequestCache(requestcache bool) *Submit { + r.values.Set("request_cache", strconv.FormatBool(requestcache)) return r } // Routing A comma-separated list of specific routing values // API name: routing -func (r *Submit) Routing(v string) *Submit { - r.values.Set("routing", v) +func (r *Submit) Routing(routing string) *Submit { + r.values.Set("routing", routing) return r } // API name: scroll -func (r *Submit) Scroll(v string) *Submit { - r.values.Set("scroll", v) +func (r *Submit) Scroll(duration string) *Submit { + r.values.Set("scroll", duration) return r } // SearchType Search operation type // API name: search_type -func (r *Submit) SearchType(enum searchtype.SearchType) *Submit { - r.values.Set("search_type", enum.String()) - - return r -} - -// Stats Specific 'tag' of the request for logging and statistical purposes -// API name: stats -func (r *Submit) Stats(v string) *Submit { - r.values.Set("stats", v) - - return r -} - -// StoredFields A comma-separated list of stored fields to return as part of a hit -// API name: stored_fields -func (r *Submit) StoredFields(v string) *Submit { - r.values.Set("stored_fields", v) +func (r *Submit) SearchType(searchtype searchtype.SearchType) *Submit { + r.values.Set("search_type", searchtype.String()) return r } // SuggestField Specifies which field to use for suggestions. 
// API name: suggest_field -func (r *Submit) SuggestField(v string) *Submit { - r.values.Set("suggest_field", v) +func (r *Submit) SuggestField(field string) *Submit { + r.values.Set("suggest_field", field) return r } // SuggestMode Specify suggest mode // API name: suggest_mode -func (r *Submit) SuggestMode(enum suggestmode.SuggestMode) *Submit { - r.values.Set("suggest_mode", enum.String()) +func (r *Submit) SuggestMode(suggestmode suggestmode.SuggestMode) *Submit { + r.values.Set("suggest_mode", suggestmode.String()) return r } // SuggestSize How many suggestions to return in response // API name: suggest_size -func (r *Submit) SuggestSize(v string) *Submit { - r.values.Set("suggest_size", v) +func (r *Submit) SuggestSize(suggestsize string) *Submit { + r.values.Set("suggest_size", suggestsize) return r } // SuggestText The source text for which the suggestions should be returned. // API name: suggest_text -func (r *Submit) SuggestText(v string) *Submit { - r.values.Set("suggest_text", v) +func (r *Submit) SuggestText(suggesttext string) *Submit { + r.values.Set("suggest_text", suggesttext) return r } -// TerminateAfter The maximum number of documents to collect for each shard, upon reaching -// which the query execution will terminate early. 
-// API name: terminate_after -func (r *Submit) TerminateAfter(v string) *Submit { - r.values.Set("terminate_after", v) +// TypedKeys Specify whether aggregation and suggester names should be prefixed by their +// respective types in the response +// API name: typed_keys +func (r *Submit) TypedKeys(typedkeys bool) *Submit { + r.values.Set("typed_keys", strconv.FormatBool(typedkeys)) return r } -// Timeout Explicit operation timeout -// API name: timeout -func (r *Submit) Timeout(v string) *Submit { - r.values.Set("timeout", v) +// API name: rest_total_hits_as_int +func (r *Submit) RestTotalHitsAsInt(resttotalhitsasint bool) *Submit { + r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) return r } -// TrackTotalHits Indicate if the number of documents that match the query should be tracked. A -// number can also be specified, to accurately track the total hit count up to -// the number. -// API name: track_total_hits -func (r *Submit) TrackTotalHits(v string) *Submit { - r.values.Set("track_total_hits", v) +// SourceExcludes_ A list of fields to exclude from the returned _source field +// API name: _source_excludes +func (r *Submit) SourceExcludes_(fields ...string) *Submit { + r.values.Set("_source_excludes", strings.Join(fields, ",")) return r } -// TrackScores Whether to calculate and return scores even if they are not used for sorting -// API name: track_scores -func (r *Submit) TrackScores(b bool) *Submit { - r.values.Set("track_scores", strconv.FormatBool(b)) +// SourceIncludes_ A list of fields to extract and return from the _source field +// API name: _source_includes +func (r *Submit) SourceIncludes_(fields ...string) *Submit { + r.values.Set("_source_includes", strings.Join(fields, ",")) return r } -// TypedKeys Specify whether aggregation and suggester names should be prefixed by their -// respective types in the response -// API name: typed_keys -func (r *Submit) TypedKeys(b bool) *Submit { - r.values.Set("typed_keys", 
strconv.FormatBool(b)) +// Q Query in the Lucene query string syntax +// API name: q +func (r *Submit) Q(q string) *Submit { + r.values.Set("q", q) return r } -// API name: rest_total_hits_as_int -func (r *Submit) RestTotalHitsAsInt(b bool) *Submit { - r.values.Set("rest_total_hits_as_int", strconv.FormatBool(b)) +// API name: aggregations +func (r *Submit) Aggregations(aggregations map[string]types.Aggregations) *Submit { + + r.req.Aggregations = aggregations return r } -// Version Specify whether to return document version as part of a hit -// API name: version -func (r *Submit) Version(b bool) *Submit { - r.values.Set("version", strconv.FormatBool(b)) +// API name: collapse +func (r *Submit) Collapse(collapse *types.FieldCollapse) *Submit { + + r.req.Collapse = collapse return r } -// Source_ True or false to return the _source field or not, or a list of fields to -// return -// API name: _source -func (r *Submit) Source_(v string) *Submit { - r.values.Set("_source", v) +// DocvalueFields Array of wildcard (*) patterns. The request returns doc values for field +// names matching these patterns in the hits.fields property of the response. +// API name: docvalue_fields +func (r *Submit) DocvalueFields(docvaluefields ...types.FieldAndFormat) *Submit { + r.req.DocvalueFields = docvaluefields return r } -// SourceExcludes_ A list of fields to exclude from the returned _source field -// API name: _source_excludes -func (r *Submit) SourceExcludes_(v string) *Submit { - r.values.Set("_source_excludes", v) +// Explain If true, returns detailed information about score computation as part of a +// hit. 
+// API name: explain +func (r *Submit) Explain(explain bool) *Submit { + r.req.Explain = &explain return r } -// SourceIncludes_ A list of fields to extract and return from the _source field -// API name: _source_includes -func (r *Submit) SourceIncludes_(v string) *Submit { - r.values.Set("_source_includes", v) +// Ext Configuration of search extensions defined by Elasticsearch plugins. +// API name: ext +func (r *Submit) Ext(ext map[string]json.RawMessage) *Submit { + + r.req.Ext = ext return r } -// SeqNoPrimaryTerm Specify whether to return sequence number and primary term of the last -// modification of each hit -// API name: seq_no_primary_term -func (r *Submit) SeqNoPrimaryTerm(b bool) *Submit { - r.values.Set("seq_no_primary_term", strconv.FormatBool(b)) +// Fields Array of wildcard (*) patterns. The request returns values for field names +// matching these patterns in the hits.fields property of the response. +// API name: fields +func (r *Submit) Fields(fields ...types.FieldAndFormat) *Submit { + r.req.Fields = fields return r } -// Q Query in the Lucene query string syntax -// API name: q -func (r *Submit) Q(v string) *Submit { - r.values.Set("q", v) +// From Starting document offset. By default, you cannot page through more than +// 10,000 +// hits using the from and size parameters. To page through more hits, use the +// search_after parameter. +// API name: from +func (r *Submit) From(from int) *Submit { + r.req.From = &from + + return r +} + +// API name: highlight +func (r *Submit) Highlight(highlight *types.Highlight) *Submit { + + r.req.Highlight = highlight + + return r +} + +// IndicesBoost Boosts the _score of documents from specified indices. +// API name: indices_boost +func (r *Submit) IndicesBoost(indicesboosts ...map[string]types.Float64) *Submit { + r.req.IndicesBoost = indicesboosts + + return r +} + +// Knn Defines the approximate kNN search to run. 
+// API name: knn +func (r *Submit) Knn(knns ...types.KnnQuery) *Submit { + r.req.Knn = knns + + return r +} + +// MinScore Minimum _score for matching documents. Documents with a lower _score are +// not included in the search results. +// API name: min_score +func (r *Submit) MinScore(minscore types.Float64) *Submit { + + r.req.MinScore = &minscore + + return r +} + +// Pit Limits the search to a point in time (PIT). If you provide a PIT, you +// cannot specify an in the request path. +// API name: pit +func (r *Submit) Pit(pit *types.PointInTimeReference) *Submit { + + r.req.Pit = pit + + return r +} + +// API name: post_filter +func (r *Submit) PostFilter(postfilter *types.Query) *Submit { + + r.req.PostFilter = postfilter + + return r +} + +// API name: profile +func (r *Submit) Profile(profile bool) *Submit { + r.req.Profile = &profile + + return r +} + +// Query Defines the search definition using the Query DSL. +// API name: query +func (r *Submit) Query(query *types.Query) *Submit { + + r.req.Query = query return r } -// Size Number of hits to return (default: 10) +// API name: rescore +func (r *Submit) Rescore(rescores ...types.Rescore) *Submit { + r.req.Rescore = rescores + + return r +} + +// RuntimeMappings Defines one or more runtime fields in the search request. These fields take +// precedence over mapped fields with the same name. +// API name: runtime_mappings +func (r *Submit) RuntimeMappings(runtimefields types.RuntimeFields) *Submit { + r.req.RuntimeMappings = runtimefields + + return r +} + +// ScriptFields Retrieve a script evaluation (based on different fields) for each hit. 
+// API name: script_fields +func (r *Submit) ScriptFields(scriptfields map[string]types.ScriptField) *Submit { + + r.req.ScriptFields = scriptfields + + return r +} + +// API name: search_after +func (r *Submit) SearchAfter(sortresults ...types.FieldValue) *Submit { + r.req.SearchAfter = sortresults + + return r +} + +// SeqNoPrimaryTerm If true, returns sequence number and primary term of the last modification +// of each hit. See Optimistic concurrency control. +// API name: seq_no_primary_term +func (r *Submit) SeqNoPrimaryTerm(seqnoprimaryterm bool) *Submit { + r.req.SeqNoPrimaryTerm = &seqnoprimaryterm + + return r +} + +// Size The number of hits to return. By default, you cannot page through more +// than 10,000 hits using the from and size parameters. To page through more +// hits, use the search_after parameter. // API name: size -func (r *Submit) Size(i int) *Submit { - r.values.Set("size", strconv.Itoa(i)) +func (r *Submit) Size(size int) *Submit { + r.req.Size = &size return r } -// From Starting offset (default: 0) -// API name: from -func (r *Submit) From(i int) *Submit { - r.values.Set("from", strconv.Itoa(i)) +// API name: slice +func (r *Submit) Slice(slice *types.SlicedScroll) *Submit { + + r.req.Slice = slice return r } -// Sort A comma-separated list of : pairs // API name: sort -func (r *Submit) Sort(v string) *Submit { - r.values.Set("sort", v) +func (r *Submit) Sort(sorts ...types.SortCombinations) *Submit { + r.req.Sort = sorts + + return r +} + +// Source_ Indicates which source fields are returned for matching documents. These +// fields are returned in the hits._source property of the search response. +// API name: _source +func (r *Submit) Source_(sourceconfig types.SourceConfig) *Submit { + r.req.Source_ = sourceconfig + + return r +} + +// Stats Stats groups to associate with the search. Each group maintains a statistics +// aggregation for its associated searches. You can retrieve these stats using +// the indices stats API. 
+// API name: stats +func (r *Submit) Stats(stats ...string) *Submit { + r.req.Stats = stats + + return r +} + +// StoredFields List of stored fields to return as part of a hit. If no fields are specified, +// no stored fields are included in the response. If this field is specified, +// the _source +// parameter defaults to false. You can pass _source: true to return both source +// fields +// and stored fields in the search response. +// API name: stored_fields +func (r *Submit) StoredFields(fields ...string) *Submit { + r.req.StoredFields = fields + + return r +} + +// API name: suggest +func (r *Submit) Suggest(suggest *types.Suggester) *Submit { + + r.req.Suggest = suggest + + return r +} + +// TerminateAfter Maximum number of documents to collect for each shard. If a query reaches +// this +// limit, Elasticsearch terminates the query early. Elasticsearch collects +// documents +// before sorting. Defaults to 0, which does not terminate query execution +// early. +// API name: terminate_after +func (r *Submit) TerminateAfter(terminateafter int64) *Submit { + + r.req.TerminateAfter = &terminateafter + + return r +} + +// Timeout Specifies the period of time to wait for a response from each shard. If no +// response +// is received before the timeout expires, the request fails and returns an +// error. +// Defaults to no timeout. +// API name: timeout +func (r *Submit) Timeout(timeout string) *Submit { + + r.req.Timeout = &timeout + + return r +} + +// TrackScores If true, calculate and return document scores, even if the scores are not +// used for sorting. +// API name: track_scores +func (r *Submit) TrackScores(trackscores bool) *Submit { + r.req.TrackScores = &trackscores + + return r +} + +// TrackTotalHits Number of hits matching the query to count accurately. If true, the exact +// number of hits is returned at the cost of some performance. If false, the +// response does not include the total number of hits matching the query. 
+// Defaults to 10,000 hits. +// API name: track_total_hits +func (r *Submit) TrackTotalHits(trackhits types.TrackHits) *Submit { + r.req.TrackTotalHits = trackhits + + return r +} + +// Version If true, returns document version as part of a hit. +// API name: version +func (r *Submit) Version(version bool) *Submit { + r.req.Version = &version return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/deleteautoscalingpolicy/delete_autoscaling_policy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/deleteautoscalingpolicy/delete_autoscaling_policy.go index 3eb74d21d..9950920cb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/deleteautoscalingpolicy/delete_autoscaling_policy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/deleteautoscalingpolicy/delete_autoscaling_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. // Direct use is not supported. 
@@ -68,7 +68,7 @@ func NewDeleteAutoscalingPolicyFunc(tp elastictransport.Interface) NewDeleteAuto return func(name string) *DeleteAutoscalingPolicy { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -172,7 +172,6 @@ func (r DeleteAutoscalingPolicy) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r DeleteAutoscalingPolicy) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +217,9 @@ func (r *DeleteAutoscalingPolicy) Header(key, value string) *DeleteAutoscalingPo // Name the name of the autoscaling policy // API Name: name -func (r *DeleteAutoscalingPolicy) Name(v string) *DeleteAutoscalingPolicy { +func (r *DeleteAutoscalingPolicy) _name(name string) *DeleteAutoscalingPolicy { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/deleteautoscalingpolicy/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/deleteautoscalingpolicy/response.go index 5e868af50..b44228efe 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/deleteautoscalingpolicy/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/deleteautoscalingpolicy/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deleteautoscalingpolicy // Response holds the response body struct for the package deleteautoscalingpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/autoscaling/delete_autoscaling_policy/DeleteAutoscalingPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/autoscaling/delete_autoscaling_policy/DeleteAutoscalingPolicyResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/getautoscalingcapacity/get_autoscaling_capacity.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/getautoscalingcapacity/get_autoscaling_capacity.go index 865268366..9447ef93f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/getautoscalingcapacity/get_autoscaling_capacity.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/getautoscalingcapacity/get_autoscaling_capacity.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Gets the current autoscaling capacity based on the configured autoscaling // policy. Designed for indirect use by ECE/ESS and ECK. 
Direct use is not @@ -163,7 +163,6 @@ func (r GetAutoscalingCapacity) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -172,6 +171,10 @@ func (r GetAutoscalingCapacity) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/getautoscalingcapacity/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/getautoscalingcapacity/response.go index 7c09ebc87..ec0beaca8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/getautoscalingcapacity/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/getautoscalingcapacity/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getautoscalingcapacity @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getautoscalingcapacity // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L25-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L25-L29 type Response struct { Policies map[string]types.AutoscalingDeciders `json:"policies"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/getautoscalingpolicy/get_autoscaling_policy.go 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/getautoscalingpolicy/get_autoscaling_policy.go index dd821bc57..42c416fb2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/getautoscalingpolicy/get_autoscaling_policy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/getautoscalingpolicy/get_autoscaling_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves an autoscaling policy. Designed for indirect use by ECE/ESS and // ECK. Direct use is not supported. @@ -68,7 +68,7 @@ func NewGetAutoscalingPolicyFunc(tp elastictransport.Interface) NewGetAutoscalin return func(name string) *GetAutoscalingPolicy { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -172,7 +172,6 @@ func (r GetAutoscalingPolicy) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r GetAutoscalingPolicy) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +217,9 @@ func (r *GetAutoscalingPolicy) Header(key, value string) *GetAutoscalingPolicy { // Name the name of the autoscaling policy // API Name: name -func (r *GetAutoscalingPolicy) Name(v string) *GetAutoscalingPolicy { +func (r *GetAutoscalingPolicy) _name(name string) *GetAutoscalingPolicy { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/getautoscalingpolicy/response.go 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/getautoscalingpolicy/response.go index 42f0f3398..906643656 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/getautoscalingpolicy/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/getautoscalingpolicy/response.go @@ -16,15 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getautoscalingpolicy -import "encoding/json" +import ( + "encoding/json" +) // Response holds the response body struct for the package getautoscalingpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/autoscaling/get_autoscaling_policy/GetAutoscalingPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/autoscaling/get_autoscaling_policy/GetAutoscalingPolicyResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/putautoscalingpolicy/put_autoscaling_policy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/putautoscalingpolicy/put_autoscaling_policy.go index 0950749a9..b4d09cac7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/putautoscalingpolicy/put_autoscaling_policy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/putautoscalingpolicy/put_autoscaling_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and // ECK. Direct use is not supported. @@ -53,8 +53,9 @@ type PutAutoscalingPolicy struct { buf *gobytes.Buffer - req *types.AutoscalingPolicy - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +71,7 @@ func NewPutAutoscalingPolicyFunc(tp elastictransport.Interface) NewPutAutoscalin return func(name string) *PutAutoscalingPolicy { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -100,7 +101,7 @@ func (r *PutAutoscalingPolicy) Raw(raw io.Reader) *PutAutoscalingPolicy { } // Request allows to set the request property with the appropriate payload. -func (r *PutAutoscalingPolicy) Request(req *types.AutoscalingPolicy) *PutAutoscalingPolicy { +func (r *PutAutoscalingPolicy) Request(req *Request) *PutAutoscalingPolicy { r.req = req return r @@ -115,9 +116,19 @@ func (r *PutAutoscalingPolicy) HttpRequest(ctx context.Context) (*http.Request, var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -125,6 +136,7 @@ func (r *PutAutoscalingPolicy) HttpRequest(ctx context.Context) (*http.Request, } r.buf.Write(data) + } r.path.Scheme = "http" @@ -207,7 +219,6 @@ func (r PutAutoscalingPolicy) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -216,6 +227,10 @@ func (r PutAutoscalingPolicy) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return 
nil, errorResponse } @@ -228,9 +243,25 @@ func (r *PutAutoscalingPolicy) Header(key, value string) *PutAutoscalingPolicy { // Name the name of the autoscaling policy // API Name: name -func (r *PutAutoscalingPolicy) Name(v string) *PutAutoscalingPolicy { +func (r *PutAutoscalingPolicy) _name(name string) *PutAutoscalingPolicy { r.paramSet |= nameMask - r.name = v + r.name = name + + return r +} + +// Deciders Decider settings +// API name: deciders +func (r *PutAutoscalingPolicy) Deciders(deciders map[string]json.RawMessage) *PutAutoscalingPolicy { + + r.req.Deciders = deciders + + return r +} + +// API name: roles +func (r *PutAutoscalingPolicy) Roles(roles ...string) *PutAutoscalingPolicy { + r.req.Roles = roles return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/putautoscalingpolicy/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/putautoscalingpolicy/request.go new file mode 100644 index 000000000..624da11bb --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/putautoscalingpolicy/request.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package putautoscalingpolicy + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package putautoscalingpolicy +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/autoscaling/put_autoscaling_policy/PutAutoscalingPolicyRequest.ts#L24-L35 +type Request = types.AutoscalingPolicy diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/putautoscalingpolicy/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/putautoscalingpolicy/response.go index 2f544dc3c..6df9973d3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/putautoscalingpolicy/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/autoscaling/putautoscalingpolicy/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putautoscalingpolicy // Response holds the response body struct for the package putautoscalingpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/autoscaling/put_autoscaling_policy/PutAutoscalingPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/autoscaling/put_autoscaling_policy/PutAutoscalingPolicyResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/aliases/aliases.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/aliases/aliases.go index d16af4190..88f8fddb5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/aliases/aliases.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/aliases/aliases.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Shows information about currently configured aliases to indices including // filter and routing infos. 
@@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -177,7 +178,6 @@ func (r Aliases) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -186,6 +186,10 @@ func (r Aliases) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -217,11 +221,12 @@ func (r *Aliases) Header(key, value string) *Aliases { return r } -// Name A comma-separated list of alias names to return +// Name A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To +// retrieve all aliases, omit this parameter or use `*` or `_all`. // API Name: name -func (r *Aliases) Name(v string) *Aliases { +func (r *Aliases) Name(name string) *Aliases { r.paramSet |= nameMask - r.name = v + r.name = name return r } @@ -229,8 +234,12 @@ func (r *Aliases) Name(v string) *Aliases { // ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, // closed or both. 
// API name: expand_wildcards -func (r *Aliases) ExpandWildcards(v string) *Aliases { - r.values.Set("expand_wildcards", v) +func (r *Aliases) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Aliases { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/aliases/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/aliases/response.go index 50196170b..04f572d75 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/aliases/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/aliases/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package aliases @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package aliases // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/aliases/CatAliasesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/aliases/CatAliasesResponse.ts#L22-L24 type Response []types.AliasesRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/allocation/allocation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/allocation/allocation.go index ce711f97d..c650641ca 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/allocation/allocation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/allocation/allocation.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Provides a snapshot of how many shards are allocated to each data node and // how much disk space they are using. @@ -36,7 +36,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" ) @@ -179,7 +178,6 @@ func (r Allocation) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -188,6 +186,10 @@ func (r Allocation) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -219,19 +221,20 @@ func (r *Allocation) Header(key, value string) *Allocation { return r } -// NodeId A comma-separated list of node IDs or names to limit the returned information +// NodeId Comma-separated list of node identifiers or names used to limit the returned +// information. // API Name: nodeid -func (r *Allocation) NodeId(v string) *Allocation { +func (r *Allocation) NodeId(nodeid string) *Allocation { r.paramSet |= nodeidMask - r.nodeid = v + r.nodeid = nodeid return r } -// Bytes The unit in which to display byte values +// Bytes The unit used to display byte values. 
// API name: bytes -func (r *Allocation) Bytes(enum bytes.Bytes) *Allocation { - r.values.Set("bytes", enum.String()) +func (r *Allocation) Bytes(bytes bytes.Bytes) *Allocation { + r.values.Set("bytes", bytes.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/allocation/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/allocation/response.go index 77251984b..5b221bd30 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/allocation/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/allocation/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package allocation @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package allocation // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/allocation/CatAllocationResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/allocation/CatAllocationResponse.ts#L22-L24 type Response []types.AllocationRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/componenttemplates/component_templates.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/componenttemplates/component_templates.go index 995a105f4..f6f654ca9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/componenttemplates/component_templates.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/componenttemplates/component_templates.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about existing component_templates templates. package componenttemplates @@ -73,7 +73,7 @@ func NewComponentTemplatesFunc(tp elastictransport.Interface) NewComponentTempla // Returns information about existing component_templates templates. // -// https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-compoentn-templates.html +// https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-component-templates.html func New(tp elastictransport.Interface) *ComponentTemplates { r := &ComponentTemplates{ transport: tp, @@ -175,7 +175,6 @@ func (r ComponentTemplates) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -184,6 +183,10 @@ func (r ComponentTemplates) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -215,11 +218,12 @@ func (r *ComponentTemplates) Header(key, value string) *ComponentTemplates { return r } -// Name A pattern that returned component template names must match +// Name The name of the component template. Accepts wildcard expressions. If omitted, +// all component templates are returned. 
// API Name: name -func (r *ComponentTemplates) Name(v string) *ComponentTemplates { +func (r *ComponentTemplates) Name(name string) *ComponentTemplates { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/componenttemplates/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/componenttemplates/response.go index 3a027e32e..0f9dd01c9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/componenttemplates/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/componenttemplates/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package componenttemplates @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package componenttemplates // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/component_templates/CatComponentTemplatesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/component_templates/CatComponentTemplatesResponse.ts#L22-L24 type Response []types.CatComponentTemplate diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/count/count.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/count/count.go index 6a5008283..b7fd7b62e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/count/count.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/count/count.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Provides quick access to the document count of the entire cluster, or // individual indices. @@ -177,7 +177,6 @@ func (r Count) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -186,6 +185,10 @@ func (r Count) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -217,11 +220,14 @@ func (r *Count) Header(key, value string) *Count { return r } -// Index A comma-separated list of index names to limit the returned information +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). To target all data streams and indices, omit this +// parameter or use `*` or `_all`. // API Name: index -func (r *Count) Index(v string) *Count { +func (r *Count) Index(index string) *Count { r.paramSet |= indexMask - r.index = v + r.index = index return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/count/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/count/response.go index 627a3e68d..b8cb1a6b9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/count/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/count/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package count @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package count // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/count/CatCountResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/count/CatCountResponse.ts#L22-L24 type Response []types.CountRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/fielddata/fielddata.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/fielddata/fielddata.go index b26e940be..820a0dde8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/fielddata/fielddata.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/fielddata/fielddata.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Shows how much heap memory is currently being used by fielddata on every data // node in the cluster. 
@@ -36,7 +36,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" ) @@ -179,7 +178,6 @@ func (r Fielddata) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -188,6 +186,10 @@ func (r Fielddata) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -219,19 +221,20 @@ func (r *Fielddata) Header(key, value string) *Fielddata { return r } -// Fields A comma-separated list of fields to return the fielddata size +// Fields Comma-separated list of fields used to limit returned information. +// To retrieve all fields, omit this parameter. // API Name: fields -func (r *Fielddata) Fields(v string) *Fielddata { +func (r *Fielddata) Fields(fields string) *Fielddata { r.paramSet |= fieldsMask - r.fields = v + r.fields = fields return r } -// Bytes The unit in which to display byte values +// Bytes The unit used to display byte values. // API name: bytes -func (r *Fielddata) Bytes(enum bytes.Bytes) *Fielddata { - r.values.Set("bytes", enum.String()) +func (r *Fielddata) Bytes(bytes bytes.Bytes) *Fielddata { + r.values.Set("bytes", bytes.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/fielddata/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/fielddata/response.go index ca179dbaf..4a8d7838b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/fielddata/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/fielddata/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package fielddata @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package fielddata // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/fielddata/CatFielddataResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/fielddata/CatFielddataResponse.ts#L22-L24 type Response []types.FielddataRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/health/health.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/health/health.go index 95941dda5..253ecebda 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/health/health.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/health/health.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns a concise representation of the cluster health. package health @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. 
@@ -160,7 +161,6 @@ func (r Health) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -169,6 +169,10 @@ func (r Health) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -200,10 +204,18 @@ func (r *Health) Header(key, value string) *Health { return r } -// Ts Set to false to disable timestamping +// Time The unit used to display time values. +// API name: time +func (r *Health) Time(time timeunit.TimeUnit) *Health { + r.values.Set("time", time.String()) + + return r +} + +// Ts If true, returns `HH:MM:SS` and Unix epoch timestamps. // API name: ts -func (r *Health) Ts(b bool) *Health { - r.values.Set("ts", strconv.FormatBool(b)) +func (r *Health) Ts(ts bool) *Health { + r.values.Set("ts", strconv.FormatBool(ts)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/health/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/health/response.go index c4ffe785f..a3151232f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/health/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/health/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package health @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package health // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/health/CatHealthResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/health/CatHealthResponse.ts#L22-L24 type Response []types.HealthRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/help/help.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/help/help.go index 03180524c..705dfb15a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/help/help.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/help/help.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns help for the Cat APIs. 
package help @@ -157,7 +157,6 @@ func (r Help) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -166,6 +165,10 @@ func (r Help) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/help/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/help/response.go index fce98baa6..4adbf31be 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/help/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/help/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package help @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package help // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/help/CatHelpResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/help/CatHelpResponse.ts#L22-L24 type Response []types.HelpRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/indices/indices.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/indices/indices.go index 33ab9bb3d..9f1c5da2f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/indices/indices.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/indices/indices.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about indices: number of primaries and replicas, document // counts, disk size, ... @@ -37,8 +37,8 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/healthstatus" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) @@ -182,7 +182,6 @@ func (r Indices) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -191,6 +190,10 @@ func (r Indices) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -222,62 +225,68 @@ func (r *Indices) Header(key, value string) *Indices { return r } -// Index A comma-separated list of index names to limit the returned information +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). To target all data streams and indices, omit this +// parameter or use `*` or `_all`. // API Name: index -func (r *Indices) Index(v string) *Indices { +func (r *Indices) Index(index string) *Indices { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// Bytes The unit in which to display byte values +// Bytes The unit used to display byte values. 
// API name: bytes -func (r *Indices) Bytes(enum bytes.Bytes) *Indices { - r.values.Set("bytes", enum.String()) +func (r *Indices) Bytes(bytes bytes.Bytes) *Indices { + r.values.Set("bytes", bytes.String()) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards The type of index that wildcard patterns can match. // API name: expand_wildcards -func (r *Indices) ExpandWildcards(v string) *Indices { - r.values.Set("expand_wildcards", v) +func (r *Indices) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Indices { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// Health A health status ("green", "yellow", or "red" to filter only indices matching -// the specified health status +// Health The health status used to limit returned indices. By default, the response +// includes indices of any health status. // API name: health -func (r *Indices) Health(enum healthstatus.HealthStatus) *Indices { - r.values.Set("health", enum.String()) +func (r *Indices) Health(health healthstatus.HealthStatus) *Indices { + r.values.Set("health", health.String()) return r } -// IncludeUnloadedSegments If set to true segment stats will include stats for segments that are not -// currently loaded into memory +// IncludeUnloadedSegments If true, the response includes information from segments that are not loaded +// into memory. 
// API name: include_unloaded_segments -func (r *Indices) IncludeUnloadedSegments(b bool) *Indices { - r.values.Set("include_unloaded_segments", strconv.FormatBool(b)) +func (r *Indices) IncludeUnloadedSegments(includeunloadedsegments bool) *Indices { + r.values.Set("include_unloaded_segments", strconv.FormatBool(includeunloadedsegments)) return r } -// Pri Set to true to return stats only for primary shards +// Pri If true, the response only includes information from primary shards. // API name: pri -func (r *Indices) Pri(b bool) *Indices { - r.values.Set("pri", strconv.FormatBool(b)) +func (r *Indices) Pri(pri bool) *Indices { + r.values.Set("pri", strconv.FormatBool(pri)) return r } -// Time The unit in which to display time values +// Time The unit used to display time values. // API name: time -func (r *Indices) Time(enum timeunit.TimeUnit) *Indices { - r.values.Set("time", enum.String()) +func (r *Indices) Time(time timeunit.TimeUnit) *Indices { + r.values.Set("time", time.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/indices/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/indices/response.go index 2ff24a733..5b9af27ea 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/indices/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/indices/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package indices @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package indices // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/indices/CatIndicesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/indices/CatIndicesResponse.ts#L22-L24 type Response []types.IndicesRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/master/master.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/master/master.go index 9fce3709e..a4bc5c227 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/master/master.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/master/master.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about the master node. 
package master @@ -159,7 +159,6 @@ func (r Master) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r Master) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/master/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/master/response.go index 08fbc60e8..4c1cdf380 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/master/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/master/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package master @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package master // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/master/CatMasterResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/master/CatMasterResponse.ts#L22-L24 type Response []types.MasterRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mldatafeeds/ml_datafeeds.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mldatafeeds/ml_datafeeds.go index 5a9a2f316..c455088e3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mldatafeeds/ml_datafeeds.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mldatafeeds/ml_datafeeds.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Gets configuration and usage information about datafeeds. package mldatafeeds @@ -36,7 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catdatafeedcolumn" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) @@ -182,7 +182,6 @@ func (r MlDatafeeds) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -191,6 +190,10 @@ func (r MlDatafeeds) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -224,9 +227,9 @@ func (r *MlDatafeeds) Header(key, value string) *MlDatafeeds { // DatafeedId A numerical character string that uniquely identifies the datafeed. // API Name: datafeedid -func (r *MlDatafeeds) DatafeedId(v string) *MlDatafeeds { +func (r *MlDatafeeds) DatafeedId(datafeedid string) *MlDatafeeds { r.paramSet |= datafeedidMask - r.datafeedid = v + r.datafeedid = datafeedid return r } @@ -243,16 +246,20 @@ func (r *MlDatafeeds) DatafeedId(v string) *MlDatafeeds { // there are no matches or only // partial matches. // API name: allow_no_match -func (r *MlDatafeeds) AllowNoMatch(b bool) *MlDatafeeds { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *MlDatafeeds) AllowNoMatch(allownomatch bool) *MlDatafeeds { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } // H Comma-separated list of column names to display. 
// API name: h -func (r *MlDatafeeds) H(v string) *MlDatafeeds { - r.values.Set("h", v) +func (r *MlDatafeeds) H(catdatafeedcolumns ...catdatafeedcolumn.CatDatafeedColumn) *MlDatafeeds { + tmp := []string{} + for _, item := range catdatafeedcolumns { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } @@ -260,16 +267,20 @@ func (r *MlDatafeeds) H(v string) *MlDatafeeds { // S Comma-separated list of column names or column aliases used to sort the // response. // API name: s -func (r *MlDatafeeds) S(v string) *MlDatafeeds { - r.values.Set("s", v) +func (r *MlDatafeeds) S(catdatafeedcolumns ...catdatafeedcolumn.CatDatafeedColumn) *MlDatafeeds { + tmp := []string{} + for _, item := range catdatafeedcolumns { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } // Time The unit used to display time values. // API name: time -func (r *MlDatafeeds) Time(enum timeunit.TimeUnit) *MlDatafeeds { - r.values.Set("time", enum.String()) +func (r *MlDatafeeds) Time(time timeunit.TimeUnit) *MlDatafeeds { + r.values.Set("time", time.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mldatafeeds/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mldatafeeds/response.go index e2a7bdb91..bb7596bd4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mldatafeeds/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mldatafeeds/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package mldatafeeds @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mldatafeeds // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/ml_datafeeds/CatDatafeedsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/ml_datafeeds/CatDatafeedsResponse.ts#L22-L24 type Response []types.DatafeedsRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mldataframeanalytics/ml_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mldataframeanalytics/ml_data_frame_analytics.go index 7fb536b70..66eb08b82 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mldataframeanalytics/ml_data_frame_analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mldataframeanalytics/ml_data_frame_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Gets configuration and usage information about data frame analytics jobs. 
package mldataframeanalytics @@ -36,8 +36,8 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catdfacolumn" ) const ( @@ -186,7 +186,6 @@ func (r MlDataFrameAnalytics) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -195,6 +194,10 @@ func (r MlDataFrameAnalytics) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -228,9 +231,9 @@ func (r *MlDataFrameAnalytics) Header(key, value string) *MlDataFrameAnalytics { // Id The ID of the data frame analytics to fetch // API Name: id -func (r *MlDataFrameAnalytics) Id(v string) *MlDataFrameAnalytics { +func (r *MlDataFrameAnalytics) Id(id string) *MlDataFrameAnalytics { r.paramSet |= idMask - r.id = v + r.id = id return r } @@ -238,24 +241,28 @@ func (r *MlDataFrameAnalytics) Id(v string) *MlDataFrameAnalytics { // AllowNoMatch Whether to ignore if a wildcard expression matches no configs. 
(This includes // `_all` string or when no configs have been specified) // API name: allow_no_match -func (r *MlDataFrameAnalytics) AllowNoMatch(b bool) *MlDataFrameAnalytics { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *MlDataFrameAnalytics) AllowNoMatch(allownomatch bool) *MlDataFrameAnalytics { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } // Bytes The unit in which to display byte values // API name: bytes -func (r *MlDataFrameAnalytics) Bytes(enum bytes.Bytes) *MlDataFrameAnalytics { - r.values.Set("bytes", enum.String()) +func (r *MlDataFrameAnalytics) Bytes(bytes bytes.Bytes) *MlDataFrameAnalytics { + r.values.Set("bytes", bytes.String()) return r } // H Comma-separated list of column names to display. // API name: h -func (r *MlDataFrameAnalytics) H(v string) *MlDataFrameAnalytics { - r.values.Set("h", v) +func (r *MlDataFrameAnalytics) H(catdfacolumns ...catdfacolumn.CatDfaColumn) *MlDataFrameAnalytics { + tmp := []string{} + for _, item := range catdfacolumns { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } @@ -263,16 +270,20 @@ func (r *MlDataFrameAnalytics) H(v string) *MlDataFrameAnalytics { // S Comma-separated list of column names or column aliases used to sort the // response. // API name: s -func (r *MlDataFrameAnalytics) S(v string) *MlDataFrameAnalytics { - r.values.Set("s", v) +func (r *MlDataFrameAnalytics) S(catdfacolumns ...catdfacolumn.CatDfaColumn) *MlDataFrameAnalytics { + tmp := []string{} + for _, item := range catdfacolumns { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } // Time Unit used to display time values. 
// API name: time -func (r *MlDataFrameAnalytics) Time(v string) *MlDataFrameAnalytics { - r.values.Set("time", v) +func (r *MlDataFrameAnalytics) Time(duration string) *MlDataFrameAnalytics { + r.values.Set("time", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mldataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mldataframeanalytics/response.go index b3a4d1760..11c06a30f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mldataframeanalytics/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mldataframeanalytics/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package mldataframeanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mldataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/ml_data_frame_analytics/CatDataFrameAnalyticsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/ml_data_frame_analytics/CatDataFrameAnalyticsResponse.ts#L22-L24 type Response []types.DataFrameAnalyticsRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mljobs/ml_jobs.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mljobs/ml_jobs.go index ea02f70a3..e7095bc72 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mljobs/ml_jobs.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mljobs/ml_jobs.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Gets configuration and usage information about anomaly detection jobs. package mljobs @@ -36,8 +36,8 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catanomalydetectorcolumn" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) @@ -183,7 +183,6 @@ func (r MlJobs) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -192,6 +191,10 @@ func (r MlJobs) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -225,9 +228,9 @@ func (r *MlJobs) Header(key, value string) *MlJobs { // JobId Identifier for the anomaly detection job. // API Name: jobid -func (r *MlJobs) JobId(v string) *MlJobs { +func (r *MlJobs) JobId(jobid string) *MlJobs { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } @@ -244,24 +247,28 @@ func (r *MlJobs) JobId(v string) *MlJobs { // are no matches or only partial // matches. // API name: allow_no_match -func (r *MlJobs) AllowNoMatch(b bool) *MlJobs { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *MlJobs) AllowNoMatch(allownomatch bool) *MlJobs { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } // Bytes The unit used to display byte values. 
// API name: bytes -func (r *MlJobs) Bytes(enum bytes.Bytes) *MlJobs { - r.values.Set("bytes", enum.String()) +func (r *MlJobs) Bytes(bytes bytes.Bytes) *MlJobs { + r.values.Set("bytes", bytes.String()) return r } // H Comma-separated list of column names to display. // API name: h -func (r *MlJobs) H(v string) *MlJobs { - r.values.Set("h", v) +func (r *MlJobs) H(catanonalydetectorcolumns ...catanomalydetectorcolumn.CatAnomalyDetectorColumn) *MlJobs { + tmp := []string{} + for _, item := range catanonalydetectorcolumns { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } @@ -269,16 +276,20 @@ func (r *MlJobs) H(v string) *MlJobs { // S Comma-separated list of column names or column aliases used to sort the // response. // API name: s -func (r *MlJobs) S(v string) *MlJobs { - r.values.Set("s", v) +func (r *MlJobs) S(catanonalydetectorcolumns ...catanomalydetectorcolumn.CatAnomalyDetectorColumn) *MlJobs { + tmp := []string{} + for _, item := range catanonalydetectorcolumns { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } // Time The unit used to display time values. // API name: time -func (r *MlJobs) Time(enum timeunit.TimeUnit) *MlJobs { - r.values.Set("time", enum.String()) +func (r *MlJobs) Time(time timeunit.TimeUnit) *MlJobs { + r.values.Set("time", time.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mljobs/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mljobs/response.go index b41433845..669f1bbc1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mljobs/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mljobs/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package mljobs @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mljobs // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/ml_jobs/CatJobsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/ml_jobs/CatJobsResponse.ts#L22-L24 type Response []types.JobsRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mltrainedmodels/ml_trained_models.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mltrainedmodels/ml_trained_models.go index 232b549b4..b2c7f8238 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mltrainedmodels/ml_trained_models.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mltrainedmodels/ml_trained_models.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Gets configuration and usage information about inference trained models. 
package mltrainedmodels @@ -36,8 +36,8 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cattrainedmodelscolumn" ) const ( @@ -182,7 +182,6 @@ func (r MlTrainedModels) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -191,6 +190,10 @@ func (r MlTrainedModels) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -222,60 +225,74 @@ func (r *MlTrainedModels) Header(key, value string) *MlTrainedModels { return r } -// ModelId The ID of the trained models stats to fetch +// ModelId A unique identifier for the trained model. // API Name: modelid -func (r *MlTrainedModels) ModelId(v string) *MlTrainedModels { +func (r *MlTrainedModels) ModelId(modelid string) *MlTrainedModels { r.paramSet |= modelidMask - r.modelid = v + r.modelid = modelid return r } -// AllowNoMatch Whether to ignore if a wildcard expression matches no trained models. (This -// includes `_all` string or when no trained models have been specified) +// AllowNoMatch Specifies what to do when the request: contains wildcard expressions and +// there are no models that match; contains the `_all` string or no identifiers +// and there are no matches; contains wildcard expressions and there are only +// partial matches. +// If `true`, the API returns an empty array when there are no matches and the +// subset of results when there are partial matches. +// If `false`, the API returns a 404 status code when there are no matches or +// only partial matches. 
// API name: allow_no_match -func (r *MlTrainedModels) AllowNoMatch(b bool) *MlTrainedModels { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *MlTrainedModels) AllowNoMatch(allownomatch bool) *MlTrainedModels { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } -// Bytes The unit in which to display byte values +// Bytes The unit used to display byte values. // API name: bytes -func (r *MlTrainedModels) Bytes(enum bytes.Bytes) *MlTrainedModels { - r.values.Set("bytes", enum.String()) +func (r *MlTrainedModels) Bytes(bytes bytes.Bytes) *MlTrainedModels { + r.values.Set("bytes", bytes.String()) return r } -// H Comma-separated list of column names to display +// H A comma-separated list of column names to display. // API name: h -func (r *MlTrainedModels) H(v string) *MlTrainedModels { - r.values.Set("h", v) +func (r *MlTrainedModels) H(cattrainedmodelscolumns ...cattrainedmodelscolumn.CatTrainedModelsColumn) *MlTrainedModels { + tmp := []string{} + for _, item := range cattrainedmodelscolumns { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// S Comma-separated list of column names or column aliases to sort by +// S A comma-separated list of column names or aliases used to sort the response. // API name: s -func (r *MlTrainedModels) S(v string) *MlTrainedModels { - r.values.Set("s", v) +func (r *MlTrainedModels) S(cattrainedmodelscolumns ...cattrainedmodelscolumn.CatTrainedModelsColumn) *MlTrainedModels { + tmp := []string{} + for _, item := range cattrainedmodelscolumns { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// From skips a number of trained models +// From Skips the specified number of transforms. 
// API name: from -func (r *MlTrainedModels) From(i int) *MlTrainedModels { - r.values.Set("from", strconv.Itoa(i)) +func (r *MlTrainedModels) From(from int) *MlTrainedModels { + r.values.Set("from", strconv.Itoa(from)) return r } -// Size specifies a max number of trained models to get +// Size The maximum number of transforms to display. // API name: size -func (r *MlTrainedModels) Size(i int) *MlTrainedModels { - r.values.Set("size", strconv.Itoa(i)) +func (r *MlTrainedModels) Size(size int) *MlTrainedModels { + r.values.Set("size", strconv.Itoa(size)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mltrainedmodels/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mltrainedmodels/response.go index c82e24f5e..f50c7526c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mltrainedmodels/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/mltrainedmodels/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package mltrainedmodels @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mltrainedmodels // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/ml_trained_models/CatTrainedModelsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/ml_trained_models/CatTrainedModelsResponse.ts#L22-L24 type Response []types.TrainedModelsRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/nodeattrs/nodeattrs.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/nodeattrs/nodeattrs.go index 6f95452cc..83bab7c73 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/nodeattrs/nodeattrs.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/nodeattrs/nodeattrs.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about custom node attributes. 
package nodeattrs @@ -159,7 +159,6 @@ func (r Nodeattrs) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r Nodeattrs) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/nodeattrs/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/nodeattrs/response.go index ccf3915b9..1c14452dd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/nodeattrs/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/nodeattrs/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package nodeattrs @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package nodeattrs // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/nodeattrs/CatNodeAttributesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/nodeattrs/CatNodeAttributesResponse.ts#L22-L24 type Response []types.NodeAttributesRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/nodes/nodes.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/nodes/nodes.go index 2d065769c..bce329232 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/nodes/nodes.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/nodes/nodes.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns basic statistics about performance of cluster nodes. package nodes @@ -31,11 +31,11 @@ import ( "io/ioutil" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" ) @@ -161,7 +161,6 @@ func (r Nodes) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -170,6 +169,10 @@ func (r Nodes) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -201,18 +204,27 @@ func (r *Nodes) Header(key, value string) *Nodes { return r } -// Bytes The unit in which to display byte values +// Bytes The unit used to display byte values. // API name: bytes -func (r *Nodes) Bytes(enum bytes.Bytes) *Nodes { - r.values.Set("bytes", enum.String()) +func (r *Nodes) Bytes(bytes bytes.Bytes) *Nodes { + r.values.Set("bytes", bytes.String()) return r } -// FullId Return the full node ID instead of the shortened version (default: false) +// FullId If `true`, return the full node ID. If `false`, return the shortened node ID. // API name: full_id -func (r *Nodes) FullId(v string) *Nodes { - r.values.Set("full_id", v) +func (r *Nodes) FullId(fullid string) *Nodes { + r.values.Set("full_id", fullid) + + return r +} + +// IncludeUnloadedSegments If true, the response includes information from segments that are not loaded +// into memory. 
+// API name: include_unloaded_segments +func (r *Nodes) IncludeUnloadedSegments(includeunloadedsegments bool) *Nodes { + r.values.Set("include_unloaded_segments", strconv.FormatBool(includeunloadedsegments)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/nodes/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/nodes/response.go index c9ee20390..8a2f84685 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/nodes/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/nodes/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package nodes @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package nodes // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/nodes/CatNodesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/nodes/CatNodesResponse.ts#L22-L24 type Response []types.NodesRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/pendingtasks/pending_tasks.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/pendingtasks/pending_tasks.go index 774dabf6b..cbf9f29d7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/pendingtasks/pending_tasks.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/pendingtasks/pending_tasks.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns a concise representation of the cluster pending tasks. package pendingtasks @@ -159,7 +159,6 @@ func (r PendingTasks) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r PendingTasks) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/pendingtasks/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/pendingtasks/response.go index bc7be74f0..7e7c7c4f6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/pendingtasks/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/pendingtasks/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package pendingtasks @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package pendingtasks // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/pending_tasks/CatPendingTasksResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/pending_tasks/CatPendingTasksResponse.ts#L22-L24 type Response []types.PendingTasksRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/plugins/plugins.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/plugins/plugins.go index eb18ecb4e..068ce5cd5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/plugins/plugins.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/plugins/plugins.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about installed plugins across nodes node. 
package plugins @@ -159,7 +159,6 @@ func (r Plugins) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r Plugins) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/plugins/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/plugins/response.go index 952983b5d..907479434 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/plugins/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/plugins/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package plugins @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package plugins // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/plugins/CatPluginsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/plugins/CatPluginsResponse.ts#L22-L24 type Response []types.PluginsRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/recovery/recovery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/recovery/recovery.go index d5f5ea0e8..4dc798137 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/recovery/recovery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/recovery/recovery.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about index shard recoveries, both on-going completed. package recovery @@ -36,7 +36,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" ) @@ -178,7 +177,6 @@ func (r Recovery) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -187,6 +185,10 @@ func (r Recovery) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -218,36 +220,38 @@ func (r *Recovery) Header(key, value string) *Recovery { return r } -// Index Comma-separated list or wildcard expression of index names to limit the -// returned information +// Index A comma-separated list of data streams, indices, and aliases used to limit +// the request. +// Supports wildcards (`*`). To target all data streams and indices, omit this +// parameter or use `*` or `_all`. // API Name: index -func (r *Recovery) Index(v string) *Recovery { +func (r *Recovery) Index(index string) *Recovery { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// ActiveOnly If `true`, the response only includes ongoing shard recoveries +// ActiveOnly If `true`, the response only includes ongoing shard recoveries. 
// API name: active_only -func (r *Recovery) ActiveOnly(b bool) *Recovery { - r.values.Set("active_only", strconv.FormatBool(b)) +func (r *Recovery) ActiveOnly(activeonly bool) *Recovery { + r.values.Set("active_only", strconv.FormatBool(activeonly)) return r } -// Bytes The unit in which to display byte values +// Bytes The unit used to display byte values. // API name: bytes -func (r *Recovery) Bytes(enum bytes.Bytes) *Recovery { - r.values.Set("bytes", enum.String()) +func (r *Recovery) Bytes(bytes bytes.Bytes) *Recovery { + r.values.Set("bytes", bytes.String()) return r } -// Detailed If `true`, the response includes detailed information about shard recoveries +// Detailed If `true`, the response includes detailed information about shard recoveries. // API name: detailed -func (r *Recovery) Detailed(b bool) *Recovery { - r.values.Set("detailed", strconv.FormatBool(b)) +func (r *Recovery) Detailed(detailed bool) *Recovery { + r.values.Set("detailed", strconv.FormatBool(detailed)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/recovery/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/recovery/response.go index b8af352f4..15cda5729 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/recovery/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/recovery/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package recovery @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package recovery // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/recovery/CatRecoveryResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/recovery/CatRecoveryResponse.ts#L22-L24 type Response []types.RecoveryRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/repositories/repositories.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/repositories/repositories.go index d48fafd85..06f365cf4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/repositories/repositories.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/repositories/repositories.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about snapshot repositories registered in the cluster. 
package repositories @@ -159,7 +159,6 @@ func (r Repositories) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r Repositories) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/repositories/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/repositories/response.go index ddc96f40c..ca2453c69 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/repositories/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/repositories/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package repositories @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package repositories // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/repositories/CatRepositoriesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/repositories/CatRepositoriesResponse.ts#L22-L24 type Response []types.RepositoriesRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/segments/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/segments/response.go index 625be2e73..95218aaff 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/segments/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/segments/response.go @@ 
-16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package segments @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package segments // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/segments/CatSegmentsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/segments/CatSegmentsResponse.ts#L22-L24 type Response []types.SegmentsRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/segments/segments.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/segments/segments.go index 8877b5206..1f768b56b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/segments/segments.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/segments/segments.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Provides low-level information about the segments in the shards of an index. 
package segments @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" ) @@ -177,7 +176,6 @@ func (r Segments) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -186,6 +184,10 @@ func (r Segments) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -217,19 +219,23 @@ func (r *Segments) Header(key, value string) *Segments { return r } -// Index A comma-separated list of index names to limit the returned information +// Index A comma-separated list of data streams, indices, and aliases used to limit +// the request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. // API Name: index -func (r *Segments) Index(v string) *Segments { +func (r *Segments) Index(index string) *Segments { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// Bytes The unit in which to display byte values +// Bytes The unit used to display byte values. // API name: bytes -func (r *Segments) Bytes(enum bytes.Bytes) *Segments { - r.values.Set("bytes", enum.String()) +func (r *Segments) Bytes(bytes bytes.Bytes) *Segments { + r.values.Set("bytes", bytes.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/shards/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/shards/response.go index fcc101f87..3c43dfa24 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/shards/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/shards/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package shards @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package shards // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/shards/CatShardsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/shards/CatShardsResponse.ts#L22-L24 type Response []types.ShardsRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/shards/shards.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/shards/shards.go index 91143818f..f8b3e6217 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/shards/shards.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/shards/shards.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Provides a detailed view of shard allocation on nodes. 
package shards @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes" ) @@ -177,7 +176,6 @@ func (r Shards) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -186,6 +184,10 @@ func (r Shards) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -217,19 +219,23 @@ func (r *Shards) Header(key, value string) *Shards { return r } -// Index A comma-separated list of index names to limit the returned information +// Index A comma-separated list of data streams, indices, and aliases used to limit +// the request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. // API Name: index -func (r *Shards) Index(v string) *Shards { +func (r *Shards) Index(index string) *Shards { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// Bytes The unit in which to display byte values +// Bytes The unit used to display byte values. // API name: bytes -func (r *Shards) Bytes(enum bytes.Bytes) *Shards { - r.values.Set("bytes", enum.String()) +func (r *Shards) Bytes(bytes bytes.Bytes) *Shards { + r.values.Set("bytes", bytes.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/snapshots/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/snapshots/response.go index d0dadfb3c..b539b5339 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/snapshots/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/snapshots/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package snapshots @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package snapshots // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/snapshots/CatSnapshotsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/snapshots/CatSnapshotsResponse.ts#L22-L24 type Response []types.SnapshotsRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/snapshots/snapshots.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/snapshots/snapshots.go index 77c352b2e..5a5768d35 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/snapshots/snapshots.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/snapshots/snapshots.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns all snapshots in a specific repository. 
package snapshots @@ -176,7 +176,6 @@ func (r Snapshots) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -185,6 +184,10 @@ func (r Snapshots) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,19 +219,23 @@ func (r *Snapshots) Header(key, value string) *Snapshots { return r } -// Repository Name of repository from which to fetch the snapshot information +// Repository A comma-separated list of snapshot repositories used to limit the request. +// Accepts wildcard expressions. +// `_all` returns all repositories. +// If any repository fails during the request, Elasticsearch returns an error. // API Name: repository -func (r *Snapshots) Repository(v string) *Snapshots { +func (r *Snapshots) Repository(repository string) *Snapshots { r.paramSet |= repositoryMask - r.repository = v + r.repository = repository return r } -// IgnoreUnavailable Set to true to ignore unavailable snapshots +// IgnoreUnavailable If `true`, the response does not include information from unavailable +// snapshots. // API name: ignore_unavailable -func (r *Snapshots) IgnoreUnavailable(b bool) *Snapshots { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Snapshots) IgnoreUnavailable(ignoreunavailable bool) *Snapshots { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/tasks/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/tasks/response.go index dfc492bba..d3dc2b65b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/tasks/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/tasks/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package tasks @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package tasks // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/tasks/CatTasksResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/tasks/CatTasksResponse.ts#L22-L24 type Response []types.TasksRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/tasks/tasks.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/tasks/tasks.go index db809d6b8..fc5900ed0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/tasks/tasks.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/tasks/tasks.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about the tasks currently executing on one or more nodes // in the cluster. @@ -162,7 +162,6 @@ func (r Tasks) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -171,6 +170,10 @@ func (r Tasks) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -202,33 +205,42 @@ func (r *Tasks) Header(key, value string) *Tasks { return r } -// Actions A comma-separated list of actions that should be returned. Leave empty to -// return all. 
+// Actions The task action names, which are used to limit the response. // API name: actions -func (r *Tasks) Actions(v string) *Tasks { - r.values.Set("actions", v) +func (r *Tasks) Actions(actions ...string) *Tasks { + tmp := []string{} + for _, item := range actions { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("actions", strings.Join(tmp, ",")) return r } -// Detailed Return detailed task information (default: false) +// Detailed If `true`, the response includes detailed information about shard recoveries. // API name: detailed -func (r *Tasks) Detailed(b bool) *Tasks { - r.values.Set("detailed", strconv.FormatBool(b)) +func (r *Tasks) Detailed(detailed bool) *Tasks { + r.values.Set("detailed", strconv.FormatBool(detailed)) return r } +// NodeId Unique node identifiers, which are used to limit the response. // API name: node_id -func (r *Tasks) NodeId(v string) *Tasks { - r.values.Set("node_id", v) +func (r *Tasks) NodeId(nodeids ...string) *Tasks { + tmp := []string{} + for _, item := range nodeids { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("node_id", strings.Join(tmp, ",")) return r } -// API name: parent_task -func (r *Tasks) ParentTask(v string) *Tasks { - r.values.Set("parent_task", v) +// ParentTaskId The parent task identifier, which is used to limit the response. +// API name: parent_task_id +func (r *Tasks) ParentTaskId(parenttaskid string) *Tasks { + r.values.Set("parent_task_id", parenttaskid) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/templates/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/templates/response.go index 4bec59c17..e6165a96a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/templates/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/templates/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package templates @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package templates // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/templates/CatTemplatesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/templates/CatTemplatesResponse.ts#L22-L24 type Response []types.TemplatesRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/templates/templates.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/templates/templates.go index 4990dfc64..6f2331b81 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/templates/templates.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/templates/templates.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about existing templates. 
package templates @@ -175,7 +175,6 @@ func (r Templates) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -184,6 +183,10 @@ func (r Templates) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -215,11 +218,12 @@ func (r *Templates) Header(key, value string) *Templates { return r } -// Name A pattern that returned template names must match +// Name The name of the template to return. +// Accepts wildcard expressions. If omitted, all templates are returned. // API Name: name -func (r *Templates) Name(v string) *Templates { +func (r *Templates) Name(name string) *Templates { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/threadpool/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/threadpool/response.go index cf149c275..ddc3438fb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/threadpool/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/threadpool/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package threadpool @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package threadpool // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/thread_pool/CatThreadPoolResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/thread_pool/CatThreadPoolResponse.ts#L22-L24 type Response []types.ThreadPoolRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/threadpool/thread_pool.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/threadpool/thread_pool.go index debcdae58..2422c6f4a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/threadpool/thread_pool.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/threadpool/thread_pool.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns cluster-wide thread pool statistics per node. 
// By default the active, queue and rejected statistics are returned for all @@ -37,7 +37,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) @@ -181,7 +180,6 @@ func (r ThreadPool) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -190,6 +188,10 @@ func (r ThreadPool) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -221,20 +223,20 @@ func (r *ThreadPool) Header(key, value string) *ThreadPool { return r } -// ThreadPoolPatterns List of thread pool names used to limit the request. Accepts wildcard -// expressions. +// ThreadPoolPatterns A comma-separated list of thread pool names used to limit the request. +// Accepts wildcard expressions. // API Name: threadpoolpatterns -func (r *ThreadPool) ThreadPoolPatterns(v string) *ThreadPool { +func (r *ThreadPool) ThreadPoolPatterns(threadpoolpatterns string) *ThreadPool { r.paramSet |= threadpoolpatternsMask - r.threadpoolpatterns = v + r.threadpoolpatterns = threadpoolpatterns return r } -// Time Unit used to display time values. +// Time The unit used to display time values. 
// API name: time -func (r *ThreadPool) Time(enum timeunit.TimeUnit) *ThreadPool { - r.values.Set("time", enum.String()) +func (r *ThreadPool) Time(time timeunit.TimeUnit) *ThreadPool { + r.values.Set("time", time.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/transforms/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/transforms/response.go index b59a7180d..8ad513e9d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/transforms/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/transforms/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package transforms @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package transforms // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/transforms/CatTransformsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/transforms/CatTransformsResponse.ts#L22-L24 type Response []types.TransformsRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/transforms/transforms.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/transforms/transforms.go index 0c6b36089..d0c5b870a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/transforms/transforms.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cat/transforms/transforms.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Gets configuration and usage information about transforms. package transforms @@ -36,7 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cattransformcolumn" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) @@ -178,7 +178,6 @@ func (r Transforms) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -187,6 +186,10 @@ func (r Transforms) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -218,37 +221,48 @@ func (r *Transforms) Header(key, value string) *Transforms { return r } -// TransformId The id of the transform for which to get stats. '_all' or '*' implies all -// transforms +// TransformId A transform identifier or a wildcard expression. +// If you do not specify one of these options, the API returns information for +// all transforms. // API Name: transformid -func (r *Transforms) TransformId(v string) *Transforms { +func (r *Transforms) TransformId(transformid string) *Transforms { r.paramSet |= transformidMask - r.transformid = v + r.transformid = transformid return r } -// AllowNoMatch Whether to ignore if a wildcard expression matches no transforms. 
(This -// includes `_all` string or when no transforms have been specified) +// AllowNoMatch Specifies what to do when the request: contains wildcard expressions and +// there are no transforms that match; contains the `_all` string or no +// identifiers and there are no matches; contains wildcard expressions and there +// are only partial matches. +// If `true`, it returns an empty transforms array when there are no matches and +// the subset of results when there are partial matches. +// If `false`, the request returns a 404 status code when there are no matches +// or only partial matches. // API name: allow_no_match -func (r *Transforms) AllowNoMatch(b bool) *Transforms { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *Transforms) AllowNoMatch(allownomatch bool) *Transforms { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } -// From skips a number of transform configs, defaults to 0 +// From Skips the specified number of transforms. // API name: from -func (r *Transforms) From(i int) *Transforms { - r.values.Set("from", strconv.Itoa(i)) +func (r *Transforms) From(from int) *Transforms { + r.values.Set("from", strconv.Itoa(from)) return r } // H Comma-separated list of column names to display. // API name: h -func (r *Transforms) H(v string) *Transforms { - r.values.Set("h", v) +func (r *Transforms) H(cattransformcolumns ...cattransformcolumn.CatTransformColumn) *Transforms { + tmp := []string{} + for _, item := range cattransformcolumns { + tmp = append(tmp, item.String()) + } + r.values.Set("h", strings.Join(tmp, ",")) return r } @@ -256,24 +270,28 @@ // S Comma-separated list of column names or column aliases used to sort the // response.
// API name: s -func (r *Transforms) S(v string) *Transforms { - r.values.Set("s", v) +func (r *Transforms) S(cattransformcolumns ...cattransformcolumn.CatTransformColumn) *Transforms { + tmp := []string{} + for _, item := range cattransformcolumns { + tmp = append(tmp, item.String()) + } + r.values.Set("s", strings.Join(tmp, ",")) return r } -// Time Unit used to display time values. +// Time The unit used to display time values. // API name: time -func (r *Transforms) Time(enum timeunit.TimeUnit) *Transforms { - r.values.Set("time", enum.String()) +func (r *Transforms) Time(time timeunit.TimeUnit) *Transforms { + r.values.Set("time", time.String()) return r } -// Size specifies a max number of transforms to get, defaults to 100 +// Size The maximum number of transforms to obtain. // API name: size -func (r *Transforms) Size(i int) *Transforms { - r.values.Set("size", strconv.Itoa(i)) +func (r *Transforms) Size(size int) *Transforms { + r.values.Set("size", strconv.Itoa(size)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/deleteautofollowpattern/delete_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/deleteautofollowpattern/delete_auto_follow_pattern.go index ec52d047a..998852d35 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/deleteautofollowpattern/delete_auto_follow_pattern.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/deleteautofollowpattern/delete_auto_follow_pattern.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes auto-follow patterns.
package deleteautofollowpattern @@ -67,7 +67,7 @@ func NewDeleteAutoFollowPatternFunc(tp elastictransport.Interface) NewDeleteAuto return func(name string) *DeleteAutoFollowPattern { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -170,7 +170,6 @@ func (r DeleteAutoFollowPattern) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r DeleteAutoFollowPattern) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,9 +215,9 @@ func (r *DeleteAutoFollowPattern) Header(key, value string) *DeleteAutoFollowPat // Name The name of the auto follow pattern. // API Name: name -func (r *DeleteAutoFollowPattern) Name(v string) *DeleteAutoFollowPattern { +func (r *DeleteAutoFollowPattern) _name(name string) *DeleteAutoFollowPattern { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/deleteautofollowpattern/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/deleteautofollowpattern/response.go index b928b62d0..3b2c61dda 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/deleteautofollowpattern/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/deleteautofollowpattern/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deleteautofollowpattern // Response holds the response body struct for the package deleteautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/delete_auto_follow_pattern/DeleteAutoFollowPatternResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/delete_auto_follow_pattern/DeleteAutoFollowPatternResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/follow/follow.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/follow/follow.go index 5dde90e97..f9346ba1b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/follow/follow.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/follow/follow.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates a new follower index configured to follow the referenced leader // index. 
@@ -53,8 +53,9 @@ type Follow struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +71,7 @@ func NewFollowFunc(tp elastictransport.Interface) NewFollow { return func(index string) *Follow { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -86,6 +87,8 @@ func New(tp elastictransport.Interface) *Follow { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -115,9 +118,19 @@ func (r *Follow) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -125,6 +138,7 @@ func (r *Follow) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -207,7 +221,6 @@ func (r Follow) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -216,6 +229,10 @@ func (r Follow) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -228,9 +245,9 @@ func (r *Follow) Header(key, value string) *Follow { // Index The name of the follower index // API Name: index -func (r *Follow) Index(v string) *Follow { +func (r *Follow) _index(index string) *Follow { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -240,8 +257,101 @@ func (r *Follow) Index(v string) *Follow { // non-negative value less than or equal to the total number of copies for the // shard (number of replicas + 1) // API name: wait_for_active_shards -func (r *Follow) WaitForActiveShards(v string) *Follow { - 
r.values.Set("wait_for_active_shards", v) +func (r *Follow) WaitForActiveShards(waitforactiveshards string) *Follow { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// API name: leader_index +func (r *Follow) LeaderIndex(indexname string) *Follow { + r.req.LeaderIndex = &indexname + + return r +} + +// API name: max_outstanding_read_requests +func (r *Follow) MaxOutstandingReadRequests(maxoutstandingreadrequests int64) *Follow { + + r.req.MaxOutstandingReadRequests = &maxoutstandingreadrequests + + return r +} + +// API name: max_outstanding_write_requests +func (r *Follow) MaxOutstandingWriteRequests(maxoutstandingwriterequests int64) *Follow { + + r.req.MaxOutstandingWriteRequests = &maxoutstandingwriterequests + + return r +} + +// API name: max_read_request_operation_count +func (r *Follow) MaxReadRequestOperationCount(maxreadrequestoperationcount int64) *Follow { + + r.req.MaxReadRequestOperationCount = &maxreadrequestoperationcount + + return r +} + +// API name: max_read_request_size +func (r *Follow) MaxReadRequestSize(maxreadrequestsize string) *Follow { + + r.req.MaxReadRequestSize = &maxreadrequestsize + + return r +} + +// API name: max_retry_delay +func (r *Follow) MaxRetryDelay(duration types.Duration) *Follow { + r.req.MaxRetryDelay = duration + + return r +} + +// API name: max_write_buffer_count +func (r *Follow) MaxWriteBufferCount(maxwritebuffercount int64) *Follow { + + r.req.MaxWriteBufferCount = &maxwritebuffercount + + return r +} + +// API name: max_write_buffer_size +func (r *Follow) MaxWriteBufferSize(maxwritebuffersize string) *Follow { + + r.req.MaxWriteBufferSize = &maxwritebuffersize + + return r +} + +// API name: max_write_request_operation_count +func (r *Follow) MaxWriteRequestOperationCount(maxwriterequestoperationcount int64) *Follow { + + r.req.MaxWriteRequestOperationCount = &maxwriterequestoperationcount + + return r +} + +// API name: max_write_request_size +func (r *Follow) 
MaxWriteRequestSize(maxwriterequestsize string) *Follow { + + r.req.MaxWriteRequestSize = &maxwriterequestsize + + return r +} + +// API name: read_poll_timeout +func (r *Follow) ReadPollTimeout(duration types.Duration) *Follow { + r.req.ReadPollTimeout = duration + + return r +} + +// API name: remote_cluster +func (r *Follow) RemoteCluster(remotecluster string) *Follow { + + r.req.RemoteCluster = &remotecluster return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/follow/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/follow/request.go index a7748c497..8f284a451 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/follow/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/follow/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package follow @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package follow // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/follow/CreateFollowIndexRequest.ts#L25-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/follow/CreateFollowIndexRequest.ts#L25-L51 type Request struct { LeaderIndex *string `json:"leader_index,omitempty"` MaxOutstandingReadRequests *int64 `json:"max_outstanding_read_requests,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/follow/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/follow/response.go index 04901abe0..6f4ea0df6 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/follow/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/follow/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package follow // Response holds the response body struct for the package follow // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/follow/CreateFollowIndexResponse.ts#L20-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/follow/CreateFollowIndexResponse.ts#L20-L26 type Response struct { FollowIndexCreated bool `json:"follow_index_created"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/followinfo/follow_info.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/followinfo/follow_info.go index 9ddd40fd7..b1fe690f5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/followinfo/follow_info.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/followinfo/follow_info.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves information about all follower indices, including parameters and // status for each follower index @@ -68,7 +68,7 @@ func NewFollowInfoFunc(tp elastictransport.Interface) NewFollowInfo { return func(index string) *FollowInfo { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -172,7 +172,6 @@ func (r FollowInfo) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r FollowInfo) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -215,9 +218,9 @@ func (r *FollowInfo) Header(key, value string) *FollowInfo { // Index A comma-separated list of index patterns; use `_all` to perform the operation // on all indices // API Name: index -func (r *FollowInfo) Index(v string) *FollowInfo { +func (r *FollowInfo) _index(index string) *FollowInfo { r.paramSet |= indexMask - r.index = v + r.index = index return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/followinfo/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/followinfo/response.go index f94b067cd..652608192 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/followinfo/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/followinfo/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package followinfo @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package followinfo // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/follow_info/FollowInfoResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/follow_info/FollowInfoResponse.ts#L22-L24 type Response struct { FollowerIndices []types.FollowerIndex `json:"follower_indices"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/followstats/follow_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/followstats/follow_stats.go index 913b3d81d..1eed8fed0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/followstats/follow_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/followstats/follow_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves follower stats. return shard-level stats about the following tasks // associated with each shard for the specified indices. 
@@ -68,7 +68,7 @@ func NewFollowStatsFunc(tp elastictransport.Interface) NewFollowStats { return func(index string) *FollowStats { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -172,7 +172,6 @@ func (r FollowStats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r FollowStats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -215,9 +218,9 @@ func (r *FollowStats) Header(key, value string) *FollowStats { // Index A comma-separated list of index patterns; use `_all` to perform the operation // on all indices // API Name: index -func (r *FollowStats) Index(v string) *FollowStats { +func (r *FollowStats) _index(index string) *FollowStats { r.paramSet |= indexMask - r.index = v + r.index = index return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/followstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/followstats/response.go index 7d67ba38b..9c470f8f2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/followstats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/followstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package followstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package followstats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/follow_stats/FollowIndexStatsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/follow_stats/FollowIndexStatsResponse.ts#L22-L24 type Response struct { Indices []types.FollowIndexStats `json:"indices"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/forgetfollower/forget_follower.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/forgetfollower/forget_follower.go index e75e35a46..0ee1e4995 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/forgetfollower/forget_follower.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/forgetfollower/forget_follower.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Removes the follower retention leases from the leader. 
package forgetfollower @@ -52,8 +52,9 @@ type ForgetFollower struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewForgetFollowerFunc(tp elastictransport.Interface) NewForgetFollower { return func(index string) *ForgetFollower { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *ForgetFollower { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *ForgetFollower) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *ForgetFollower) HttpRequest(ctx context.Context) (*http.Request, error) } r.buf.Write(data) + } r.path.Scheme = "http" @@ -205,7 +219,6 @@ func (r ForgetFollower) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -214,6 +227,10 @@ func (r ForgetFollower) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -227,9 +244,39 @@ func (r *ForgetFollower) Header(key, value string) *ForgetFollower { // Index the name of the leader index for which specified follower retention leases // should be removed // API Name: index -func (r *ForgetFollower) Index(v string) *ForgetFollower { +func (r *ForgetFollower) _index(index string) *ForgetFollower { r.paramSet |= indexMask - r.index = v + r.index = index + + return r +} + +// API name: follower_cluster +func (r *ForgetFollower) 
FollowerCluster(followercluster string) *ForgetFollower { + + r.req.FollowerCluster = &followercluster + + return r +} + +// API name: follower_index +func (r *ForgetFollower) FollowerIndex(indexname string) *ForgetFollower { + r.req.FollowerIndex = &indexname + + return r +} + +// API name: follower_index_uuid +func (r *ForgetFollower) FollowerIndexUuid(uuid string) *ForgetFollower { + r.req.FollowerIndexUuid = &uuid + + return r +} + +// API name: leader_remote_cluster +func (r *ForgetFollower) LeaderRemoteCluster(leaderremotecluster string) *ForgetFollower { + + r.req.LeaderRemoteCluster = &leaderremotecluster return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/forgetfollower/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/forgetfollower/request.go index ae0817c62..52633a9b8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/forgetfollower/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/forgetfollower/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package forgetfollower @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package forgetfollower // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/forget_follower/ForgetFollowerIndexRequest.ts#L23-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/forget_follower/ForgetFollowerIndexRequest.ts#L23-L38 type Request struct { FollowerCluster *string `json:"follower_cluster,omitempty"` FollowerIndex *string `json:"follower_index,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/forgetfollower/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/forgetfollower/response.go index 16cc116b2..d109813e4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/forgetfollower/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/forgetfollower/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package forgetfollower @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package forgetfollower // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/forget_follower/ForgetFollowerIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/forget_follower/ForgetFollowerIndexResponse.ts#L22-L24 type Response struct { Shards_ types.ShardStatistics `json:"_shards"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/getautofollowpattern/get_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/getautofollowpattern/get_auto_follow_pattern.go index 76a1a25a8..2f7870d0c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/getautofollowpattern/get_auto_follow_pattern.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/getautofollowpattern/get_auto_follow_pattern.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Gets configured auto-follow patterns. Returns the specified auto-follow // pattern collection. 
@@ -177,7 +177,6 @@ func (r GetAutoFollowPattern) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -186,6 +185,10 @@ func (r GetAutoFollowPattern) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -220,9 +223,9 @@ func (r *GetAutoFollowPattern) Header(key, value string) *GetAutoFollowPattern { // Name Specifies the auto-follow pattern collection that you want to retrieve. If // you do not specify a name, the API returns information for all collections. // API Name: name -func (r *GetAutoFollowPattern) Name(v string) *GetAutoFollowPattern { +func (r *GetAutoFollowPattern) Name(name string) *GetAutoFollowPattern { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/getautofollowpattern/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/getautofollowpattern/response.go index d5913f32c..7b492a725 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/getautofollowpattern/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/getautofollowpattern/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getautofollowpattern @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/get_auto_follow_pattern/GetAutoFollowPatternResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/get_auto_follow_pattern/GetAutoFollowPatternResponse.ts#L22-L24 type Response struct { Patterns []types.AutoFollowPattern `json:"patterns"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/pauseautofollowpattern/pause_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/pauseautofollowpattern/pause_auto_follow_pattern.go index 86c3aaee5..e7d689121 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/pauseautofollowpattern/pause_auto_follow_pattern.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/pauseautofollowpattern/pause_auto_follow_pattern.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Pauses an auto-follow pattern package pauseautofollowpattern @@ -67,7 +67,7 @@ func NewPauseAutoFollowPatternFunc(tp elastictransport.Interface) NewPauseAutoFo return func(name string) *PauseAutoFollowPattern { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -172,7 +172,6 @@ func (r PauseAutoFollowPattern) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r PauseAutoFollowPattern) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -215,9 +218,9 @@ func (r *PauseAutoFollowPattern) Header(key, value string) *PauseAutoFollowPatte // Name The name of the auto follow pattern that should pause discovering new indices // to follow. // API Name: name -func (r *PauseAutoFollowPattern) Name(v string) *PauseAutoFollowPattern { +func (r *PauseAutoFollowPattern) _name(name string) *PauseAutoFollowPattern { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/pauseautofollowpattern/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/pauseautofollowpattern/response.go index 596e804b3..27bb794e2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/pauseautofollowpattern/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/pauseautofollowpattern/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package pauseautofollowpattern // Response holds the response body struct for the package pauseautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/pause_auto_follow_pattern/PauseAutoFollowPatternResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/pause_auto_follow_pattern/PauseAutoFollowPatternResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/pausefollow/pause_follow.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/pausefollow/pause_follow.go index 09690c31c..ea9c1ca75 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/pausefollow/pause_follow.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/pausefollow/pause_follow.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Pauses a follower index. The follower index will not fetch any additional // operations from the leader index. 
@@ -68,7 +68,7 @@ func NewPauseFollowFunc(tp elastictransport.Interface) NewPauseFollow { return func(index string) *PauseFollow { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -172,7 +172,6 @@ func (r PauseFollow) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r PauseFollow) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +217,9 @@ func (r *PauseFollow) Header(key, value string) *PauseFollow { // Index The name of the follower index that should pause following its leader index. // API Name: index -func (r *PauseFollow) Index(v string) *PauseFollow { +func (r *PauseFollow) _index(index string) *PauseFollow { r.paramSet |= indexMask - r.index = v + r.index = index return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/pausefollow/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/pausefollow/response.go index 347e9e528..738fa58fb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/pausefollow/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/pausefollow/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package pausefollow // Response holds the response body struct for the package pausefollow // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/pause_follow/PauseFollowIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/pause_follow/PauseFollowIndexResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/putautofollowpattern/put_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/putautofollowpattern/put_auto_follow_pattern.go index 3c3b9996a..27c21ee4b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/putautofollowpattern/put_auto_follow_pattern.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/putautofollowpattern/put_auto_follow_pattern.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates a new named collection of auto-follow patterns against a specified // remote cluster. 
Newly created indices on the remote cluster matching any of @@ -54,8 +54,9 @@ type PutAutoFollowPattern struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -71,7 +72,7 @@ func NewPutAutoFollowPatternFunc(tp elastictransport.Interface) NewPutAutoFollow return func(name string) *PutAutoFollowPattern { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -88,6 +89,8 @@ func New(tp elastictransport.Interface) *PutAutoFollowPattern { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -117,9 +120,19 @@ func (r *PutAutoFollowPattern) HttpRequest(ctx context.Context) (*http.Request, var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -127,6 +140,7 @@ func (r *PutAutoFollowPattern) HttpRequest(ctx context.Context) (*http.Request, } r.buf.Write(data) + } r.path.Scheme = "http" @@ -209,7 +223,6 @@ func (r PutAutoFollowPattern) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -218,6 +231,10 @@ func (r PutAutoFollowPattern) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -230,9 +247,151 @@ func (r *PutAutoFollowPattern) Header(key, value string) *PutAutoFollowPattern { // Name The name of the collection of auto-follow patterns. 
// API Name: name -func (r *PutAutoFollowPattern) Name(v string) *PutAutoFollowPattern { +func (r *PutAutoFollowPattern) _name(name string) *PutAutoFollowPattern { r.paramSet |= nameMask - r.name = v + r.name = name + + return r +} + +// FollowIndexPattern The name of follower index. The template {{leader_index}} can be used to +// derive the name of the follower index from the name of the leader index. When +// following a data stream, use {{leader_index}}; CCR does not support changes +// to the names of a follower data stream’s backing indices. +// API name: follow_index_pattern +func (r *PutAutoFollowPattern) FollowIndexPattern(indexpattern string) *PutAutoFollowPattern { + r.req.FollowIndexPattern = &indexpattern + + return r +} + +// LeaderIndexExclusionPatterns An array of simple index patterns that can be used to exclude indices from +// being auto-followed. Indices in the remote cluster whose names are matching +// one or more leader_index_patterns and one or more +// leader_index_exclusion_patterns won’t be followed. +// API name: leader_index_exclusion_patterns +func (r *PutAutoFollowPattern) LeaderIndexExclusionPatterns(indexpatterns ...string) *PutAutoFollowPattern { + r.req.LeaderIndexExclusionPatterns = indexpatterns + + return r +} + +// LeaderIndexPatterns An array of simple index patterns to match against indices in the remote +// cluster specified by the remote_cluster field. +// API name: leader_index_patterns +func (r *PutAutoFollowPattern) LeaderIndexPatterns(indexpatterns ...string) *PutAutoFollowPattern { + r.req.LeaderIndexPatterns = indexpatterns + + return r +} + +// MaxOutstandingReadRequests The maximum number of outstanding reads requests from the remote cluster. 
+// API name: max_outstanding_read_requests +func (r *PutAutoFollowPattern) MaxOutstandingReadRequests(maxoutstandingreadrequests int) *PutAutoFollowPattern { + r.req.MaxOutstandingReadRequests = &maxoutstandingreadrequests + + return r +} + +// MaxOutstandingWriteRequests The maximum number of outstanding reads requests from the remote cluster. +// API name: max_outstanding_write_requests +func (r *PutAutoFollowPattern) MaxOutstandingWriteRequests(maxoutstandingwriterequests int) *PutAutoFollowPattern { + r.req.MaxOutstandingWriteRequests = &maxoutstandingwriterequests + + return r +} + +// MaxReadRequestOperationCount The maximum number of operations to pull per read from the remote cluster. +// API name: max_read_request_operation_count +func (r *PutAutoFollowPattern) MaxReadRequestOperationCount(maxreadrequestoperationcount int) *PutAutoFollowPattern { + r.req.MaxReadRequestOperationCount = &maxreadrequestoperationcount + + return r +} + +// MaxReadRequestSize The maximum size in bytes of per read of a batch of operations pulled from +// the remote cluster. +// API name: max_read_request_size +func (r *PutAutoFollowPattern) MaxReadRequestSize(bytesize types.ByteSize) *PutAutoFollowPattern { + r.req.MaxReadRequestSize = bytesize + + return r +} + +// MaxRetryDelay The maximum time to wait before retrying an operation that failed +// exceptionally. An exponential backoff strategy is employed when retrying. +// API name: max_retry_delay +func (r *PutAutoFollowPattern) MaxRetryDelay(duration types.Duration) *PutAutoFollowPattern { + r.req.MaxRetryDelay = duration + + return r +} + +// MaxWriteBufferCount The maximum number of operations that can be queued for writing. When this +// limit is reached, reads from the remote cluster will be deferred until the +// number of queued operations goes below the limit. 
+// API name: max_write_buffer_count +func (r *PutAutoFollowPattern) MaxWriteBufferCount(maxwritebuffercount int) *PutAutoFollowPattern { + r.req.MaxWriteBufferCount = &maxwritebuffercount + + return r +} + +// MaxWriteBufferSize The maximum total bytes of operations that can be queued for writing. When +// this limit is reached, reads from the remote cluster will be deferred until +// the total bytes of queued operations goes below the limit. +// API name: max_write_buffer_size +func (r *PutAutoFollowPattern) MaxWriteBufferSize(bytesize types.ByteSize) *PutAutoFollowPattern { + r.req.MaxWriteBufferSize = bytesize + + return r +} + +// MaxWriteRequestOperationCount The maximum number of operations per bulk write request executed on the +// follower. +// API name: max_write_request_operation_count +func (r *PutAutoFollowPattern) MaxWriteRequestOperationCount(maxwriterequestoperationcount int) *PutAutoFollowPattern { + r.req.MaxWriteRequestOperationCount = &maxwriterequestoperationcount + + return r +} + +// MaxWriteRequestSize The maximum total bytes of operations per bulk write request executed on the +// follower. +// API name: max_write_request_size +func (r *PutAutoFollowPattern) MaxWriteRequestSize(bytesize types.ByteSize) *PutAutoFollowPattern { + r.req.MaxWriteRequestSize = bytesize + + return r +} + +// ReadPollTimeout The maximum time to wait for new operations on the remote cluster when the +// follower index is synchronized with the leader index. When the timeout has +// elapsed, the poll for operations will return to the follower so that it can +// update some statistics. Then the follower will immediately attempt to read +// from the leader again. +// API name: read_poll_timeout +func (r *PutAutoFollowPattern) ReadPollTimeout(duration types.Duration) *PutAutoFollowPattern { + r.req.ReadPollTimeout = duration + + return r +} + +// RemoteCluster The remote cluster containing the leader indices to match against. 
+// API name: remote_cluster +func (r *PutAutoFollowPattern) RemoteCluster(remotecluster string) *PutAutoFollowPattern { + + r.req.RemoteCluster = remotecluster + + return r +} + +// Settings Settings to override from the leader index. Note that certain settings can +// not be overrode (e.g., index.number_of_shards). +// API name: settings +func (r *PutAutoFollowPattern) Settings(settings map[string]json.RawMessage) *PutAutoFollowPattern { + + r.req.Settings = settings return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/putautofollowpattern/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/putautofollowpattern/request.go index 3732ac35e..6db622c23 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/putautofollowpattern/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/putautofollowpattern/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putautofollowpattern @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package putautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/put_auto_follow_pattern/PutAutoFollowPatternRequest.ts#L27-L113 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/put_auto_follow_pattern/PutAutoFollowPatternRequest.ts#L27-L112 type Request struct { // FollowIndexPattern The name of follower index. 
The template {{leader_index}} can be used to diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/putautofollowpattern/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/putautofollowpattern/response.go index 9b28d8ba1..6b17bcff0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/putautofollowpattern/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/putautofollowpattern/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putautofollowpattern // Response holds the response body struct for the package putautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/put_auto_follow_pattern/PutAutoFollowPatternResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/put_auto_follow_pattern/PutAutoFollowPatternResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumeautofollowpattern/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumeautofollowpattern/response.go index f1fc844ac..b031d6e21 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumeautofollowpattern/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumeautofollowpattern/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package resumeautofollowpattern // Response holds the response body struct for the package resumeautofollowpattern // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/resume_auto_follow_pattern/ResumeAutoFollowPatternResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/resume_auto_follow_pattern/ResumeAutoFollowPatternResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumeautofollowpattern/resume_auto_follow_pattern.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumeautofollowpattern/resume_auto_follow_pattern.go index 20fe19272..fc5825ed5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumeautofollowpattern/resume_auto_follow_pattern.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumeautofollowpattern/resume_auto_follow_pattern.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Resumes an auto-follow pattern that has been paused package resumeautofollowpattern @@ -67,7 +67,7 @@ func NewResumeAutoFollowPatternFunc(tp elastictransport.Interface) NewResumeAuto return func(name string) *ResumeAutoFollowPattern { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -172,7 +172,6 @@ func (r ResumeAutoFollowPattern) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r ResumeAutoFollowPattern) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -215,9 +218,9 @@ func (r *ResumeAutoFollowPattern) Header(key, value string) *ResumeAutoFollowPat // Name The name of the auto follow pattern to resume discovering new indices to // follow. // API Name: name -func (r *ResumeAutoFollowPattern) Name(v string) *ResumeAutoFollowPattern { +func (r *ResumeAutoFollowPattern) _name(name string) *ResumeAutoFollowPattern { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumefollow/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumefollow/request.go index 3bcb21278..309af0ccb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumefollow/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumefollow/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package resumefollow @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package resumefollow // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/resume_follow/ResumeFollowIndexRequest.ts#L25-L47 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/resume_follow/ResumeFollowIndexRequest.ts#L25-L46 type Request struct { MaxOutstandingReadRequests *int64 `json:"max_outstanding_read_requests,omitempty"` MaxOutstandingWriteRequests *int64 `json:"max_outstanding_write_requests,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumefollow/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumefollow/response.go index bb7a9d233..ebe6b6642 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumefollow/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumefollow/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package resumefollow // Response holds the response body struct for the package resumefollow // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/resume_follow/ResumeFollowIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/resume_follow/ResumeFollowIndexResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumefollow/resume_follow.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumefollow/resume_follow.go index 92cd918b4..e58f9d323 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumefollow/resume_follow.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/resumefollow/resume_follow.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Resumes a follower index that has been paused package resumefollow @@ -52,8 +52,9 @@ type ResumeFollow struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewResumeFollowFunc(tp elastictransport.Interface) NewResumeFollow { return func(index string) *ResumeFollow { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *ResumeFollow { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *ResumeFollow) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *ResumeFollow) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -205,7 +219,6 @@ func (r ResumeFollow) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -214,6 +227,10 @@ func (r ResumeFollow) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -226,9 +243,87 @@ func (r *ResumeFollow) Header(key, value string) *ResumeFollow { // Index The name of the follow index to resume following. 
// API Name: index -func (r *ResumeFollow) Index(v string) *ResumeFollow { +func (r *ResumeFollow) _index(index string) *ResumeFollow { r.paramSet |= indexMask - r.index = v + r.index = index + + return r +} + +// API name: max_outstanding_read_requests +func (r *ResumeFollow) MaxOutstandingReadRequests(maxoutstandingreadrequests int64) *ResumeFollow { + + r.req.MaxOutstandingReadRequests = &maxoutstandingreadrequests + + return r +} + +// API name: max_outstanding_write_requests +func (r *ResumeFollow) MaxOutstandingWriteRequests(maxoutstandingwriterequests int64) *ResumeFollow { + + r.req.MaxOutstandingWriteRequests = &maxoutstandingwriterequests + + return r +} + +// API name: max_read_request_operation_count +func (r *ResumeFollow) MaxReadRequestOperationCount(maxreadrequestoperationcount int64) *ResumeFollow { + + r.req.MaxReadRequestOperationCount = &maxreadrequestoperationcount + + return r +} + +// API name: max_read_request_size +func (r *ResumeFollow) MaxReadRequestSize(maxreadrequestsize string) *ResumeFollow { + + r.req.MaxReadRequestSize = &maxreadrequestsize + + return r +} + +// API name: max_retry_delay +func (r *ResumeFollow) MaxRetryDelay(duration types.Duration) *ResumeFollow { + r.req.MaxRetryDelay = duration + + return r +} + +// API name: max_write_buffer_count +func (r *ResumeFollow) MaxWriteBufferCount(maxwritebuffercount int64) *ResumeFollow { + + r.req.MaxWriteBufferCount = &maxwritebuffercount + + return r +} + +// API name: max_write_buffer_size +func (r *ResumeFollow) MaxWriteBufferSize(maxwritebuffersize string) *ResumeFollow { + + r.req.MaxWriteBufferSize = &maxwritebuffersize + + return r +} + +// API name: max_write_request_operation_count +func (r *ResumeFollow) MaxWriteRequestOperationCount(maxwriterequestoperationcount int64) *ResumeFollow { + + r.req.MaxWriteRequestOperationCount = &maxwriterequestoperationcount + + return r +} + +// API name: max_write_request_size +func (r *ResumeFollow) MaxWriteRequestSize(maxwriterequestsize 
string) *ResumeFollow { + + r.req.MaxWriteRequestSize = &maxwriterequestsize + + return r +} + +// API name: read_poll_timeout +func (r *ResumeFollow) ReadPollTimeout(duration types.Duration) *ResumeFollow { + r.req.ReadPollTimeout = duration return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/stats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/stats/response.go index 8c27bf681..fef80d0f0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/stats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package stats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/stats/CcrStatsResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/stats/CcrStatsResponse.ts#L22-L27 type Response struct { AutoFollowStats types.AutoFollowStats `json:"auto_follow_stats"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/stats/stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/stats/stats.go index 8aaedbc0c..f2e03ff9f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/stats/stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/stats/stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Gets all stats related to cross-cluster replication. package stats @@ -159,7 +159,6 @@ func (r Stats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r Stats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/unfollow/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/unfollow/response.go index 65441e7fe..3be636f41 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/unfollow/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/unfollow/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package unfollow // Response holds the response body struct for the package unfollow // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/unfollow/UnfollowIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/unfollow/UnfollowIndexResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/unfollow/unfollow.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/unfollow/unfollow.go index 946f44bc0..baae3a799 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/unfollow/unfollow.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ccr/unfollow/unfollow.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Stops the following task associated with a follower index and removes index // metadata and settings associated with cross-cluster replication. 
@@ -68,7 +68,7 @@ func NewUnfollowFunc(tp elastictransport.Interface) NewUnfollow { return func(index string) *Unfollow { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -172,7 +172,6 @@ func (r Unfollow) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r Unfollow) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +217,9 @@ func (r *Unfollow) Header(key, value string) *Unfollow { // Index The name of the follower index that should be turned into a regular index. // API Name: index -func (r *Unfollow) Index(v string) *Unfollow { +func (r *Unfollow) _index(index string) *Unfollow { r.paramSet |= indexMask - r.index = v + r.index = index return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/allocationexplain/allocation_explain.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/allocationexplain/allocation_explain.go index 733926d20..57e171b95 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/allocationexplain/allocation_explain.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/allocationexplain/allocation_explain.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Provides explanations for shard allocations in the cluster. 
package allocationexplain @@ -49,8 +49,9 @@ type AllocationExplain struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -77,6 +78,8 @@ func New(tp elastictransport.Interface) *AllocationExplain { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -106,9 +109,19 @@ func (r *AllocationExplain) HttpRequest(ctx context.Context) (*http.Request, err var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -116,6 +129,7 @@ func (r *AllocationExplain) HttpRequest(ctx context.Context) (*http.Request, err } r.buf.Write(data) + } r.path.Scheme = "http" @@ -197,7 +211,6 @@ func (r AllocationExplain) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -206,6 +219,10 @@ func (r AllocationExplain) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -218,16 +235,50 @@ func (r *AllocationExplain) Header(key, value string) *AllocationExplain { // IncludeDiskInfo If true, returns information about disk usage and shard sizes. // API name: include_disk_info -func (r *AllocationExplain) IncludeDiskInfo(b bool) *AllocationExplain { - r.values.Set("include_disk_info", strconv.FormatBool(b)) +func (r *AllocationExplain) IncludeDiskInfo(includediskinfo bool) *AllocationExplain { + r.values.Set("include_disk_info", strconv.FormatBool(includediskinfo)) return r } // IncludeYesDecisions If true, returns YES decisions in explanation. 
// API name: include_yes_decisions -func (r *AllocationExplain) IncludeYesDecisions(b bool) *AllocationExplain { - r.values.Set("include_yes_decisions", strconv.FormatBool(b)) +func (r *AllocationExplain) IncludeYesDecisions(includeyesdecisions bool) *AllocationExplain { + r.values.Set("include_yes_decisions", strconv.FormatBool(includeyesdecisions)) + + return r +} + +// CurrentNode Specifies the node ID or the name of the node to only explain a shard that is +// currently located on the specified node. +// API name: current_node +func (r *AllocationExplain) CurrentNode(currentnode string) *AllocationExplain { + + r.req.CurrentNode = ¤tnode + + return r +} + +// Index Specifies the name of the index that you would like an explanation for. +// API name: index +func (r *AllocationExplain) Index(indexname string) *AllocationExplain { + r.req.Index = &indexname + + return r +} + +// Primary If true, returns explanation for the primary shard for the given shard ID. +// API name: primary +func (r *AllocationExplain) Primary(primary bool) *AllocationExplain { + r.req.Primary = &primary + + return r +} + +// Shard Specifies the ID of the shard that you would like an explanation for. +// API name: shard +func (r *AllocationExplain) Shard(shard int) *AllocationExplain { + r.req.Shard = &shard return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/allocationexplain/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/allocationexplain/request.go index 8b6c5323e..cc858a8ef 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/allocationexplain/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/allocationexplain/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package allocationexplain @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package allocationexplain // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/allocation_explain/ClusterAllocationExplainRequest.ts#L24-L61 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/allocation_explain/ClusterAllocationExplainRequest.ts#L24-L61 type Request struct { // CurrentNode Specifies the node ID or the name of the node to only explain a shard that is diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/allocationexplain/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/allocationexplain/response.go index 75592541c..a15f604c4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/allocationexplain/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/allocationexplain/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package allocationexplain @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package allocationexplain // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/allocation_explain/ClusterAllocationExplainResponse.ts#L32-L61 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/allocation_explain/ClusterAllocationExplainResponse.ts#L32-L64 type Response struct { AllocateExplanation *string `json:"allocate_explanation,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/deletecomponenttemplate/delete_component_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/deletecomponenttemplate/delete_component_template.go index 0fde57452..508462c32 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/deletecomponenttemplate/delete_component_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/deletecomponenttemplate/delete_component_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes a component template package deletecomponenttemplate @@ -67,7 +67,7 @@ func NewDeleteComponentTemplateFunc(tp elastictransport.Interface) NewDeleteComp return func(name string) *DeleteComponentTemplate { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -168,7 +168,6 @@ func (r DeleteComponentTemplate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -177,6 +176,10 @@ func (r DeleteComponentTemplate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -211,25 +214,29 @@ func (r *DeleteComponentTemplate) Header(key, value string) *DeleteComponentTemp // Name Comma-separated list or wildcard expression of component template names used // to limit the request. // API Name: name -func (r *DeleteComponentTemplate) Name(v string) *DeleteComponentTemplate { +func (r *DeleteComponentTemplate) _name(name string) *DeleteComponentTemplate { r.paramSet |= nameMask - r.name = v + r.name = name return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *DeleteComponentTemplate) MasterTimeout(v string) *DeleteComponentTemplate { - r.values.Set("master_timeout", v) +func (r *DeleteComponentTemplate) MasterTimeout(duration string) *DeleteComponentTemplate { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. 
+// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *DeleteComponentTemplate) Timeout(v string) *DeleteComponentTemplate { - r.values.Set("timeout", v) +func (r *DeleteComponentTemplate) Timeout(duration string) *DeleteComponentTemplate { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/deletecomponenttemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/deletecomponenttemplate/response.go index dc815cc24..d884f08ec 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/deletecomponenttemplate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/deletecomponenttemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletecomponenttemplate // Response holds the response body struct for the package deletecomponenttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/delete_component_template/ClusterDeleteComponentTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/delete_component_template/ClusterDeleteComponentTemplateResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/deletevotingconfigexclusions/delete_voting_config_exclusions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/deletevotingconfigexclusions/delete_voting_config_exclusions.go index dccb41c9e..95f19d1b8 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/deletevotingconfigexclusions/delete_voting_config_exclusions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/deletevotingconfigexclusions/delete_voting_config_exclusions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Clears cluster voting config exclusions. package deletevotingconfigexclusions @@ -24,7 +24,6 @@ package deletevotingconfigexclusions import ( gobytes "bytes" "context" - "encoding/json" "errors" "fmt" "io" @@ -35,7 +34,6 @@ import ( "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -143,33 +141,8 @@ func (r DeleteVotingConfigExclusions) Perform(ctx context.Context) (*http.Respon } // Do runs the request through the transport, handle the response and returns a deletevotingconfigexclusions.Response -func (r DeleteVotingConfigExclusions) Do(ctx context.Context) (*Response, error) { - - response := NewResponse() - - res, err := r.Perform(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() - - if res.StatusCode < 299 { - err = json.NewDecoder(res.Body).Decode(response) - if err != nil { - return nil, err - } - - return response, nil - - } - - errorResponse := types.NewElasticsearchError() - err = json.NewDecoder(res.Body).Decode(errorResponse) - if err != nil { - return nil, err - } - - return nil, errorResponse +func (r DeleteVotingConfigExclusions) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. 
@@ -207,8 +180,8 @@ func (r *DeleteVotingConfigExclusions) Header(key, value string) *DeleteVotingCo // voting configuration exclusions list is cleared even if some excluded // nodes are still in the cluster. // API name: wait_for_removal -func (r *DeleteVotingConfigExclusions) WaitForRemoval(b bool) *DeleteVotingConfigExclusions { - r.values.Set("wait_for_removal", strconv.FormatBool(b)) +func (r *DeleteVotingConfigExclusions) WaitForRemoval(waitforremoval bool) *DeleteVotingConfigExclusions { + r.values.Set("wait_for_removal", strconv.FormatBool(waitforremoval)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/existscomponenttemplate/exists_component_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/existscomponenttemplate/exists_component_template.go index e7e959471..83b33ebdd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/existscomponenttemplate/exists_component_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/existscomponenttemplate/exists_component_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about whether a particular component template exist package existscomponenttemplate @@ -24,7 +24,6 @@ package existscomponenttemplate import ( gobytes "bytes" "context" - "encoding/json" "errors" "fmt" "io" @@ -35,7 +34,6 @@ import ( "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) const ( @@ -68,7 +66,7 @@ func NewExistsComponentTemplateFunc(tp elastictransport.Interface) NewExistsComp return func(name string) *ExistsComponentTemplate { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -152,33 +150,8 @@ func (r ExistsComponentTemplate) Perform(ctx context.Context) (*http.Response, e } // Do runs the request through the transport, handle the response and returns a existscomponenttemplate.Response -func (r ExistsComponentTemplate) Do(ctx context.Context) (*Response, error) { - - response := NewResponse() - - res, err := r.Perform(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() - - if res.StatusCode < 299 { - err = json.NewDecoder(res.Body).Decode(response) - if err != nil { - return nil, err - } - - return response, nil - - } - - errorResponse := types.NewElasticsearchError() - err = json.NewDecoder(res.Body).Decode(errorResponse) - if err != nil { - return nil, err - } - - return nil, errorResponse +func (r ExistsComponentTemplate) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. @@ -212,9 +185,9 @@ func (r *ExistsComponentTemplate) Header(key, value string) *ExistsComponentTemp // Name Comma-separated list of component template names used to limit the request. // Wildcard (*) expressions are supported. 
// API Name: name -func (r *ExistsComponentTemplate) Name(v string) *ExistsComponentTemplate { +func (r *ExistsComponentTemplate) _name(name string) *ExistsComponentTemplate { r.paramSet |= nameMask - r.name = v + r.name = name return r } @@ -223,8 +196,8 @@ func (r *ExistsComponentTemplate) Name(v string) *ExistsComponentTemplate { // received before the timeout expires, the request fails and returns an // error. // API name: master_timeout -func (r *ExistsComponentTemplate) MasterTimeout(v string) *ExistsComponentTemplate { - r.values.Set("master_timeout", v) +func (r *ExistsComponentTemplate) MasterTimeout(duration string) *ExistsComponentTemplate { + r.values.Set("master_timeout", duration) return r } @@ -232,8 +205,8 @@ func (r *ExistsComponentTemplate) MasterTimeout(v string) *ExistsComponentTempla // Local If true, the request retrieves information from the local node only. // Defaults to false, which means information is retrieved from the master node. // API name: local -func (r *ExistsComponentTemplate) Local(b bool) *ExistsComponentTemplate { - r.values.Set("local", strconv.FormatBool(b)) +func (r *ExistsComponentTemplate) Local(local bool) *ExistsComponentTemplate { + r.values.Set("local", strconv.FormatBool(local)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getcomponenttemplate/get_component_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getcomponenttemplate/get_component_template.go index 4377aa798..9aac9732e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getcomponenttemplate/get_component_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getcomponenttemplate/get_component_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns one or more component templates package getcomponenttemplate @@ -172,7 +172,6 @@ func (r GetComponentTemplate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r GetComponentTemplate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,35 +215,47 @@ func (r *GetComponentTemplate) Header(key, value string) *GetComponentTemplate { return r } -// Name The comma separated names of the component templates +// Name Comma-separated list of component template names used to limit the request. +// Wildcard (`*`) expressions are supported. // API Name: name -func (r *GetComponentTemplate) Name(v string) *GetComponentTemplate { +func (r *GetComponentTemplate) Name(name string) *GetComponentTemplate { r.paramSet |= nameMask - r.name = v + r.name = name return r } +// FlatSettings If `true`, returns settings in flat format. 
// API name: flat_settings -func (r *GetComponentTemplate) FlatSettings(b bool) *GetComponentTemplate { - r.values.Set("flat_settings", strconv.FormatBool(b)) +func (r *GetComponentTemplate) FlatSettings(flatsettings bool) *GetComponentTemplate { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) + + return r +} + +// IncludeDefaults Return all default configurations for the component template (default: false) +// API name: include_defaults +func (r *GetComponentTemplate) IncludeDefaults(includedefaults bool) *GetComponentTemplate { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) return r } -// Local Return local information, do not retrieve the state from master node -// (default: false) +// Local If `true`, the request retrieves information from the local node only. +// If `false`, information is retrieved from the master node. // API name: local -func (r *GetComponentTemplate) Local(b bool) *GetComponentTemplate { - r.values.Set("local", strconv.FormatBool(b)) +func (r *GetComponentTemplate) Local(local bool) *GetComponentTemplate { + r.values.Set("local", strconv.FormatBool(local)) return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
// API name: master_timeout -func (r *GetComponentTemplate) MasterTimeout(v string) *GetComponentTemplate { - r.values.Set("master_timeout", v) +func (r *GetComponentTemplate) MasterTimeout(duration string) *GetComponentTemplate { + r.values.Set("master_timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getcomponenttemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getcomponenttemplate/response.go index a61a1626f..5baf46e2a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getcomponenttemplate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getcomponenttemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getcomponenttemplate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getcomponenttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/get_component_template/ClusterGetComponentTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/get_component_template/ClusterGetComponentTemplateResponse.ts#L22-L24 type Response struct { ComponentTemplates []types.ClusterComponentTemplate `json:"component_templates"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getsettings/get_settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getsettings/get_settings.go index 65d7ae0f3..0b18b6422 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getsettings/get_settings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getsettings/get_settings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns cluster settings. package getsettings @@ -160,7 +160,6 @@ func (r GetSettings) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -169,6 +168,10 @@ func (r GetSettings) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -200,34 +203,38 @@ func (r *GetSettings) Header(key, value string) *GetSettings { return r } -// FlatSettings Return settings in flat format (default: false) +// FlatSettings If `true`, returns settings in flat format. // API name: flat_settings -func (r *GetSettings) FlatSettings(b bool) *GetSettings { - r.values.Set("flat_settings", strconv.FormatBool(b)) +func (r *GetSettings) FlatSettings(flatsettings bool) *GetSettings { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) return r } -// IncludeDefaults Whether to return all default clusters setting. +// IncludeDefaults If `true`, returns default cluster settings from the local node. 
// API name: include_defaults -func (r *GetSettings) IncludeDefaults(b bool) *GetSettings { - r.values.Set("include_defaults", strconv.FormatBool(b)) +func (r *GetSettings) IncludeDefaults(includedefaults bool) *GetSettings { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *GetSettings) MasterTimeout(v string) *GetSettings { - r.values.Set("master_timeout", v) +func (r *GetSettings) MasterTimeout(duration string) *GetSettings { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *GetSettings) Timeout(v string) *GetSettings { - r.values.Set("timeout", v) +func (r *GetSettings) Timeout(duration string) *GetSettings { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getsettings/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getsettings/response.go index c8619f8f3..e90131a74 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getsettings/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getsettings/response.go @@ -16,15 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getsettings -import "encoding/json" +import ( + "encoding/json" +) // Response holds the response body struct for the package getsettings // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/get_settings/ClusterGetSettingsResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/get_settings/ClusterGetSettingsResponse.ts#L23-L29 type Response struct { Defaults map[string]json.RawMessage `json:"defaults,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/health/health.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/health/health.go index 16c8d9857..841586632 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/health/health.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/health/health.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns basic information about the health of the cluster. 
package health @@ -36,7 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/healthstatus" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/level" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/waitforevents" @@ -180,7 +180,30 @@ func (r Health) Do(ctx context.Context) (*Response, error) { } return response, nil + } + + if res.StatusCode == 408 { + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(gobytes.NewReader(data)).Decode(&errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + err = json.NewDecoder(gobytes.NewReader(data)).Decode(&response) + if err != nil { + return nil, err + } + + return response, nil + } + return nil, errorResponse } errorResponse := types.NewElasticsearchError() @@ -189,6 +212,10 @@ func (r Health) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -224,9 +251,9 @@ func (r *Health) Header(key, value string) *Health { // limit the request. Wildcard expressions (*) are supported. To target all data // streams and indices in a cluster, omit this parameter or use _all or *. // API Name: index -func (r *Health) Index(v string) *Health { +func (r *Health) Index(index string) *Health { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -234,8 +261,12 @@ func (r *Health) Index(v string) *Health { // ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, // closed or both. 
// API name: expand_wildcards -func (r *Health) ExpandWildcards(v string) *Health { - r.values.Set("expand_wildcards", v) +func (r *Health) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Health { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } @@ -243,8 +274,8 @@ func (r *Health) ExpandWildcards(v string) *Health { // Level Can be one of cluster, indices or shards. Controls the details level of the // health information returned. // API name: level -func (r *Health) Level(enum level.Level) *Health { - r.values.Set("level", enum.String()) +func (r *Health) Level(level level.Level) *Health { + r.values.Set("level", level.String()) return r } @@ -252,8 +283,8 @@ func (r *Health) Level(enum level.Level) *Health { // Local If true, the request retrieves information from the local node only. Defaults // to false, which means information is retrieved from the master node. // API name: local -func (r *Health) Local(b bool) *Health { - r.values.Set("local", strconv.FormatBool(b)) +func (r *Health) Local(local bool) *Health { + r.values.Set("local", strconv.FormatBool(local)) return r } @@ -261,8 +292,8 @@ func (r *Health) Local(b bool) *Health { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *Health) MasterTimeout(v string) *Health { - r.values.Set("master_timeout", v) +func (r *Health) MasterTimeout(duration string) *Health { + r.values.Set("master_timeout", duration) return r } @@ -270,8 +301,8 @@ func (r *Health) MasterTimeout(v string) *Health { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. 
// API name: timeout -func (r *Health) Timeout(v string) *Health { - r.values.Set("timeout", v) +func (r *Health) Timeout(duration string) *Health { + r.values.Set("timeout", duration) return r } @@ -279,8 +310,8 @@ func (r *Health) Timeout(v string) *Health { // WaitForActiveShards A number controlling to how many active shards to wait for, all to wait for // all shards in the cluster to be active, or 0 to not wait. // API name: wait_for_active_shards -func (r *Health) WaitForActiveShards(v string) *Health { - r.values.Set("wait_for_active_shards", v) +func (r *Health) WaitForActiveShards(waitforactiveshards string) *Health { + r.values.Set("wait_for_active_shards", waitforactiveshards) return r } @@ -288,8 +319,8 @@ func (r *Health) WaitForActiveShards(v string) *Health { // WaitForEvents Can be one of immediate, urgent, high, normal, low, languid. Wait until all // currently queued events with the given priority are processed. // API name: wait_for_events -func (r *Health) WaitForEvents(enum waitforevents.WaitForEvents) *Health { - r.values.Set("wait_for_events", enum.String()) +func (r *Health) WaitForEvents(waitforevents waitforevents.WaitForEvents) *Health { + r.values.Set("wait_for_events", waitforevents.String()) return r } @@ -298,8 +329,8 @@ func (r *Health) WaitForEvents(enum waitforevents.WaitForEvents) *Health { // accepts >=N, <=N, >N and // yellow > red. By default, will not wait for any status. 
// API name: wait_for_status -func (r *Health) WaitForStatus(enum healthstatus.HealthStatus) *Health { - r.values.Set("wait_for_status", enum.String()) +func (r *Health) WaitForStatus(waitforstatus healthstatus.HealthStatus) *Health { + r.values.Set("wait_for_status", waitforstatus.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/health/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/health/response.go index d89c77cc0..624e67d41 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/health/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/health/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package health @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package health // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/health/ClusterHealthResponse.ts#L26-L64 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/health/ClusterHealthResponse.ts#L26-L37 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/info/info.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/info/info.go new file mode 100644 index 000000000..7ad184611 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/info/info.go @@ -0,0 +1,222 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Returns different information about the cluster. +package info + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + targetMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Info struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + target string +} + +// NewInfo type alias for index. +type NewInfo func(target string) *Info + +// NewInfoFunc returns a new instance of Info with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewInfoFunc(tp elastictransport.Interface) NewInfo { + return func(target string) *Info { + n := New(tp) + + n._target(target) + + return n + } +} + +// Returns different information about the cluster. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster-info.html +func New(tp elastictransport.Interface) *Info { + r := &Info{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Info) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == targetMask: + path.WriteString("/") + path.WriteString("_info") + path.WriteString("/") + + path.WriteString(r.target) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Info) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the Info query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a info.Response +func (r Info) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Info) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the Info headers map. +func (r *Info) Header(key, value string) *Info { + r.headers.Set(key, value) + + return r +} + +// Target Limits the information returned to the specific target. Supports a +// comma-separated list, such as http,ingest. 
+// API Name: target +func (r *Info) _target(target string) *Info { + r.paramSet |= targetMask + r.target = target + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/info/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/info/response.go new file mode 100644 index 000000000..abb94a061 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/info/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package info + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package info +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/info/ClusterInfoResponse.ts#L26-L34 + +type Response struct { + ClusterName string `json:"cluster_name"` + Http *types.Http `json:"http,omitempty"` + Ingest *types.NodesIngest `json:"ingest,omitempty"` + Script *types.Scripting `json:"script,omitempty"` + ThreadPool map[string]types.ThreadCount `json:"thread_pool,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + ThreadPool: make(map[string]types.ThreadCount, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/pendingtasks/pending_tasks.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/pendingtasks/pending_tasks.go index 58914c5d3..cc5cd8e71 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/pendingtasks/pending_tasks.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/pendingtasks/pending_tasks.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns a list of any cluster-level changes (e.g. 
create index, update // mapping, @@ -164,7 +164,6 @@ func (r PendingTasks) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -173,6 +172,10 @@ func (r PendingTasks) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -204,19 +207,21 @@ func (r *PendingTasks) Header(key, value string) *PendingTasks { return r } -// Local Return local information, do not retrieve the state from master node -// (default: false) +// Local If `true`, the request retrieves information from the local node only. +// If `false`, information is retrieved from the master node. // API name: local -func (r *PendingTasks) Local(b bool) *PendingTasks { - r.values.Set("local", strconv.FormatBool(b)) +func (r *PendingTasks) Local(local bool) *PendingTasks { + r.values.Set("local", strconv.FormatBool(local)) return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *PendingTasks) MasterTimeout(v string) *PendingTasks { - r.values.Set("master_timeout", v) +func (r *PendingTasks) MasterTimeout(duration string) *PendingTasks { + r.values.Set("master_timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/pendingtasks/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/pendingtasks/response.go index 6d94354ba..d7f28f3cc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/pendingtasks/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/pendingtasks/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package pendingtasks @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package pendingtasks // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/pending_tasks/ClusterPendingTasksResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/pending_tasks/ClusterPendingTasksResponse.ts#L22-L24 type Response struct { Tasks []types.PendingTask `json:"tasks"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/postvotingconfigexclusions/post_voting_config_exclusions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/postvotingconfigexclusions/post_voting_config_exclusions.go index 554cb1198..77f29bb03 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/postvotingconfigexclusions/post_voting_config_exclusions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/postvotingconfigexclusions/post_voting_config_exclusions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Updates the cluster voting config exclusions by node ids or node names. 
package postvotingconfigexclusions @@ -24,7 +24,6 @@ package postvotingconfigexclusions import ( gobytes "bytes" "context" - "encoding/json" "errors" "fmt" "io" @@ -34,7 +33,6 @@ import ( "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -142,33 +140,8 @@ func (r PostVotingConfigExclusions) Perform(ctx context.Context) (*http.Response } // Do runs the request through the transport, handle the response and returns a postvotingconfigexclusions.Response -func (r PostVotingConfigExclusions) Do(ctx context.Context) (*Response, error) { - - response := NewResponse() - - res, err := r.Perform(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() - - if res.StatusCode < 299 { - err = json.NewDecoder(res.Body).Decode(response) - if err != nil { - return nil, err - } - - return response, nil - - } - - errorResponse := types.NewElasticsearchError() - err = json.NewDecoder(res.Body).Decode(errorResponse) - if err != nil { - return nil, err - } - - return nil, errorResponse +func (r PostVotingConfigExclusions) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. @@ -202,8 +175,8 @@ func (r *PostVotingConfigExclusions) Header(key, value string) *PostVotingConfig // NodeNames A comma-separated list of the names of the nodes to exclude from the // voting configuration. If specified, you may not also specify node_ids. 
// API name: node_names -func (r *PostVotingConfigExclusions) NodeNames(v string) *PostVotingConfigExclusions { - r.values.Set("node_names", v) +func (r *PostVotingConfigExclusions) NodeNames(names ...string) *PostVotingConfigExclusions { + r.values.Set("node_names", strings.Join(names, ",")) return r } @@ -212,8 +185,8 @@ func (r *PostVotingConfigExclusions) NodeNames(v string) *PostVotingConfigExclus // from the voting configuration. If specified, you may not also specify // node_names. // API name: node_ids -func (r *PostVotingConfigExclusions) NodeIds(v string) *PostVotingConfigExclusions { - r.values.Set("node_ids", v) +func (r *PostVotingConfigExclusions) NodeIds(ids ...string) *PostVotingConfigExclusions { + r.values.Set("node_ids", strings.Join(ids, ",")) return r } @@ -223,8 +196,8 @@ func (r *PostVotingConfigExclusions) NodeIds(v string) *PostVotingConfigExclusio // returning. If the timeout expires before the appropriate condition // is satisfied, the request fails and returns an error. // API name: timeout -func (r *PostVotingConfigExclusions) Timeout(v string) *PostVotingConfigExclusions { - r.values.Set("timeout", v) +func (r *PostVotingConfigExclusions) Timeout(duration string) *PostVotingConfigExclusions { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putcomponenttemplate/put_component_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putcomponenttemplate/put_component_template.go index b4ccf9de2..d1b2cd339 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putcomponenttemplate/put_component_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putcomponenttemplate/put_component_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates or updates a component template package putcomponenttemplate @@ -53,8 +53,9 @@ type PutComponentTemplate struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +71,7 @@ func NewPutComponentTemplateFunc(tp elastictransport.Interface) NewPutComponentT return func(name string) *PutComponentTemplate { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -85,6 +86,8 @@ func New(tp elastictransport.Interface) *PutComponentTemplate { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +117,19 @@ func (r *PutComponentTemplate) HttpRequest(ctx context.Context) (*http.Request, var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +137,7 @@ func (r *PutComponentTemplate) HttpRequest(ctx context.Context) (*http.Request, } r.buf.Write(data) + } r.path.Scheme = "http" @@ -204,7 +218,6 @@ func (r PutComponentTemplate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -213,6 +226,10 @@ func (r PutComponentTemplate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -223,28 +240,88 @@ func (r *PutComponentTemplate) Header(key, value string) *PutComponentTemplate { return r } -// Name The name of the template +// Name Name of the component template to create. 
+// Elasticsearch includes the following built-in component templates: +// `logs-mappings`; 'logs-settings`; `metrics-mappings`; +// `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. +// Elastic Agent uses these templates to configure backing indices for its data +// streams. +// If you use Elastic Agent and want to overwrite one of these templates, set +// the `version` for your replacement template higher than the current version. +// If you don’t use Elastic Agent and want to disable all built-in component and +// index templates, set `stack.templates.enabled` to `false` using the cluster +// update settings API. // API Name: name -func (r *PutComponentTemplate) Name(v string) *PutComponentTemplate { +func (r *PutComponentTemplate) _name(name string) *PutComponentTemplate { r.paramSet |= nameMask - r.name = v + r.name = name return r } -// Create Whether the index template should only be added if new or can also replace an -// existing one +// Create If `true`, this request cannot replace or update existing component +// templates. // API name: create -func (r *PutComponentTemplate) Create(b bool) *PutComponentTemplate { - r.values.Set("create", strconv.FormatBool(b)) +func (r *PutComponentTemplate) Create(create bool) *PutComponentTemplate { + r.values.Set("create", strconv.FormatBool(create)) return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *PutComponentTemplate) MasterTimeout(v string) *PutComponentTemplate { - r.values.Set("master_timeout", v) +func (r *PutComponentTemplate) MasterTimeout(duration string) *PutComponentTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// AllowAutoCreate This setting overrides the value of the `action.auto_create_index` cluster +// setting. 
+// If set to `true` in a template, then indices can be automatically created +// using that +// template even if auto-creation of indices is disabled via +// `actions.auto_create_index`. +// If set to `false` then data streams matching the template must always be +// explicitly created. +// API name: allow_auto_create +func (r *PutComponentTemplate) AllowAutoCreate(allowautocreate bool) *PutComponentTemplate { + r.req.AllowAutoCreate = &allowautocreate + + return r +} + +// Meta_ Optional user metadata about the component template. +// May have any contents. This map is not automatically generated by +// Elasticsearch. +// This information is stored in the cluster state, so keeping it short is +// preferable. +// To unset `_meta`, replace the template without specifying this information. +// API name: _meta +func (r *PutComponentTemplate) Meta_(metadata types.Metadata) *PutComponentTemplate { + r.req.Meta_ = metadata + + return r +} + +// Template The template to be applied which includes mappings, settings, or aliases +// configuration. +// API name: template +func (r *PutComponentTemplate) Template(template *types.IndexState) *PutComponentTemplate { + + r.req.Template = *template + + return r +} + +// Version Version number used to manage component templates externally. +// This number isn't automatically generated or incremented by Elasticsearch. +// To unset a version, replace the template without specifying a version. 
+// API name: version +func (r *PutComponentTemplate) Version(versionnumber int64) *PutComponentTemplate { + r.req.Version = &versionnumber return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putcomponenttemplate/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putcomponenttemplate/request.go index 7247450af..579ab5e26 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putcomponenttemplate/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putcomponenttemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putcomponenttemplate @@ -29,21 +29,37 @@ import ( // Request holds the request body struct for the package putcomponenttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/put_component_template/ClusterPutComponentTemplateRequest.ts#L29-L54 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/put_component_template/ClusterPutComponentTemplateRequest.ts#L29-L99 type Request struct { - Aliases map[string]types.AliasDefinition `json:"aliases,omitempty"` - Mappings *types.TypeMapping `json:"mappings,omitempty"` - Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` - Settings *types.IndexSettings `json:"settings,omitempty"` - Template types.IndexState `json:"template"` - Version *int64 `json:"version,omitempty"` + + // AllowAutoCreate This setting overrides the value of the `action.auto_create_index` cluster + // setting. 
+ // If set to `true` in a template, then indices can be automatically created + // using that + // template even if auto-creation of indices is disabled via + // `actions.auto_create_index`. + // If set to `false` then data streams matching the template must always be + // explicitly created. + AllowAutoCreate *bool `json:"allow_auto_create,omitempty"` + // Meta_ Optional user metadata about the component template. + // May have any contents. This map is not automatically generated by + // Elasticsearch. + // This information is stored in the cluster state, so keeping it short is + // preferable. + // To unset `_meta`, replace the template without specifying this information. + Meta_ types.Metadata `json:"_meta,omitempty"` + // Template The template to be applied which includes mappings, settings, or aliases + // configuration. + Template types.IndexState `json:"template"` + // Version Version number used to manage component templates externally. + // This number isn't automatically generated or incremented by Elasticsearch. + // To unset a version, replace the template without specifying a version. + Version *int64 `json:"version,omitempty"` } // NewRequest returns a Request func NewRequest() *Request { - r := &Request{ - Aliases: make(map[string]types.AliasDefinition, 0), - } + r := &Request{} return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putcomponenttemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putcomponenttemplate/response.go index 5d537659f..5588d69ef 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putcomponenttemplate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putcomponenttemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putcomponenttemplate // Response holds the response body struct for the package putcomponenttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/put_component_template/ClusterPutComponentTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/put_component_template/ClusterPutComponentTemplateResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putsettings/put_settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putsettings/put_settings.go index 22de6d791..f437c0dee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putsettings/put_settings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putsettings/put_settings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Updates the cluster settings. 
package putsettings @@ -49,8 +49,9 @@ type PutSettings struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -77,6 +78,8 @@ func New(tp elastictransport.Interface) *PutSettings { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -106,9 +109,19 @@ func (r *PutSettings) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -116,6 +129,7 @@ func (r *PutSettings) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -195,7 +209,6 @@ func (r PutSettings) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -204,6 +217,10 @@ func (r PutSettings) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,24 +233,40 @@ func (r *PutSettings) Header(key, value string) *PutSettings { // FlatSettings Return settings in flat format (default: false) // API name: flat_settings -func (r *PutSettings) FlatSettings(b bool) *PutSettings { - r.values.Set("flat_settings", strconv.FormatBool(b)) +func (r *PutSettings) FlatSettings(flatsettings bool) *PutSettings { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) return r } // MasterTimeout Explicit operation timeout for connection to master node // API name: master_timeout -func (r *PutSettings) MasterTimeout(v string) *PutSettings { - r.values.Set("master_timeout", v) +func (r *PutSettings) MasterTimeout(duration string) *PutSettings 
{ + r.values.Set("master_timeout", duration) return r } // Timeout Explicit operation timeout // API name: timeout -func (r *PutSettings) Timeout(v string) *PutSettings { - r.values.Set("timeout", v) +func (r *PutSettings) Timeout(duration string) *PutSettings { + r.values.Set("timeout", duration) + + return r +} + +// API name: persistent +func (r *PutSettings) Persistent(persistent map[string]json.RawMessage) *PutSettings { + + r.req.Persistent = persistent + + return r +} + +// API name: transient +func (r *PutSettings) Transient(transient map[string]json.RawMessage) *PutSettings { + + r.req.Transient = transient return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putsettings/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putsettings/request.go index a96d9e813..bc216e163 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putsettings/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putsettings/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putsettings @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package putsettings // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/put_settings/ClusterPutSettingsRequest.ts#L25-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/put_settings/ClusterPutSettingsRequest.ts#L25-L43 type Request struct { Persistent map[string]json.RawMessage `json:"persistent,omitempty"` Transient map[string]json.RawMessage `json:"transient,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putsettings/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putsettings/response.go index 6ba3817bb..abe723a93 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putsettings/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putsettings/response.go @@ -16,15 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putsettings -import "encoding/json" +import ( + "encoding/json" +) // Response holds the response body struct for the package putsettings // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/put_settings/ClusterPutSettingsResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/put_settings/ClusterPutSettingsResponse.ts#L23-L29 type Response struct { Acknowledged bool `json:"acknowledged"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/remoteinfo/remote_info.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/remoteinfo/remote_info.go index f29ea2598..96245d517 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/remoteinfo/remote_info.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/remoteinfo/remote_info.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns the information about configured remote clusters. 
package remoteinfo @@ -159,7 +159,6 @@ func (r RemoteInfo) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r RemoteInfo) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/remoteinfo/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/remoteinfo/response.go index 6f03ff154..7fb630b2e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/remoteinfo/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/remoteinfo/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package remoteinfo @@ -32,7 +32,7 @@ import ( // Response holds the response body struct for the package remoteinfo // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L24-L26 type Response map[string]types.ClusterRemoteInfo diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/reroute/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/reroute/request.go index 9b671b8f2..db64a0ded 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/reroute/request.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/reroute/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package reroute @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package reroute // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/reroute/ClusterRerouteRequest.ts#L25-L70 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/reroute/ClusterRerouteRequest.ts#L25-L70 type Request struct { // Commands Defines the commands to perform. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/reroute/reroute.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/reroute/reroute.go index 07ca37351..f28d54442 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/reroute/reroute.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/reroute/reroute.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Allows to manually change the allocation of individual shards in the cluster. 
package reroute @@ -49,8 +49,9 @@ type Reroute struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -77,6 +78,8 @@ func New(tp elastictransport.Interface) *Reroute { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -106,9 +109,19 @@ func (r *Reroute) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -116,6 +129,7 @@ func (r *Reroute) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -195,7 +209,6 @@ func (r Reroute) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -204,6 +217,10 @@ func (r Reroute) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -217,8 +234,8 @@ func (r *Reroute) Header(key, value string) *Reroute { // DryRun If true, then the request simulates the operation only and returns the // resulting state. // API name: dry_run -func (r *Reroute) DryRun(b bool) *Reroute { - r.values.Set("dry_run", strconv.FormatBool(b)) +func (r *Reroute) DryRun(dryrun bool) *Reroute { + r.values.Set("dry_run", strconv.FormatBool(dryrun)) return r } @@ -226,16 +243,16 @@ func (r *Reroute) DryRun(b bool) *Reroute { // Explain If true, then the response contains an explanation of why the commands can or // cannot be executed. 
// API name: explain -func (r *Reroute) Explain(b bool) *Reroute { - r.values.Set("explain", strconv.FormatBool(b)) +func (r *Reroute) Explain(explain bool) *Reroute { + r.values.Set("explain", strconv.FormatBool(explain)) return r } // Metric Limits the information returned to the specified metrics. // API name: metric -func (r *Reroute) Metric(v string) *Reroute { - r.values.Set("metric", v) +func (r *Reroute) Metric(metrics ...string) *Reroute { + r.values.Set("metric", strings.Join(metrics, ",")) return r } @@ -243,8 +260,8 @@ func (r *Reroute) Metric(v string) *Reroute { // RetryFailed If true, then retries allocation of shards that are blocked due to too many // subsequent allocation failures. // API name: retry_failed -func (r *Reroute) RetryFailed(b bool) *Reroute { - r.values.Set("retry_failed", strconv.FormatBool(b)) +func (r *Reroute) RetryFailed(retryfailed bool) *Reroute { + r.values.Set("retry_failed", strconv.FormatBool(retryfailed)) return r } @@ -252,8 +269,8 @@ func (r *Reroute) RetryFailed(b bool) *Reroute { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *Reroute) MasterTimeout(v string) *Reroute { - r.values.Set("master_timeout", v) +func (r *Reroute) MasterTimeout(duration string) *Reroute { + r.values.Set("master_timeout", duration) return r } @@ -261,8 +278,16 @@ func (r *Reroute) MasterTimeout(v string) *Reroute { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. // API name: timeout -func (r *Reroute) Timeout(v string) *Reroute { - r.values.Set("timeout", v) +func (r *Reroute) Timeout(duration string) *Reroute { + r.values.Set("timeout", duration) + + return r +} + +// Commands Defines the commands to perform. 
+// API name: commands +func (r *Reroute) Commands(commands ...types.Command) *Reroute { + r.req.Commands = commands return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/reroute/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/reroute/response.go index 31ee258d5..5ae9f60b6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/reroute/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/reroute/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package reroute @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package reroute // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/reroute/ClusterRerouteResponse.ts#L23-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/reroute/ClusterRerouteResponse.ts#L23-L34 type Response struct { Acknowledged bool `json:"acknowledged"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/state/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/state/response.go index 1eaab80b8..c741e4133 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/state/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/state/response.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package state -import "encoding/json" +import ( + "encoding/json" +) // Response holds the response body struct for the package state // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/state/ClusterStateResponse.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/state/ClusterStateResponse.ts#L22-L29 -type Response json.RawMessage +type Response = json.RawMessage + +func NewResponse() *Response { + return new(Response) +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/state/state.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/state/state.go index 7c783206f..68574126a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/state/state.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/state/state.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns a comprehensive information about the state of the cluster. 
package state @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -192,7 +193,6 @@ func (r State) Do(ctx context.Context) (Response, error) { } return *response, nil - } errorResponse := types.NewElasticsearchError() @@ -201,6 +201,10 @@ func (r State) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -234,9 +238,9 @@ func (r *State) Header(key, value string) *State { // Metric Limit the information returned to the specified metrics // API Name: metric -func (r *State) Metric(v string) *State { +func (r *State) Metric(metric string) *State { r.paramSet |= metricMask - r.metric = v + r.metric = metric return r } @@ -244,9 +248,9 @@ func (r *State) Metric(v string) *State { // Index A comma-separated list of index names; use `_all` or empty string to perform // the operation on all indices // API Name: index -func (r *State) Index(v string) *State { +func (r *State) Index(index string) *State { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -254,8 +258,8 @@ func (r *State) Index(v string) *State { // AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete // indices. (This includes `_all` string or when no indices have been specified) // API name: allow_no_indices -func (r *State) AllowNoIndices(b bool) *State { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *State) AllowNoIndices(allownoindices bool) *State { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } @@ -263,16 +267,20 @@ func (r *State) AllowNoIndices(b bool) *State { // ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, // closed or both. 
// API name: expand_wildcards -func (r *State) ExpandWildcards(v string) *State { - r.values.Set("expand_wildcards", v) +func (r *State) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *State { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } // FlatSettings Return settings in flat format (default: false) // API name: flat_settings -func (r *State) FlatSettings(b bool) *State { - r.values.Set("flat_settings", strconv.FormatBool(b)) +func (r *State) FlatSettings(flatsettings bool) *State { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) return r } @@ -280,8 +288,8 @@ func (r *State) FlatSettings(b bool) *State { // IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable // (missing or closed) // API name: ignore_unavailable -func (r *State) IgnoreUnavailable(b bool) *State { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *State) IgnoreUnavailable(ignoreunavailable bool) *State { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } @@ -289,16 +297,16 @@ func (r *State) IgnoreUnavailable(b bool) *State { // Local Return local information, do not retrieve the state from master node // (default: false) // API name: local -func (r *State) Local(b bool) *State { - r.values.Set("local", strconv.FormatBool(b)) +func (r *State) Local(local bool) *State { + r.values.Set("local", strconv.FormatBool(local)) return r } // MasterTimeout Specify timeout for connection to master // API name: master_timeout -func (r *State) MasterTimeout(v string) *State { - r.values.Set("master_timeout", v) +func (r *State) MasterTimeout(duration string) *State { + r.values.Set("master_timeout", duration) return r } @@ -306,16 +314,16 @@ func (r *State) MasterTimeout(v string) *State { // WaitForMetadataVersion Wait for the metadata version to be equal or 
greater than the specified // metadata version // API name: wait_for_metadata_version -func (r *State) WaitForMetadataVersion(v string) *State { - r.values.Set("wait_for_metadata_version", v) +func (r *State) WaitForMetadataVersion(versionnumber string) *State { + r.values.Set("wait_for_metadata_version", versionnumber) return r } // WaitForTimeout The maximum time to wait for wait_for_metadata_version before timing out // API name: wait_for_timeout -func (r *State) WaitForTimeout(v string) *State { - r.values.Set("wait_for_timeout", v) +func (r *State) WaitForTimeout(duration string) *State { + r.values.Set("wait_for_timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/stats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/stats/response.go index 54481dfa5..7eaab7183 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/stats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package stats @@ -27,23 +27,26 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/ClusterStatsResponse.ts#L55-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/ClusterStatsResponse.ts#L53-L55 type Response struct { - // ClusterName Name of the cluster, based on the Cluster name setting setting. + // ClusterName Name of the cluster, based on the cluster name setting. 
ClusterName string `json:"cluster_name"` // ClusterUuid Unique identifier for the cluster. ClusterUuid string `json:"cluster_uuid"` // Indices Contains statistics about indices with shards assigned to selected nodes. Indices types.ClusterIndices `json:"indices"` + // NodeStats Contains statistics about the number of nodes selected by the request’s node + // filters. + NodeStats *types.NodeStatistics `json:"_nodes,omitempty"` // Nodes Contains statistics about nodes selected by the request’s node filters. Nodes types.ClusterNodes `json:"nodes"` // Status Health status of the cluster, based on the state of its primary and replica // shards. Status healthstatus.HealthStatus `json:"status"` - // Timestamp Unix timestamp, in milliseconds, of the last time the cluster statistics were - // refreshed. + // Timestamp Unix timestamp, in milliseconds, for the last time the cluster statistics + // were refreshed. Timestamp int64 `json:"timestamp"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/stats/stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/stats/stats.go index 0d476bf26..5744135e0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/stats/stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/stats/stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns high-level overview of cluster statistics. 
package stats @@ -178,7 +178,6 @@ func (r Stats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -187,6 +186,10 @@ func (r Stats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -221,28 +224,29 @@ func (r *Stats) Header(key, value string) *Stats { // NodeId Comma-separated list of node filters used to limit returned information. // Defaults to all nodes in the cluster. // API Name: nodeid -func (r *Stats) NodeId(v string) *Stats { +func (r *Stats) NodeId(nodeid string) *Stats { r.paramSet |= nodeidMask - r.nodeid = v + r.nodeid = nodeid return r } -// FlatSettings Return settings in flat format (default: false) +// FlatSettings If `true`, returns settings in flat format. // API name: flat_settings -func (r *Stats) FlatSettings(b bool) *Stats { - r.values.Set("flat_settings", strconv.FormatBool(b)) +func (r *Stats) FlatSettings(flatsettings bool) *Stats { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) return r } -// Timeout Period to wait for each node to respond. If a node does not respond before -// its timeout expires, the response does not include its stats. However, timed -// out nodes are included in the response’s _nodes.failed property. Defaults to -// no timeout. +// Timeout Period to wait for each node to respond. +// If a node does not respond before its timeout expires, the response does not +// include its stats. +// However, timed out nodes are included in the response’s `_nodes.failed` +// property. Defaults to no timeout. 
// API name: timeout -func (r *Stats) Timeout(v string) *Stats { - r.values.Set("timeout", v) +func (r *Stats) Timeout(duration string) *Stats { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/bulk/bulk.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/bulk/bulk.go new file mode 100644 index 000000000..a67e5298c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/bulk/bulk.go @@ -0,0 +1,343 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Allows to perform multiple index/update/delete operations in a single +// request. 
+package bulk + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Bulk struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + req *Request + deferred []func(request *Request) error + raw io.Reader + + paramSet int + + index string +} + +// NewBulk type alias for index. +type NewBulk func() *Bulk + +// NewBulkFunc returns a new instance of Bulk with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewBulkFunc(tp elastictransport.Interface) NewBulk { + return func() *Bulk { + n := New(tp) + + return n + } +} + +// Allows to perform multiple index/update/delete operations in a single +// request. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-bulk.html +func New(tp elastictransport.Interface) *Bulk { + r := &Bulk{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Bulk) Raw(raw io.Reader) *Bulk { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Bulk) Request(req *Request) *Bulk { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Bulk) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw != nil { + r.buf.ReadFrom(r.raw) + } else if r.req != nil { + + for _, elem := range *r.req { + data, err := json.Marshal(elem) + if err != nil { + return nil, err + } + r.buf.Write(data) + r.buf.Write([]byte("\n")) + } + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Bulk: %w", err) + } + + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_bulk") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_bulk") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+x-ndjson;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Bulk) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the Bulk query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a bulk.Response +func (r Bulk) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// Header set a key, value pair in the Bulk headers map. +func (r *Bulk) Header(key, value string) *Bulk { + r.headers.Set(key, value) + + return r +} + +// Index Name of the data stream, index, or index alias to perform bulk actions on. +// API Name: index +func (r *Bulk) Index(index string) *Bulk { + r.paramSet |= indexMask + r.index = index + + return r +} + +// Pipeline ID of the pipeline to use to preprocess incoming documents. +// If the index has a default ingest pipeline specified, then setting the value +// to `_none` disables the default ingest pipeline for this request. +// If a final pipeline is configured it will always run, regardless of the value +// of this parameter. 
+// API name: pipeline +func (r *Bulk) Pipeline(pipeline string) *Bulk { + r.values.Set("pipeline", pipeline) + + return r +} + +// Refresh If `true`, Elasticsearch refreshes the affected shards to make this operation +// visible to search, if `wait_for` then wait for a refresh to make this +// operation visible to search, if `false` do nothing with refreshes. +// Valid values: `true`, `false`, `wait_for`. +// API name: refresh +func (r *Bulk) Refresh(refresh refresh.Refresh) *Bulk { + r.values.Set("refresh", refresh.String()) + + return r +} + +// Routing Custom value used to route operations to a specific shard. +// API name: routing +func (r *Bulk) Routing(routing string) *Bulk { + r.values.Set("routing", routing) + + return r +} + +// Source_ `true` or `false` to return the `_source` field or not, or a list of fields +// to return. +// API name: _source +func (r *Bulk) Source_(sourceconfigparam string) *Bulk { + r.values.Set("_source", sourceconfigparam) + + return r +} + +// SourceExcludes_ A comma-separated list of source fields to exclude from the response. +// API name: _source_excludes +func (r *Bulk) SourceExcludes_(fields ...string) *Bulk { + r.values.Set("_source_excludes", strings.Join(fields, ",")) + + return r +} + +// SourceIncludes_ A comma-separated list of source fields to include in the response. +// API name: _source_includes +func (r *Bulk) SourceIncludes_(fields ...string) *Bulk { + r.values.Set("_source_includes", strings.Join(fields, ",")) + + return r +} + +// Timeout Period each action waits for the following operations: automatic index +// creation, dynamic mapping updates, waiting for active shards. +// API name: timeout +func (r *Bulk) Timeout(duration string) *Bulk { + r.values.Set("timeout", duration) + + return r +} + +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. 
+// Set to all or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). +// API name: wait_for_active_shards +func (r *Bulk) WaitForActiveShards(waitforactiveshards string) *Bulk { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// RequireAlias If `true`, the request’s actions must target an index alias. +// API name: require_alias +func (r *Bulk) RequireAlias(requirealias bool) *Bulk { + r.values.Set("require_alias", strconv.FormatBool(requirealias)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/bulk/helpers.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/bulk/helpers.go new file mode 100644 index 000000000..00d95c7c8 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/bulk/helpers.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package bulk + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// CreateOp is a helper function to add a CreateOperation to the current bulk request. +// doc argument can be a []byte, json.RawMessage or a struct. 
+func (r *Bulk) CreateOp(op types.CreateOperation, doc interface{}) error { + operation := types.OperationContainer{Create: &op} + header, err := json.Marshal(operation) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + + if _, err := r.buf.Write(header); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + + switch v := doc.(type) { + case []byte: + if json.Valid(v) { + if _, err := r.buf.Write(v); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + } else { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: invalid json") + } + case json.RawMessage: + if json.Valid(v) { + if _, err := r.buf.Write(v); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + } else { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: invalid json") + } + default: + body, err := json.Marshal(doc) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + if _, err := r.buf.Write(body); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + } + + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + + return nil +} + +// IndexOp is a helper function to add an IndexOperation to the current bulk request. +// doc argument can be a []byte, json.RawMessage or a struct. 
+func (r *Bulk) IndexOp(op types.IndexOperation, doc interface{}) error { + operation := types.OperationContainer{Index: &op} + header, err := json.Marshal(operation) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + + if _, err := r.buf.Write(header); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + + switch v := doc.(type) { + case []byte: + if json.Valid(v) { + if _, err := r.buf.Write(v); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + } else { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: invalid json") + } + case json.RawMessage: + if json.Valid(v) { + if _, err := r.buf.Write(v); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + } else { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: invalid json") + } + default: + body, err := json.Marshal(doc) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + if _, err := r.buf.Write(body); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + } + + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + + return nil +} + +// UpdateOp is a helper function to add an UpdateOperation with and UpdateAction to the current bulk request. +// update is optional, if both doc and update.Doc are provided, update.Doc has precedence. 
+func (r *Bulk) UpdateOp(op types.UpdateOperation, doc interface{}, update *types.UpdateAction) error { + operation := types.OperationContainer{Update: &op} + header, err := json.Marshal(operation) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + + if _, err := r.buf.Write(header); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + + if update == nil { + update = types.NewUpdateAction() + } + + if len(update.Doc) == 0 { + switch v := doc.(type) { + case []byte: + if json.Valid(v) { + update.Doc = v + } else { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: invalid json") + } + case json.RawMessage: + if json.Valid(v) { + update.Doc = v + } else { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: invalid json") + } + default: + //doc can be nil if passed in script + if doc == nil { + break + } + body, err := json.Marshal(doc) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + update.Doc = body + } + } + + body, err := json.Marshal(update) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + if _, err := r.buf.Write(body); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + + return nil +} + +// DeleteOp is a helper function to add a DeleteOperation to the current bulk request. 
+func (r *Bulk) DeleteOp(op types.DeleteOperation) error { + operation := types.OperationContainer{Delete: &op} + header, err := json.Marshal(operation) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.DeleteOp: %w", err) + } + + if _, err := r.buf.Write(header); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.DeleteOp: %w", err) + } + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.DeleteOp: %w", err) + } + + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/customsettings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/bulk/request.go similarity index 70% rename from vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/customsettings.go rename to vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/bulk/request.go index 2e3810bf8..d9af574da 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/customsettings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/bulk/request.go @@ -16,13 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 -package types +package bulk -import "encoding/json" - -// CustomSettings type alias. 
+// Request holds the request body struct for the package bulk // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Settings.ts#L22-L27 -type CustomSettings json.RawMessage +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/bulk/BulkRequest.ts#L32-L103 +type Request = []interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/bulk/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/bulk/response.go new file mode 100644 index 000000000..3c9146782 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/bulk/response.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package bulk + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operationtype" +) + +// Response holds the response body struct for the package bulk +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/bulk/BulkResponse.ts#L24-L31 + +type Response struct { + Errors bool `json:"errors"` + IngestTook *int64 `json:"ingest_took,omitempty"` + Items []map[operationtype.OperationType]types.ResponseItem `json:"items"` + Took int64 `json:"took"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/clearscroll/clear_scroll.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/clearscroll/clear_scroll.go index a3753495e..8cc9417a5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/clearscroll/clear_scroll.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/clearscroll/clear_scroll.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Explicitly clears the search context for a scroll. 
package clearscroll @@ -28,6 +28,7 @@ import ( "errors" "fmt" "io" + "io/ioutil" "net/http" "net/url" "strings" @@ -52,8 +53,9 @@ type ClearScroll struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -82,6 +84,8 @@ func New(tp elastictransport.Interface) *ClearScroll { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -111,9 +115,19 @@ func (r *ClearScroll) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -121,6 +135,7 @@ func (r *ClearScroll) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -210,7 +225,30 @@ func (r ClearScroll) Do(ctx context.Context) (*Response, error) { } return response, nil + } + + if res.StatusCode == 404 { + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(gobytes.NewReader(data)).Decode(&errorResponse) + if err != nil { + return nil, err + } + if errorResponse.Status == 0 { + err = json.NewDecoder(gobytes.NewReader(data)).Decode(&response) + if err != nil { + return nil, err + } + + return response, nil + } + + return nil, errorResponse } errorResponse := types.NewElasticsearchError() @@ -219,6 +257,10 @@ func (r ClearScroll) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -229,11 +271,12 @@ func (r *ClearScroll) Header(key, value string) *ClearScroll { return r } -// ScrollId A comma-separated list of 
scroll IDs to clear +// ScrollId Comma-separated list of scroll IDs to clear. +// To clear all scroll IDs, use `_all`. // API Name: scrollid -func (r *ClearScroll) ScrollId(v string) *ClearScroll { +func (r *ClearScroll) ScrollId(scrollid string) *ClearScroll { r.paramSet |= scrollidMask - r.scrollid = v + r.scrollid = scrollid return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/clearscroll/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/clearscroll/request.go index b44be611b..aebafa6c9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/clearscroll/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/clearscroll/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package clearscroll @@ -27,8 +27,11 @@ import ( // Request holds the request body struct for the package clearscroll // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/clear_scroll/ClearScrollRequest.ts#L23-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/clear_scroll/ClearScrollRequest.ts#L23-L45 type Request struct { + + // ScrollId Scroll IDs to clear. + // To clear all scroll IDs, use `_all`. 
ScrollId []string `json:"scroll_id,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/clearscroll/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/clearscroll/response.go index ff8d36f91..f4959dc38 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/clearscroll/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/clearscroll/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package clearscroll // Response holds the response body struct for the package clearscroll // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/clear_scroll/ClearScrollResponse.ts#L22-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/clear_scroll/ClearScrollResponse.ts#L22-L36 type Response struct { NumFreed int `json:"num_freed"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/closepointintime/close_point_in_time.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/closepointintime/close_point_in_time.go index 4d7abc085..88b148b24 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/closepointintime/close_point_in_time.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/closepointintime/close_point_in_time.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Close a point in time package closepointintime @@ -28,6 +28,7 @@ import ( "errors" "fmt" "io" + "io/ioutil" "net/http" "net/url" "strings" @@ -48,8 +49,9 @@ type ClosePointInTime struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +78,8 @@ func New(tp elastictransport.Interface) *ClosePointInTime { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +109,19 @@ func (r *ClosePointInTime) HttpRequest(ctx context.Context) (*http.Request, erro var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +129,7 @@ func (r *ClosePointInTime) HttpRequest(ctx context.Context) (*http.Request, erro } r.buf.Write(data) + } r.path.Scheme = "http" @@ -192,7 +207,30 @@ func (r ClosePointInTime) Do(ctx context.Context) (*Response, error) { } return response, nil + } + + if res.StatusCode == 404 { + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(gobytes.NewReader(data)).Decode(&errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + err = json.NewDecoder(gobytes.NewReader(data)).Decode(&response) + if err != nil { + return nil, err + } + + return response, nil + } + return nil, errorResponse } errorResponse := types.NewElasticsearchError() @@ -201,6 +239,10 @@ func (r ClosePointInTime) Do(ctx context.Context) (*Response, error) { 
return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -210,3 +252,11 @@ func (r *ClosePointInTime) Header(key, value string) *ClosePointInTime { return r } + +// Id The ID of the point-in-time. +// API name: id +func (r *ClosePointInTime) Id(id string) *ClosePointInTime { + r.req.Id = id + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/closepointintime/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/closepointintime/request.go index 5913a5570..406958eb0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/closepointintime/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/closepointintime/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package closepointintime @@ -27,8 +27,10 @@ import ( // Request holds the request body struct for the package closepointintime // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/close_point_in_time/ClosePointInTimeRequest.ts#L23-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/close_point_in_time/ClosePointInTimeRequest.ts#L23-L37 type Request struct { + + // Id The ID of the point-in-time. 
Id string `json:"id"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/closepointintime/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/closepointintime/response.go index e7e1bd48a..6227522a8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/closepointintime/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/closepointintime/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package closepointintime // Response holds the response body struct for the package closepointintime // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/close_point_in_time/ClosePointInTimeResponse.ts#L22-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/close_point_in_time/ClosePointInTimeResponse.ts#L22-L36 type Response struct { NumFreed int `json:"num_freed"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/count/count.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/count/count.go index 3003de85e..d1600eb26 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/count/count.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/count/count.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns number of documents matching a query. package count @@ -35,7 +35,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" ) @@ -55,8 +55,9 @@ type Count struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -85,6 +86,8 @@ func New(tp elastictransport.Interface) *Count { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +117,19 @@ func (r *Count) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +137,7 @@ func (r *Count) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -209,7 +223,6 @@ func (r Count) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -218,6 +231,10 @@ func (r Count) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -228,132 +245,158 @@ func (r *Count) Header(key, value string) *Count { return r } -// Index A comma-separated list of indices to restrict the results +// Index Comma-separated list of 
data streams, indices, and aliases to search. +// Supports wildcards (`*`). +// To search all data streams and indices, omit this parameter or use `*` or +// `_all`. // API Name: index -func (r *Count) Index(v string) *Count { +func (r *Count) Index(index string) *Count { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. // API name: allow_no_indices -func (r *Count) AllowNoIndices(b bool) *Count { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *Count) AllowNoIndices(allownoindices bool) *Count { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// Analyzer The analyzer to use for the query string +// Analyzer Analyzer to use for the query string. +// This parameter can only be used when the `q` query string parameter is +// specified. // API name: analyzer -func (r *Count) Analyzer(v string) *Count { - r.values.Set("analyzer", v) +func (r *Count) Analyzer(analyzer string) *Count { + r.values.Set("analyzer", analyzer) return r } -// AnalyzeWildcard Specify whether wildcard and prefix queries should be analyzed (default: -// false) +// AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. +// This parameter can only be used when the `q` query string parameter is +// specified. 
// API name: analyze_wildcard -func (r *Count) AnalyzeWildcard(b bool) *Count { - r.values.Set("analyze_wildcard", strconv.FormatBool(b)) +func (r *Count) AnalyzeWildcard(analyzewildcard bool) *Count { + r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) return r } -// DefaultOperator The default operator for query string query (AND or OR) +// DefaultOperator The default operator for query string query: `AND` or `OR`. +// This parameter can only be used when the `q` query string parameter is +// specified. // API name: default_operator -func (r *Count) DefaultOperator(enum operator.Operator) *Count { - r.values.Set("default_operator", enum.String()) +func (r *Count) DefaultOperator(defaultoperator operator.Operator) *Count { + r.values.Set("default_operator", defaultoperator.String()) return r } -// Df The field to use as default where no field prefix is given in the query -// string +// Df Field to use as default where no field prefix is given in the query string. +// This parameter can only be used when the `q` query string parameter is +// specified. // API name: df -func (r *Count) Df(v string) *Count { - r.values.Set("df", v) +func (r *Count) Df(df string) *Count { + r.values.Set("df", df) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. 
// API name: expand_wildcards -func (r *Count) ExpandWildcards(v string) *Count { - r.values.Set("expand_wildcards", v) +func (r *Count) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Count { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// IgnoreThrottled Whether specified concrete, expanded or aliased indices should be ignored -// when throttled +// IgnoreThrottled If `true`, concrete, expanded or aliased indices are ignored when frozen. // API name: ignore_throttled -func (r *Count) IgnoreThrottled(b bool) *Count { - r.values.Set("ignore_throttled", strconv.FormatBool(b)) +func (r *Count) IgnoreThrottled(ignorethrottled bool) *Count { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *Count) IgnoreUnavailable(b bool) *Count { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Count) IgnoreUnavailable(ignoreunavailable bool) *Count { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// Lenient Specify whether format-based query failures (such as providing text to a -// numeric field) should be ignored +// Lenient If `true`, format-based query failures (such as providing text to a numeric +// field) in the query string will be ignored. 
// API name: lenient -func (r *Count) Lenient(b bool) *Count { - r.values.Set("lenient", strconv.FormatBool(b)) +func (r *Count) Lenient(lenient bool) *Count { + r.values.Set("lenient", strconv.FormatBool(lenient)) return r } -// MinScore Include only documents with a specific `_score` value in the result +// MinScore Sets the minimum `_score` value that documents must have to be included in +// the result. // API name: min_score -func (r *Count) MinScore(v string) *Count { - r.values.Set("min_score", v) +func (r *Count) MinScore(minscore string) *Count { + r.values.Set("min_score", minscore) return r } -// Preference Specify the node or shard the operation should be performed on (default: -// random) +// Preference Specifies the node or shard the operation should be performed on. +// Random by default. // API name: preference -func (r *Count) Preference(v string) *Count { - r.values.Set("preference", v) +func (r *Count) Preference(preference string) *Count { + r.values.Set("preference", preference) return r } -// Routing A comma-separated list of specific routing values +// Routing Custom value used to route operations to a specific shard. // API name: routing -func (r *Count) Routing(v string) *Count { - r.values.Set("routing", v) +func (r *Count) Routing(routing string) *Count { + r.values.Set("routing", routing) return r } -// TerminateAfter The maximum count for each shard, upon reaching which the query execution -// will terminate early +// TerminateAfter Maximum number of documents to collect for each shard. +// If a query reaches this limit, Elasticsearch terminates the query early. +// Elasticsearch collects documents before sorting. 
// API name: terminate_after -func (r *Count) TerminateAfter(v string) *Count { - r.values.Set("terminate_after", v) +func (r *Count) TerminateAfter(terminateafter string) *Count { + r.values.Set("terminate_after", terminateafter) return r } -// Q Query in the Lucene query string syntax +// Q Query in the Lucene query string syntax. // API name: q -func (r *Count) Q(v string) *Count { - r.values.Set("q", v) +func (r *Count) Q(q string) *Count { + r.values.Set("q", q) + + return r +} + +// Query Defines the search definition using the Query DSL. +// API name: query +func (r *Count) Query(query *types.Query) *Count { + + r.req.Query = query return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/count/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/count/request.go index 27cec360f..2d69937df 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/count/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/count/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package count @@ -29,8 +29,10 @@ import ( // Request holds the request body struct for the package count // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/count/CountRequest.ts#L26-L54 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/count/CountRequest.ts#L26-L120 type Request struct { + + // Query Defines the search definition using the Query DSL. 
Query *types.Query `json:"query,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/count/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/count/response.go index 2f69ddd34..d27ed2a9f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/count/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/count/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package count @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package count // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/count/CountResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/count/CountResponse.ts#L23-L25 type Response struct { Count int64 `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/create/create.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/create/create.go index b1eaa0f27..b1094e1e8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/create/create.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/create/create.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates a new document in the index. 
// @@ -37,7 +37,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" ) @@ -60,8 +59,9 @@ type Create struct { buf *gobytes.Buffer - req interface{} - raw io.Reader + req interface{} + deferred []func(request interface{}) error + raw io.Reader paramSet int @@ -78,9 +78,9 @@ func NewCreateFunc(tp elastictransport.Interface) NewCreate { return func(index, id string) *Create { n := New(tp) - n.Id(id) + n._id(id) - n.Index(index) + n._index(index) return n } @@ -118,6 +118,13 @@ func (r *Create) Request(req interface{}) *Create { return r } +// Document allows to set the request property with the appropriate payload. +func (r *Create) Document(document interface{}) *Create { + r.req = document + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. 
func (r *Create) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -127,9 +134,19 @@ func (r *Create) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -137,6 +154,7 @@ func (r *Create) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -220,7 +238,6 @@ func (r Create) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -229,6 +246,10 @@ func (r Create) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -239,81 +260,94 @@ func (r *Create) Header(key, value string) *Create { return r } -// Id Document ID +// Id Unique identifier for the document. // API Name: id -func (r *Create) Id(v string) *Create { +func (r *Create) _id(id string) *Create { r.paramSet |= idMask - r.id = v + r.id = id return r } -// Index The name of the index +// Index Name of the data stream or index to target. +// If the target doesn’t exist and matches the name or wildcard (`*`) pattern of +// an index template with a `data_stream` definition, this request creates the +// data stream. +// If the target doesn’t exist and doesn’t match a data stream template, this +// request creates the index. // API Name: index -func (r *Create) Index(v string) *Create { +func (r *Create) _index(index string) *Create { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// Pipeline The pipeline id to preprocess incoming documents with +// Pipeline ID of the pipeline to use to preprocess incoming documents. 
+// If the index has a default ingest pipeline specified, then setting the value +// to `_none` disables the default ingest pipeline for this request. +// If a final pipeline is configured it will always run, regardless of the value +// of this parameter. // API name: pipeline -func (r *Create) Pipeline(v string) *Create { - r.values.Set("pipeline", v) +func (r *Create) Pipeline(pipeline string) *Create { + r.values.Set("pipeline", pipeline) return r } -// Refresh If `true` then refresh the affected shards to make this operation visible to -// search, if `wait_for` then wait for a refresh to make this operation visible -// to search, if `false` (the default) then do nothing with refreshes. +// Refresh If `true`, Elasticsearch refreshes the affected shards to make this operation +// visible to search, if `wait_for` then wait for a refresh to make this +// operation visible to search, if `false` do nothing with refreshes. +// Valid values: `true`, `false`, `wait_for`. // API name: refresh -func (r *Create) Refresh(enum refresh.Refresh) *Create { - r.values.Set("refresh", enum.String()) +func (r *Create) Refresh(refresh refresh.Refresh) *Create { + r.values.Set("refresh", refresh.String()) return r } -// Routing Specific routing value +// Routing Custom value used to route operations to a specific shard. // API name: routing -func (r *Create) Routing(v string) *Create { - r.values.Set("routing", v) +func (r *Create) Routing(routing string) *Create { + r.values.Set("routing", routing) return r } -// Timeout Explicit operation timeout +// Timeout Period the request waits for the following operations: automatic index +// creation, dynamic mapping updates, waiting for active shards. 
// API name: timeout -func (r *Create) Timeout(v string) *Create { - r.values.Set("timeout", v) +func (r *Create) Timeout(duration string) *Create { + r.values.Set("timeout", duration) return r } -// Version Explicit version number for concurrency control +// Version Explicit version number for concurrency control. +// The specified version must match the current version of the document for the +// request to succeed. // API name: version -func (r *Create) Version(v string) *Create { - r.values.Set("version", v) +func (r *Create) Version(versionnumber string) *Create { + r.values.Set("version", versionnumber) return r } -// VersionType Specific version type +// VersionType Specific version type: `external`, `external_gte`. // API name: version_type -func (r *Create) VersionType(enum versiontype.VersionType) *Create { - r.values.Set("version_type", enum.String()) +func (r *Create) VersionType(versiontype versiontype.VersionType) *Create { + r.values.Set("version_type", versiontype.String()) return r } -// WaitForActiveShards Sets the number of shard copies that must be active before proceeding with -// the index operation. Defaults to 1, meaning the primary shard only. Set to -// `all` for all shard copies, otherwise set to any non-negative value less than -// or equal to the total number of copies for the shard (number of replicas + 1) +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). 
// API name: wait_for_active_shards -func (r *Create) WaitForActiveShards(v string) *Create { - r.values.Set("wait_for_active_shards", v) +func (r *Create) WaitForActiveShards(waitforactiveshards string) *Create { + r.values.Set("wait_for_active_shards", waitforactiveshards) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/noderoles.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/create/request.go similarity index 69% rename from vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/noderoles.go rename to vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/create/request.go index c67cc51e4..f4bde62ab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/noderoles.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/create/request.go @@ -16,15 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 -package types +package create import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole" + "encoding/json" ) -// NodeRoles type alias. 
+// Request holds the request body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Node.ts#L96-L99 -type NodeRoles []noderole.NodeRole +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/create/CreateRequest.ts#L32-L95 +type Request = json.RawMessage diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/create/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/create/response.go index b8666f658..b8cb0a24b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/create/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/create/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package create @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/create/CreateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/create/CreateResponse.ts#L22-L24 type Response struct { ForcedRefresh *bool `json:"forced_refresh,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/delete/delete.go index d32d3532b..139b8f08d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/delete/delete.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/delete/delete.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Removes a document from the index. package delete @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" ) @@ -73,9 +72,9 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { return func(index, id string) *Delete { n := New(tp) - n.Id(id) + n._id(id) - n.Index(index) + n._index(index) return n } @@ -179,7 +178,30 @@ func (r Delete) Do(ctx context.Context) (*Response, error) { } return response, nil + } + + if res.StatusCode == 404 { + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(gobytes.NewReader(data)).Decode(&errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + err = json.NewDecoder(gobytes.NewReader(data)).Decode(&response) + if err != nil { + return nil, err + } + + return response, nil + } + + return nil, errorResponse } errorResponse := types.NewElasticsearchError() @@ -188,6 +210,10 @@ func (r Delete) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -219,91 +245,92 @@ func (r *Delete) Header(key, value string) *Delete { return r } -// Id The document ID +// Id Unique identifier for the document. 
// API Name: id -func (r *Delete) Id(v string) *Delete { +func (r *Delete) _id(id string) *Delete { r.paramSet |= idMask - r.id = v + r.id = id return r } -// Index The name of the index +// Index Name of the target index. // API Name: index -func (r *Delete) Index(v string) *Delete { +func (r *Delete) _index(index string) *Delete { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// IfPrimaryTerm only perform the delete operation if the last operation that has changed the -// document has the specified primary term +// IfPrimaryTerm Only perform the operation if the document has this primary term. // API name: if_primary_term -func (r *Delete) IfPrimaryTerm(v string) *Delete { - r.values.Set("if_primary_term", v) +func (r *Delete) IfPrimaryTerm(ifprimaryterm string) *Delete { + r.values.Set("if_primary_term", ifprimaryterm) return r } -// IfSeqNo only perform the delete operation if the last operation that has changed the -// document has the specified sequence number +// IfSeqNo Only perform the operation if the document has this sequence number. // API name: if_seq_no -func (r *Delete) IfSeqNo(v string) *Delete { - r.values.Set("if_seq_no", v) +func (r *Delete) IfSeqNo(sequencenumber string) *Delete { + r.values.Set("if_seq_no", sequencenumber) return r } -// Refresh If `true` then refresh the affected shards to make this operation visible to -// search, if `wait_for` then wait for a refresh to make this operation visible -// to search, if `false` (the default) then do nothing with refreshes. +// Refresh If `true`, Elasticsearch refreshes the affected shards to make this operation +// visible to search, if `wait_for` then wait for a refresh to make this +// operation visible to search, if `false` do nothing with refreshes. +// Valid values: `true`, `false`, `wait_for`. 
// API name: refresh -func (r *Delete) Refresh(enum refresh.Refresh) *Delete { - r.values.Set("refresh", enum.String()) +func (r *Delete) Refresh(refresh refresh.Refresh) *Delete { + r.values.Set("refresh", refresh.String()) return r } -// Routing Specific routing value +// Routing Custom value used to route operations to a specific shard. // API name: routing -func (r *Delete) Routing(v string) *Delete { - r.values.Set("routing", v) +func (r *Delete) Routing(routing string) *Delete { + r.values.Set("routing", routing) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for active shards. // API name: timeout -func (r *Delete) Timeout(v string) *Delete { - r.values.Set("timeout", v) +func (r *Delete) Timeout(duration string) *Delete { + r.values.Set("timeout", duration) return r } -// Version Explicit version number for concurrency control +// Version Explicit version number for concurrency control. +// The specified version must match the current version of the document for the +// request to succeed. // API name: version -func (r *Delete) Version(v string) *Delete { - r.values.Set("version", v) +func (r *Delete) Version(versionnumber string) *Delete { + r.values.Set("version", versionnumber) return r } -// VersionType Specific version type +// VersionType Specific version type: `external`, `external_gte`. // API name: version_type -func (r *Delete) VersionType(enum versiontype.VersionType) *Delete { - r.values.Set("version_type", enum.String()) +func (r *Delete) VersionType(versiontype versiontype.VersionType) *Delete { + r.values.Set("version_type", versiontype.String()) return r } -// WaitForActiveShards Sets the number of shard copies that must be active before proceeding with -// the delete operation. Defaults to 1, meaning the primary shard only. 
Set to -// `all` for all shard copies, otherwise set to any non-negative value less than -// or equal to the total number of copies for the shard (number of replicas + 1) +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). // API name: wait_for_active_shards -func (r *Delete) WaitForActiveShards(v string) *Delete { - r.values.Set("wait_for_active_shards", v) +func (r *Delete) WaitForActiveShards(waitforactiveshards string) *Delete { + r.values.Set("wait_for_active_shards", waitforactiveshards) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/delete/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/delete/response.go index ad0ffe960..34bcba58f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/delete/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/delete/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package delete @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/delete/DeleteResponse.ts#L22-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/delete/DeleteResponse.ts#L22-L34 type Response struct { ForcedRefresh *bool `json:"forced_refresh,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyquery/delete_by_query.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyquery/delete_by_query.go index 9aee2dd44..783479059 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyquery/delete_by_query.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyquery/delete_by_query.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes documents matching the provided query. 
package deletebyquery @@ -35,8 +35,8 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conflicts" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype" ) @@ -57,8 +57,9 @@ type DeleteByQuery struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -74,7 +75,7 @@ func NewDeleteByQueryFunc(tp elastictransport.Interface) NewDeleteByQuery { return func(index string) *DeleteByQuery { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -89,6 +90,8 @@ func New(tp elastictransport.Interface) *DeleteByQuery { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -118,9 +121,19 @@ func (r *DeleteByQuery) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -128,6 +141,7 @@ func (r *DeleteByQuery) HttpRequest(ctx context.Context) (*http.Request, error) } r.buf.Write(data) + } r.path.Scheme = "http" @@ -208,7 +222,6 @@ func (r DeleteByQuery) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -217,6 +230,10 @@ func (r DeleteByQuery) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -227,261 +244,303 @@ func (r *DeleteByQuery) 
Header(key, value string) *DeleteByQuery { return r } -// Index A comma-separated list of index names to search; use `_all` or empty string -// to perform the operation on all indices +// Index Comma-separated list of data streams, indices, and aliases to search. +// Supports wildcards (`*`). +// To search all data streams or indices, omit this parameter or use `*` or +// `_all`. // API Name: index -func (r *DeleteByQuery) Index(v string) *DeleteByQuery { +func (r *DeleteByQuery) _index(index string) *DeleteByQuery { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. // API name: allow_no_indices -func (r *DeleteByQuery) AllowNoIndices(b bool) *DeleteByQuery { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *DeleteByQuery) AllowNoIndices(allownoindices bool) *DeleteByQuery { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// Analyzer The analyzer to use for the query string +// Analyzer Analyzer to use for the query string. // API name: analyzer -func (r *DeleteByQuery) Analyzer(v string) *DeleteByQuery { - r.values.Set("analyzer", v) +func (r *DeleteByQuery) Analyzer(analyzer string) *DeleteByQuery { + r.values.Set("analyzer", analyzer) return r } -// AnalyzeWildcard Specify whether wildcard and prefix queries should be analyzed (default: -// false) +// AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. 
// API name: analyze_wildcard -func (r *DeleteByQuery) AnalyzeWildcard(b bool) *DeleteByQuery { - r.values.Set("analyze_wildcard", strconv.FormatBool(b)) +func (r *DeleteByQuery) AnalyzeWildcard(analyzewildcard bool) *DeleteByQuery { + r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) return r } -// Conflicts What to do when the delete by query hits version conflicts? +// Conflicts What to do if delete by query hits version conflicts: `abort` or `proceed`. // API name: conflicts -func (r *DeleteByQuery) Conflicts(enum conflicts.Conflicts) *DeleteByQuery { - r.values.Set("conflicts", enum.String()) +func (r *DeleteByQuery) Conflicts(conflicts conflicts.Conflicts) *DeleteByQuery { + r.values.Set("conflicts", conflicts.String()) return r } -// DefaultOperator The default operator for query string query (AND or OR) +// DefaultOperator The default operator for query string query: `AND` or `OR`. // API name: default_operator -func (r *DeleteByQuery) DefaultOperator(enum operator.Operator) *DeleteByQuery { - r.values.Set("default_operator", enum.String()) +func (r *DeleteByQuery) DefaultOperator(defaultoperator operator.Operator) *DeleteByQuery { + r.values.Set("default_operator", defaultoperator.String()) return r } -// Df The field to use as default where no field prefix is given in the query -// string +// Df Field to use as default where no field prefix is given in the query string. // API name: df -func (r *DeleteByQuery) Df(v string) *DeleteByQuery { - r.values.Set("df", v) +func (r *DeleteByQuery) Df(df string) *DeleteByQuery { + r.values.Set("df", df) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. 
Valid values are: +// `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *DeleteByQuery) ExpandWildcards(v string) *DeleteByQuery { - r.values.Set("expand_wildcards", v) +func (r *DeleteByQuery) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *DeleteByQuery { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } // From Starting offset (default: 0) // API name: from -func (r *DeleteByQuery) From(v string) *DeleteByQuery { - r.values.Set("from", v) +func (r *DeleteByQuery) From(from string) *DeleteByQuery { + r.values.Set("from", from) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *DeleteByQuery) IgnoreUnavailable(b bool) *DeleteByQuery { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *DeleteByQuery) IgnoreUnavailable(ignoreunavailable bool) *DeleteByQuery { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// Lenient Specify whether format-based query failures (such as providing text to a -// numeric field) should be ignored +// Lenient If `true`, format-based query failures (such as providing text to a numeric +// field) in the query string will be ignored. 
// API name: lenient -func (r *DeleteByQuery) Lenient(b bool) *DeleteByQuery { - r.values.Set("lenient", strconv.FormatBool(b)) - - return r -} - -// MaxDocs Maximum number of documents to process (default: all documents) -// API name: max_docs -func (r *DeleteByQuery) MaxDocs(v string) *DeleteByQuery { - r.values.Set("max_docs", v) +func (r *DeleteByQuery) Lenient(lenient bool) *DeleteByQuery { + r.values.Set("lenient", strconv.FormatBool(lenient)) return r } -// Preference Specify the node or shard the operation should be performed on (default: -// random) +// Preference Specifies the node or shard the operation should be performed on. +// Random by default. // API name: preference -func (r *DeleteByQuery) Preference(v string) *DeleteByQuery { - r.values.Set("preference", v) +func (r *DeleteByQuery) Preference(preference string) *DeleteByQuery { + r.values.Set("preference", preference) return r } -// Refresh Should the affected indexes be refreshed? +// Refresh If `true`, Elasticsearch refreshes all shards involved in the delete by query +// after the request completes. // API name: refresh -func (r *DeleteByQuery) Refresh(b bool) *DeleteByQuery { - r.values.Set("refresh", strconv.FormatBool(b)) +func (r *DeleteByQuery) Refresh(refresh bool) *DeleteByQuery { + r.values.Set("refresh", strconv.FormatBool(refresh)) return r } -// RequestCache Specify if request cache should be used for this request or not, defaults to -// index level setting +// RequestCache If `true`, the request cache is used for this request. +// Defaults to the index-level setting. // API name: request_cache -func (r *DeleteByQuery) RequestCache(b bool) *DeleteByQuery { - r.values.Set("request_cache", strconv.FormatBool(b)) +func (r *DeleteByQuery) RequestCache(requestcache bool) *DeleteByQuery { + r.values.Set("request_cache", strconv.FormatBool(requestcache)) return r } -// RequestsPerSecond The throttle for this request in sub-requests per second. -1 means no -// throttle. 
+// RequestsPerSecond The throttle for this request in sub-requests per second. // API name: requests_per_second -func (r *DeleteByQuery) RequestsPerSecond(v string) *DeleteByQuery { - r.values.Set("requests_per_second", v) +func (r *DeleteByQuery) RequestsPerSecond(requestspersecond string) *DeleteByQuery { + r.values.Set("requests_per_second", requestspersecond) return r } -// Routing A comma-separated list of specific routing values +// Routing Custom value used to route operations to a specific shard. // API name: routing -func (r *DeleteByQuery) Routing(v string) *DeleteByQuery { - r.values.Set("routing", v) +func (r *DeleteByQuery) Routing(routing string) *DeleteByQuery { + r.values.Set("routing", routing) return r } -// Q Query in the Lucene query string syntax +// Q Query in the Lucene query string syntax. // API name: q -func (r *DeleteByQuery) Q(v string) *DeleteByQuery { - r.values.Set("q", v) +func (r *DeleteByQuery) Q(q string) *DeleteByQuery { + r.values.Set("q", q) return r } -// Scroll Specify how long a consistent view of the index should be maintained for -// scrolled search +// Scroll Period to retain the search context for scrolling. // API name: scroll -func (r *DeleteByQuery) Scroll(v string) *DeleteByQuery { - r.values.Set("scroll", v) +func (r *DeleteByQuery) Scroll(duration string) *DeleteByQuery { + r.values.Set("scroll", duration) return r } -// ScrollSize Size on the scroll request powering the delete by query +// ScrollSize Size of the scroll request that powers the operation. // API name: scroll_size -func (r *DeleteByQuery) ScrollSize(v string) *DeleteByQuery { - r.values.Set("scroll_size", v) +func (r *DeleteByQuery) ScrollSize(scrollsize string) *DeleteByQuery { + r.values.Set("scroll_size", scrollsize) return r } -// SearchTimeout Explicit timeout for each search request. Defaults to no timeout. +// SearchTimeout Explicit timeout for each search request. +// Defaults to no timeout. 
// API name: search_timeout -func (r *DeleteByQuery) SearchTimeout(v string) *DeleteByQuery { - r.values.Set("search_timeout", v) +func (r *DeleteByQuery) SearchTimeout(duration string) *DeleteByQuery { + r.values.Set("search_timeout", duration) return r } -// SearchType Search operation type +// SearchType The type of the search operation. +// Available options: `query_then_fetch`, `dfs_query_then_fetch`. // API name: search_type -func (r *DeleteByQuery) SearchType(enum searchtype.SearchType) *DeleteByQuery { - r.values.Set("search_type", enum.String()) +func (r *DeleteByQuery) SearchType(searchtype searchtype.SearchType) *DeleteByQuery { + r.values.Set("search_type", searchtype.String()) return r } -// Slices The number of slices this task should be divided into. Defaults to 1, meaning -// the task isn't sliced into subtasks. Can be set to `auto`. +// Slices The number of slices this task should be divided into. // API name: slices -func (r *DeleteByQuery) Slices(v string) *DeleteByQuery { - r.values.Set("slices", v) +func (r *DeleteByQuery) Slices(slices string) *DeleteByQuery { + r.values.Set("slices", slices) return r } -// Sort A comma-separated list of : pairs +// Sort A comma-separated list of : pairs. // API name: sort -func (r *DeleteByQuery) Sort(v string) *DeleteByQuery { - r.values.Set("sort", v) +func (r *DeleteByQuery) Sort(sorts ...string) *DeleteByQuery { + tmp := []string{} + for _, item := range sorts { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("sort", strings.Join(tmp, ",")) return r } -// Stats Specific 'tag' of the request for logging and statistical purposes +// Stats Specific `tag` of the request for logging and statistical purposes. 
// API name: stats -func (r *DeleteByQuery) Stats(v string) *DeleteByQuery { - r.values.Set("stats", v) +func (r *DeleteByQuery) Stats(stats ...string) *DeleteByQuery { + tmp := []string{} + for _, item := range stats { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("stats", strings.Join(tmp, ",")) return r } -// TerminateAfter The maximum number of documents to collect for each shard, upon reaching -// which the query execution will terminate early. +// TerminateAfter Maximum number of documents to collect for each shard. +// If a query reaches this limit, Elasticsearch terminates the query early. +// Elasticsearch collects documents before sorting. +// Use with caution. +// Elasticsearch applies this parameter to each shard handling the request. +// When possible, let Elasticsearch perform early termination automatically. +// Avoid specifying this parameter for requests that target data streams with +// backing indices across multiple data tiers. // API name: terminate_after -func (r *DeleteByQuery) TerminateAfter(v string) *DeleteByQuery { - r.values.Set("terminate_after", v) +func (r *DeleteByQuery) TerminateAfter(terminateafter string) *DeleteByQuery { + r.values.Set("terminate_after", terminateafter) return r } -// Timeout Time each individual bulk request should wait for shards that are -// unavailable. +// Timeout Period each deletion request waits for active shards. // API name: timeout -func (r *DeleteByQuery) Timeout(v string) *DeleteByQuery { - r.values.Set("timeout", v) +func (r *DeleteByQuery) Timeout(duration string) *DeleteByQuery { + r.values.Set("timeout", duration) return r } -// Version Specify whether to return document version as part of a hit +// Version If `true`, returns the document version as part of a hit. 
// API name: version -func (r *DeleteByQuery) Version(b bool) *DeleteByQuery { - r.values.Set("version", strconv.FormatBool(b)) +func (r *DeleteByQuery) Version(version bool) *DeleteByQuery { + r.values.Set("version", strconv.FormatBool(version)) return r } -// WaitForActiveShards Sets the number of shard copies that must be active before proceeding with -// the delete by query operation. Defaults to 1, meaning the primary shard only. -// Set to `all` for all shard copies, otherwise set to any non-negative value -// less than or equal to the total number of copies for the shard (number of -// replicas + 1) +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to all or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). // API name: wait_for_active_shards -func (r *DeleteByQuery) WaitForActiveShards(v string) *DeleteByQuery { - r.values.Set("wait_for_active_shards", v) +func (r *DeleteByQuery) WaitForActiveShards(waitforactiveshards string) *DeleteByQuery { + r.values.Set("wait_for_active_shards", waitforactiveshards) return r } -// WaitForCompletion Should the request should block until the delete by query is complete. +// WaitForCompletion If `true`, the request blocks until the operation is complete. // API name: wait_for_completion -func (r *DeleteByQuery) WaitForCompletion(b bool) *DeleteByQuery { - r.values.Set("wait_for_completion", strconv.FormatBool(b)) +func (r *DeleteByQuery) WaitForCompletion(waitforcompletion bool) *DeleteByQuery { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// MaxDocs The maximum number of documents to delete. +// API name: max_docs +func (r *DeleteByQuery) MaxDocs(maxdocs int64) *DeleteByQuery { + + r.req.MaxDocs = &maxdocs + + return r +} + +// Query Specifies the documents to delete using the Query DSL. 
+// API name: query +func (r *DeleteByQuery) Query(query *types.Query) *DeleteByQuery { + + r.req.Query = query + + return r +} + +// Slice Slice the request manually using the provided slice ID and total number of +// slices. +// API name: slice +func (r *DeleteByQuery) Slice(slice *types.SlicedScroll) *DeleteByQuery { + + r.req.Slice = slice return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyquery/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyquery/request.go index 8ca900fc9..7d5d47c9c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyquery/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyquery/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletebyquery @@ -29,11 +29,16 @@ import ( // Request holds the request body struct for the package deletebyquery // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/delete_by_query/DeleteByQueryRequest.ts#L36-L81 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/delete_by_query/DeleteByQueryRequest.ts#L36-L209 type Request struct { - MaxDocs *int64 `json:"max_docs,omitempty"` - Query *types.Query `json:"query,omitempty"` - Slice *types.SlicedScroll `json:"slice,omitempty"` + + // MaxDocs The maximum number of documents to delete. + MaxDocs *int64 `json:"max_docs,omitempty"` + // Query Specifies the documents to delete using the Query DSL. 
+ Query *types.Query `json:"query,omitempty"` + // Slice Slice the request manually using the provided slice ID and total number of + // slices. + Slice *types.SlicedScroll `json:"slice,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyquery/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyquery/response.go index 848e94a3c..ced3108ef 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyquery/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyquery/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletebyquery @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package deletebyquery // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/delete_by_query/DeleteByQueryResponse.ts#L26-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/delete_by_query/DeleteByQueryResponse.ts#L26-L45 type Response struct { Batches *int64 `json:"batches,omitempty"` @@ -38,9 +38,9 @@ type Response struct { SliceId *int `json:"slice_id,omitempty"` Task types.TaskId `json:"task,omitempty"` Throttled types.Duration `json:"throttled,omitempty"` - ThrottledMillis int64 `json:"throttled_millis"` + ThrottledMillis *int64 `json:"throttled_millis,omitempty"` ThrottledUntil types.Duration `json:"throttled_until,omitempty"` - ThrottledUntilMillis int64 `json:"throttled_until_millis"` + ThrottledUntilMillis *int64 `json:"throttled_until_millis,omitempty"` TimedOut *bool 
`json:"timed_out,omitempty"` Took *int64 `json:"took,omitempty"` Total *int64 `json:"total,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyqueryrethrottle/delete_by_query_rethrottle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyqueryrethrottle/delete_by_query_rethrottle.go index 50b11193c..e3f28e45f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyqueryrethrottle/delete_by_query_rethrottle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyqueryrethrottle/delete_by_query_rethrottle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Changes the number of requests per second for a particular Delete By Query // operation. @@ -68,7 +68,7 @@ func NewDeleteByQueryRethrottleFunc(tp elastictransport.Interface) NewDeleteByQu return func(taskid string) *DeleteByQueryRethrottle { n := New(tp) - n.TaskId(taskid) + n._taskid(taskid) return n } @@ -172,7 +172,6 @@ func (r DeleteByQueryRethrottle) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r DeleteByQueryRethrottle) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,20 +215,19 @@ func (r *DeleteByQueryRethrottle) Header(key, value string) *DeleteByQueryRethro return r } -// TaskId The task id to rethrottle +// TaskId The ID for the task. 
// API Name: taskid -func (r *DeleteByQueryRethrottle) TaskId(v string) *DeleteByQueryRethrottle { +func (r *DeleteByQueryRethrottle) _taskid(taskid string) *DeleteByQueryRethrottle { r.paramSet |= taskidMask - r.taskid = v + r.taskid = taskid return r } -// RequestsPerSecond The throttle to set on this request in floating sub-requests per second. -1 -// means set no throttle. +// RequestsPerSecond The throttle for this request in sub-requests per second. // API name: requests_per_second -func (r *DeleteByQueryRethrottle) RequestsPerSecond(v string) *DeleteByQueryRethrottle { - r.values.Set("requests_per_second", v) +func (r *DeleteByQueryRethrottle) RequestsPerSecond(requestspersecond string) *DeleteByQueryRethrottle { + r.values.Set("requests_per_second", requestspersecond) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyqueryrethrottle/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyqueryrethrottle/response.go index 827c506a4..e6e00c289 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyqueryrethrottle/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletebyqueryrethrottle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletebyqueryrethrottle @@ -31,7 +31,7 @@ import ( // Response holds the response body struct for the package deletebyqueryrethrottle // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/delete_by_query_rethrottle/DeleteByQueryRethrottleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/delete_by_query_rethrottle/DeleteByQueryRethrottleResponse.ts#L22-L24 type Response struct { NodeFailures []types.ErrorCause `json:"node_failures,omitempty"` @@ -73,6 +73,9 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "nodes": + if s.Nodes == nil { + s.Nodes = make(map[string]types.NodeTasks, 0) + } if err := dec.Decode(&s.Nodes); err != nil { return err } @@ -83,8 +86,24 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "tasks": - if err := dec.Decode(&s.Tasks); err != nil { - return err + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]types.ParentTaskInfo, 0) + if err := localDec.Decode(&o); err != nil { + return err + } + s.Tasks = o + case '[': + o := []types.TaskInfo{} + if err := localDec.Decode(&o); err != nil { + return err + } + s.Tasks = o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletescript/delete_script.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletescript/delete_script.go index 5b3936d93..7324c1663 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletescript/delete_script.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletescript/delete_script.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes a script. package deletescript @@ -67,7 +67,7 @@ func NewDeleteScriptFunc(tp elastictransport.Interface) NewDeleteScript { return func(id string) *DeleteScript { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -168,7 +168,6 @@ func (r DeleteScript) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -177,6 +176,10 @@ func (r DeleteScript) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -208,27 +211,31 @@ func (r *DeleteScript) Header(key, value string) *DeleteScript { return r } -// Id Script ID +// Id Identifier for the stored script or search template. // API Name: id -func (r *DeleteScript) Id(v string) *DeleteScript { +func (r *DeleteScript) _id(id string) *DeleteScript { r.paramSet |= idMask - r.id = v + r.id = id return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *DeleteScript) MasterTimeout(v string) *DeleteScript { - r.values.Set("master_timeout", v) +func (r *DeleteScript) MasterTimeout(duration string) *DeleteScript { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. 
+// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *DeleteScript) Timeout(v string) *DeleteScript { - r.values.Set("timeout", v) +func (r *DeleteScript) Timeout(duration string) *DeleteScript { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletescript/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletescript/response.go index d7ff72532..cfe2f635f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletescript/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/deletescript/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletescript // Response holds the response body struct for the package deletescript // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/delete_script/DeleteScriptResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/delete_script/DeleteScriptResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/exists/exists.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/exists/exists.go index ed57809a8..7fde7ea4f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/exists/exists.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/exists/exists.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about whether a document exists in an index. package exists @@ -24,7 +24,6 @@ package exists import ( gobytes "bytes" "context" - "encoding/json" "errors" "fmt" "io" @@ -35,8 +34,6 @@ import ( "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" ) @@ -73,9 +70,9 @@ func NewExistsFunc(tp elastictransport.Interface) NewExists { return func(index, id string) *Exists { n := New(tp) - n.Id(id) + n._id(id) - n.Index(index) + n._index(index) return n } @@ -162,33 +159,8 @@ func (r Exists) Perform(ctx context.Context) (*http.Response, error) { } // Do runs the request through the transport, handle the response and returns a exists.Response -func (r Exists) Do(ctx context.Context) (*Response, error) { - - response := NewResponse() - - res, err := r.Perform(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() - - if res.StatusCode < 299 { - err = json.NewDecoder(res.Body).Decode(response) - if err != nil { - return nil, err - } - - return response, nil - - } - - errorResponse := types.NewElasticsearchError() - err = json.NewDecoder(res.Body).Decode(errorResponse) - if err != nil { - return nil, err - } - - return nil, errorResponse +func (r Exists) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. @@ -219,102 +191,108 @@ func (r *Exists) Header(key, value string) *Exists { return r } -// Id The document ID +// Id Identifier of the document. 
// API Name: id -func (r *Exists) Id(v string) *Exists { +func (r *Exists) _id(id string) *Exists { r.paramSet |= idMask - r.id = v + r.id = id return r } -// Index The name of the index +// Index Comma-separated list of data streams, indices, and aliases. +// Supports wildcards (`*`). // API Name: index -func (r *Exists) Index(v string) *Exists { +func (r *Exists) _index(index string) *Exists { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// Preference Specify the node or shard the operation should be performed on (default: -// random) +// Preference Specifies the node or shard the operation should be performed on. +// Random by default. // API name: preference -func (r *Exists) Preference(v string) *Exists { - r.values.Set("preference", v) +func (r *Exists) Preference(preference string) *Exists { + r.values.Set("preference", preference) return r } -// Realtime Specify whether to perform the operation in realtime or search mode +// Realtime If `true`, the request is real-time as opposed to near-real-time. // API name: realtime -func (r *Exists) Realtime(b bool) *Exists { - r.values.Set("realtime", strconv.FormatBool(b)) +func (r *Exists) Realtime(realtime bool) *Exists { + r.values.Set("realtime", strconv.FormatBool(realtime)) return r } -// Refresh Refresh the shard containing the document before performing the operation +// Refresh If `true`, Elasticsearch refreshes all shards involved in the delete by query +// after the request completes. // API name: refresh -func (r *Exists) Refresh(b bool) *Exists { - r.values.Set("refresh", strconv.FormatBool(b)) +func (r *Exists) Refresh(refresh bool) *Exists { + r.values.Set("refresh", strconv.FormatBool(refresh)) return r } -// Routing Specific routing value +// Routing Target the specified primary shard. 
// API name: routing -func (r *Exists) Routing(v string) *Exists { - r.values.Set("routing", v) +func (r *Exists) Routing(routing string) *Exists { + r.values.Set("routing", routing) return r } -// Source_ True or false to return the _source field or not, or a list of fields to -// return +// Source_ `true` or `false` to return the `_source` field or not, or a list of fields +// to return. // API name: _source -func (r *Exists) Source_(v string) *Exists { - r.values.Set("_source", v) +func (r *Exists) Source_(sourceconfigparam string) *Exists { + r.values.Set("_source", sourceconfigparam) return r } -// SourceExcludes_ A list of fields to exclude from the returned _source field +// SourceExcludes_ A comma-separated list of source fields to exclude in the response. // API name: _source_excludes -func (r *Exists) SourceExcludes_(v string) *Exists { - r.values.Set("_source_excludes", v) +func (r *Exists) SourceExcludes_(fields ...string) *Exists { + r.values.Set("_source_excludes", strings.Join(fields, ",")) return r } -// SourceIncludes_ A list of fields to extract and return from the _source field +// SourceIncludes_ A comma-separated list of source fields to include in the response. // API name: _source_includes -func (r *Exists) SourceIncludes_(v string) *Exists { - r.values.Set("_source_includes", v) +func (r *Exists) SourceIncludes_(fields ...string) *Exists { + r.values.Set("_source_includes", strings.Join(fields, ",")) return r } -// StoredFields A comma-separated list of stored fields to return in the response +// StoredFields List of stored fields to return as part of a hit. +// If no fields are specified, no stored fields are included in the response. +// If this field is specified, the `_source` parameter defaults to false. 
// API name: stored_fields -func (r *Exists) StoredFields(v string) *Exists { - r.values.Set("stored_fields", v) +func (r *Exists) StoredFields(fields ...string) *Exists { + r.values.Set("stored_fields", strings.Join(fields, ",")) return r } -// Version Explicit version number for concurrency control +// Version Explicit version number for concurrency control. +// The specified version must match the current version of the document for the +// request to succeed. // API name: version -func (r *Exists) Version(v string) *Exists { - r.values.Set("version", v) +func (r *Exists) Version(versionnumber string) *Exists { + r.values.Set("version", versionnumber) return r } -// VersionType Specific version type +// VersionType Specific version type: `external`, `external_gte`. // API name: version_type -func (r *Exists) VersionType(enum versiontype.VersionType) *Exists { - r.values.Set("version_type", enum.String()) +func (r *Exists) VersionType(versiontype versiontype.VersionType) *Exists { + r.values.Set("version_type", versiontype.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/existssource/exists_source.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/existssource/exists_source.go index 844c48fdf..3badd7157 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/existssource/exists_source.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/existssource/exists_source.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about whether a document source exists in an index. 
package existssource @@ -24,7 +24,6 @@ package existssource import ( gobytes "bytes" "context" - "encoding/json" "errors" "fmt" "io" @@ -35,8 +34,6 @@ import ( "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" ) @@ -73,9 +70,9 @@ func NewExistsSourceFunc(tp elastictransport.Interface) NewExistsSource { return func(index, id string) *ExistsSource { n := New(tp) - n.Id(id) + n._id(id) - n.Index(index) + n._index(index) return n } @@ -162,33 +159,8 @@ func (r ExistsSource) Perform(ctx context.Context) (*http.Response, error) { } // Do runs the request through the transport, handle the response and returns a existssource.Response -func (r ExistsSource) Do(ctx context.Context) (*Response, error) { - - response := NewResponse() - - res, err := r.Perform(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() - - if res.StatusCode < 299 { - err = json.NewDecoder(res.Body).Decode(response) - if err != nil { - return nil, err - } - - return response, nil - - } - - errorResponse := types.NewElasticsearchError() - err = json.NewDecoder(res.Body).Decode(errorResponse) - if err != nil { - return nil, err - } - - return nil, errorResponse +func (r ExistsSource) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. @@ -219,94 +191,98 @@ func (r *ExistsSource) Header(key, value string) *ExistsSource { return r } -// Id The document ID +// Id Identifier of the document. // API Name: id -func (r *ExistsSource) Id(v string) *ExistsSource { +func (r *ExistsSource) _id(id string) *ExistsSource { r.paramSet |= idMask - r.id = v + r.id = id return r } -// Index The name of the index +// Index Comma-separated list of data streams, indices, and aliases. +// Supports wildcards (`*`). 
// API Name: index -func (r *ExistsSource) Index(v string) *ExistsSource { +func (r *ExistsSource) _index(index string) *ExistsSource { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// Preference Specify the node or shard the operation should be performed on (default: -// random) +// Preference Specifies the node or shard the operation should be performed on. +// Random by default. // API name: preference -func (r *ExistsSource) Preference(v string) *ExistsSource { - r.values.Set("preference", v) +func (r *ExistsSource) Preference(preference string) *ExistsSource { + r.values.Set("preference", preference) return r } -// Realtime Specify whether to perform the operation in realtime or search mode +// Realtime If true, the request is real-time as opposed to near-real-time. // API name: realtime -func (r *ExistsSource) Realtime(b bool) *ExistsSource { - r.values.Set("realtime", strconv.FormatBool(b)) +func (r *ExistsSource) Realtime(realtime bool) *ExistsSource { + r.values.Set("realtime", strconv.FormatBool(realtime)) return r } -// Refresh Refresh the shard containing the document before performing the operation +// Refresh If `true`, Elasticsearch refreshes all shards involved in the delete by query +// after the request completes. // API name: refresh -func (r *ExistsSource) Refresh(b bool) *ExistsSource { - r.values.Set("refresh", strconv.FormatBool(b)) +func (r *ExistsSource) Refresh(refresh bool) *ExistsSource { + r.values.Set("refresh", strconv.FormatBool(refresh)) return r } -// Routing Specific routing value +// Routing Target the specified primary shard. 
// API name: routing -func (r *ExistsSource) Routing(v string) *ExistsSource { - r.values.Set("routing", v) +func (r *ExistsSource) Routing(routing string) *ExistsSource { + r.values.Set("routing", routing) return r } -// Source_ True or false to return the _source field or not, or a list of fields to -// return +// Source_ `true` or `false` to return the `_source` field or not, or a list of fields +// to return. // API name: _source -func (r *ExistsSource) Source_(v string) *ExistsSource { - r.values.Set("_source", v) +func (r *ExistsSource) Source_(sourceconfigparam string) *ExistsSource { + r.values.Set("_source", sourceconfigparam) return r } -// SourceExcludes_ A list of fields to exclude from the returned _source field +// SourceExcludes_ A comma-separated list of source fields to exclude in the response. // API name: _source_excludes -func (r *ExistsSource) SourceExcludes_(v string) *ExistsSource { - r.values.Set("_source_excludes", v) +func (r *ExistsSource) SourceExcludes_(fields ...string) *ExistsSource { + r.values.Set("_source_excludes", strings.Join(fields, ",")) return r } -// SourceIncludes_ A list of fields to extract and return from the _source field +// SourceIncludes_ A comma-separated list of source fields to include in the response. // API name: _source_includes -func (r *ExistsSource) SourceIncludes_(v string) *ExistsSource { - r.values.Set("_source_includes", v) +func (r *ExistsSource) SourceIncludes_(fields ...string) *ExistsSource { + r.values.Set("_source_includes", strings.Join(fields, ",")) return r } -// Version Explicit version number for concurrency control +// Version Explicit version number for concurrency control. +// The specified version must match the current version of the document for the +// request to succeed. 
// API name: version -func (r *ExistsSource) Version(v string) *ExistsSource { - r.values.Set("version", v) +func (r *ExistsSource) Version(versionnumber string) *ExistsSource { + r.values.Set("version", versionnumber) return r } -// VersionType Specific version type +// VersionType Specific version type: `external`, `external_gte`. // API name: version_type -func (r *ExistsSource) VersionType(enum versiontype.VersionType) *ExistsSource { - r.values.Set("version_type", enum.String()) +func (r *ExistsSource) VersionType(versiontype versiontype.VersionType) *ExistsSource { + r.values.Set("version_type", versiontype.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/explain/explain.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/explain/explain.go index 75614150f..4d4e7081f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/explain/explain.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/explain/explain.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about why a specific matches (or doesn't match) a query. 
package explain @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" ) @@ -57,8 +56,9 @@ type Explain struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -75,9 +75,9 @@ func NewExplainFunc(tp elastictransport.Interface) NewExplain { return func(index, id string) *Explain { n := New(tp) - n.Id(id) + n._id(id) - n.Index(index) + n._index(index) return n } @@ -92,6 +92,8 @@ func New(tp elastictransport.Interface) *Explain { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -121,9 +123,19 @@ func (r *Explain) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -131,6 +143,7 @@ func (r *Explain) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -214,7 +227,6 @@ func (r Explain) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -223,6 +235,10 @@ func (r Explain) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -233,120 +249,131 @@ func (r *Explain) Header(key, value string) *Explain { return r } -// Id The document ID +// Id Defines the document ID. 
// API Name: id -func (r *Explain) Id(v string) *Explain { +func (r *Explain) _id(id string) *Explain { r.paramSet |= idMask - r.id = v + r.id = id return r } -// Index The name of the index +// Index Index names used to limit the request. +// Only a single index name can be provided to this parameter. // API Name: index -func (r *Explain) Index(v string) *Explain { +func (r *Explain) _index(index string) *Explain { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// Analyzer The analyzer for the query string query +// Analyzer Analyzer to use for the query string. +// This parameter can only be used when the `q` query string parameter is +// specified. // API name: analyzer -func (r *Explain) Analyzer(v string) *Explain { - r.values.Set("analyzer", v) +func (r *Explain) Analyzer(analyzer string) *Explain { + r.values.Set("analyzer", analyzer) return r } -// AnalyzeWildcard Specify whether wildcards and prefix queries in the query string query should -// be analyzed (default: false) +// AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. // API name: analyze_wildcard -func (r *Explain) AnalyzeWildcard(b bool) *Explain { - r.values.Set("analyze_wildcard", strconv.FormatBool(b)) +func (r *Explain) AnalyzeWildcard(analyzewildcard bool) *Explain { + r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) return r } -// DefaultOperator The default operator for query string query (AND or OR) +// DefaultOperator The default operator for query string query: `AND` or `OR`. // API name: default_operator -func (r *Explain) DefaultOperator(enum operator.Operator) *Explain { - r.values.Set("default_operator", enum.String()) +func (r *Explain) DefaultOperator(defaultoperator operator.Operator) *Explain { + r.values.Set("default_operator", defaultoperator.String()) return r } -// Df The default field for query string query (default: _all) +// Df Field to use as default where no field prefix is given in the query string. 
// API name: df -func (r *Explain) Df(v string) *Explain { - r.values.Set("df", v) +func (r *Explain) Df(df string) *Explain { + r.values.Set("df", df) return r } -// Lenient Specify whether format-based query failures (such as providing text to a -// numeric field) should be ignored +// Lenient If `true`, format-based query failures (such as providing text to a numeric +// field) in the query string will be ignored. // API name: lenient -func (r *Explain) Lenient(b bool) *Explain { - r.values.Set("lenient", strconv.FormatBool(b)) +func (r *Explain) Lenient(lenient bool) *Explain { + r.values.Set("lenient", strconv.FormatBool(lenient)) return r } -// Preference Specify the node or shard the operation should be performed on (default: -// random) +// Preference Specifies the node or shard the operation should be performed on. +// Random by default. // API name: preference -func (r *Explain) Preference(v string) *Explain { - r.values.Set("preference", v) +func (r *Explain) Preference(preference string) *Explain { + r.values.Set("preference", preference) return r } -// Routing Specific routing value +// Routing Custom value used to route operations to a specific shard. // API name: routing -func (r *Explain) Routing(v string) *Explain { - r.values.Set("routing", v) +func (r *Explain) Routing(routing string) *Explain { + r.values.Set("routing", routing) return r } -// Source_ True or false to return the _source field or not, or a list of fields to -// return +// Source_ True or false to return the `_source` field or not, or a list of fields to +// return. // API name: _source -func (r *Explain) Source_(v string) *Explain { - r.values.Set("_source", v) +func (r *Explain) Source_(sourceconfigparam string) *Explain { + r.values.Set("_source", sourceconfigparam) return r } -// SourceExcludes_ A list of fields to exclude from the returned _source field +// SourceExcludes_ A comma-separated list of source fields to exclude from the response. 
// API name: _source_excludes -func (r *Explain) SourceExcludes_(v string) *Explain { - r.values.Set("_source_excludes", v) +func (r *Explain) SourceExcludes_(fields ...string) *Explain { + r.values.Set("_source_excludes", strings.Join(fields, ",")) return r } -// SourceIncludes_ A list of fields to extract and return from the _source field +// SourceIncludes_ A comma-separated list of source fields to include in the response. // API name: _source_includes -func (r *Explain) SourceIncludes_(v string) *Explain { - r.values.Set("_source_includes", v) +func (r *Explain) SourceIncludes_(fields ...string) *Explain { + r.values.Set("_source_includes", strings.Join(fields, ",")) return r } -// StoredFields A comma-separated list of stored fields to return in the response +// StoredFields A comma-separated list of stored fields to return in the response. // API name: stored_fields -func (r *Explain) StoredFields(v string) *Explain { - r.values.Set("stored_fields", v) +func (r *Explain) StoredFields(fields ...string) *Explain { + r.values.Set("stored_fields", strings.Join(fields, ",")) return r } -// Q Query in the Lucene query string syntax +// Q Query in the Lucene query string syntax. // API name: q -func (r *Explain) Q(v string) *Explain { - r.values.Set("q", v) +func (r *Explain) Q(q string) *Explain { + r.values.Set("q", q) + + return r +} + +// Query Defines the search definition using the Query DSL. +// API name: query +func (r *Explain) Query(query *types.Query) *Explain { + + r.req.Query = query return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/explain/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/explain/request.go index 7052253a2..dfac436f6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/explain/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/explain/request.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package explain @@ -29,8 +29,10 @@ import ( // Request holds the request body struct for the package explain // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/explain/ExplainRequest.ts#L26-L53 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/explain/ExplainRequest.ts#L26-L105 type Request struct { + + // Query Defines the search definition using the Query DSL. Query *types.Query `json:"query,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/explain/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/explain/response.go index ae60849d1..246ed0f25 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/explain/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/explain/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package explain @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package explain // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/explain/ExplainResponse.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/explain/ExplainResponse.ts#L23-L31 type Response struct { Explanation *types.ExplanationDetail `json:"explanation,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/fieldcaps/field_caps.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/fieldcaps/field_caps.go index bef861c3d..daef5b874 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/fieldcaps/field_caps.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/fieldcaps/field_caps.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns the information about the capabilities of fields among multiple // indices. 
@@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -54,8 +55,9 @@ type FieldCaps struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -85,6 +87,8 @@ func New(tp elastictransport.Interface) *FieldCaps { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +118,19 @@ func (r *FieldCaps) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +138,7 @@ func (r *FieldCaps) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -209,7 +224,6 @@ func (r FieldCaps) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -218,6 +232,10 @@ func (r FieldCaps) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -232,9 +250,9 @@ func (r *FieldCaps) Header(key, value string) *FieldCaps { // request. Supports wildcards (*). To target all data streams and indices, omit // this parameter or use * or _all. 
// API Name: index -func (r *FieldCaps) Index(v string) *FieldCaps { +func (r *FieldCaps) Index(index string) *FieldCaps { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -246,8 +264,8 @@ func (r *FieldCaps) Index(v string) *FieldCaps { // targeting `foo*,bar*` returns an error if an index starts with foo but no // index starts with bar. // API name: allow_no_indices -func (r *FieldCaps) AllowNoIndices(b bool) *FieldCaps { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *FieldCaps) AllowNoIndices(allownoindices bool) *FieldCaps { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } @@ -256,33 +274,28 @@ func (r *FieldCaps) AllowNoIndices(b bool) *FieldCaps { // data streams, this argument determines whether wildcard expressions match // hidden data streams. Supports comma-separated values, such as `open,hidden`. // API name: expand_wildcards -func (r *FieldCaps) ExpandWildcards(v string) *FieldCaps { - r.values.Set("expand_wildcards", v) - - return r -} - -// Fields Comma-separated list of fields to retrieve capabilities for. Wildcard (`*`) -// expressions are supported. -// API name: fields -func (r *FieldCaps) Fields(v string) *FieldCaps { - r.values.Set("fields", v) +func (r *FieldCaps) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *FieldCaps { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } // IgnoreUnavailable If `true`, missing or closed indices are not included in the response. 
// API name: ignore_unavailable -func (r *FieldCaps) IgnoreUnavailable(b bool) *FieldCaps { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *FieldCaps) IgnoreUnavailable(ignoreunavailable bool) *FieldCaps { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } // IncludeUnmapped If true, unmapped fields are included in the response. // API name: include_unmapped -func (r *FieldCaps) IncludeUnmapped(b bool) *FieldCaps { - r.values.Set("include_unmapped", strconv.FormatBool(b)) +func (r *FieldCaps) IncludeUnmapped(includeunmapped bool) *FieldCaps { + r.values.Set("include_unmapped", strconv.FormatBool(includeunmapped)) return r } @@ -290,16 +303,50 @@ func (r *FieldCaps) IncludeUnmapped(b bool) *FieldCaps { // Filters An optional set of filters: can include // +metadata,-metadata,-nested,-multifield,-parent // API name: filters -func (r *FieldCaps) Filters(v string) *FieldCaps { - r.values.Set("filters", v) +func (r *FieldCaps) Filters(filters string) *FieldCaps { + r.values.Set("filters", filters) return r } // Types Only return results for fields that have one of the types in the list // API name: types -func (r *FieldCaps) Types(v string) *FieldCaps { - r.values.Set("types", v) +func (r *FieldCaps) Types(types ...string) *FieldCaps { + tmp := []string{} + for _, item := range types { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("types", strings.Join(tmp, ",")) + + return r +} + +// Fields List of fields to retrieve capabilities for. Wildcard (`*`) expressions are +// supported. +// API name: fields +func (r *FieldCaps) Fields(fields ...string) *FieldCaps { + r.req.Fields = fields + + return r +} + +// IndexFilter Allows to filter indices if the provided query rewrites to match_none on +// every shard. 
+// API name: index_filter +func (r *FieldCaps) IndexFilter(indexfilter *types.Query) *FieldCaps { + + r.req.IndexFilter = indexfilter + + return r +} + +// RuntimeMappings Defines ad-hoc runtime fields in the request similar to the way it is done in +// search requests. +// These fields exist only as part of the query and take precedence over fields +// defined with the same name in the index mappings. +// API name: runtime_mappings +func (r *FieldCaps) RuntimeMappings(runtimefields types.RuntimeFields) *FieldCaps { + r.req.RuntimeMappings = runtimefields return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/fieldcaps/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/fieldcaps/request.go index fbec039e4..7a1f368cf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/fieldcaps/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/fieldcaps/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package fieldcaps @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package fieldcaps // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/field_caps/FieldCapabilitiesRequest.ts#L25-L95 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/field_caps/FieldCapabilitiesRequest.ts#L25-L99 type Request struct { // Fields List of fields to retrieve capabilities for. Wildcard (`*`) expressions are @@ -42,7 +42,7 @@ type Request struct { // search requests. 
// These fields exist only as part of the query and take precedence over fields // defined with the same name in the index mappings. - RuntimeMappings map[string]types.RuntimeField `json:"runtime_mappings,omitempty"` + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/fieldcaps/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/fieldcaps/response.go index 8e3b6b61c..aec668fba 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/fieldcaps/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/fieldcaps/response.go @@ -16,17 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package fieldcaps import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Response holds the response body struct for the package fieldcaps // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/field_caps/FieldCapabilitiesResponse.ts#L24-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/field_caps/FieldCapabilitiesResponse.ts#L24-L35 type Response struct { Fields map[string]map[string]types.FieldCapability `json:"fields"` @@ -40,3 +45,46 @@ func NewResponse() *Response { } return r } + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if 
s.Fields == nil { + s.Fields = make(map[string]map[string]types.FieldCapability, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return err + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/get/get.go index 31785a528..9cd55a115 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/get/get.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/get/get.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns a document. 
package get @@ -36,7 +36,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" ) @@ -73,9 +72,9 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { return func(index, id string) *Get { n := New(tp) - n.Id(id) + n._id(id) - n.Index(index) + n._index(index) return n } @@ -179,7 +178,30 @@ func (r Get) Do(ctx context.Context) (*Response, error) { } return response, nil + } + + if res.StatusCode == 404 { + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(gobytes.NewReader(data)).Decode(&errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + err = json.NewDecoder(gobytes.NewReader(data)).Decode(&response) + if err != nil { + return nil, err + } + + return response, nil + } + + return nil, errorResponse } errorResponse := types.NewElasticsearchError() @@ -188,6 +210,10 @@ func (r Get) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -221,18 +247,18 @@ func (r *Get) Header(key, value string) *Get { // Id Unique identifier of the document. // API Name: id -func (r *Get) Id(v string) *Get { +func (r *Get) _id(id string) *Get { r.paramSet |= idMask - r.id = v + r.id = id return r } // Index Name of the index that contains the document. // API Name: index -func (r *Get) Index(v string) *Get { +func (r *Get) _index(index string) *Get { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -240,16 +266,16 @@ func (r *Get) Index(v string) *Get { // Preference Specifies the node or shard the operation should be performed on. Random by // default. 
// API name: preference -func (r *Get) Preference(v string) *Get { - r.values.Set("preference", v) +func (r *Get) Preference(preference string) *Get { + r.values.Set("preference", preference) return r } -// Realtime Boolean) If true, the request is real-time as opposed to near-real-time. +// Realtime If `true`, the request is real-time as opposed to near-real-time. // API name: realtime -func (r *Get) Realtime(b bool) *Get { - r.values.Set("realtime", strconv.FormatBool(b)) +func (r *Get) Realtime(realtime bool) *Get { + r.values.Set("realtime", strconv.FormatBool(realtime)) return r } @@ -257,16 +283,16 @@ func (r *Get) Realtime(b bool) *Get { // Refresh If true, Elasticsearch refreshes the affected shards to make this operation // visible to search. If false, do nothing with refreshes. // API name: refresh -func (r *Get) Refresh(b bool) *Get { - r.values.Set("refresh", strconv.FormatBool(b)) +func (r *Get) Refresh(refresh bool) *Get { + r.values.Set("refresh", strconv.FormatBool(refresh)) return r } // Routing Target the specified primary shard. // API name: routing -func (r *Get) Routing(v string) *Get { - r.values.Set("routing", v) +func (r *Get) Routing(routing string) *Get { + r.values.Set("routing", routing) return r } @@ -274,32 +300,34 @@ func (r *Get) Routing(v string) *Get { // Source_ True or false to return the _source field or not, or a list of fields to // return. // API name: _source -func (r *Get) Source_(v string) *Get { - r.values.Set("_source", v) +func (r *Get) Source_(sourceconfigparam string) *Get { + r.values.Set("_source", sourceconfigparam) return r } // SourceExcludes_ A comma-separated list of source fields to exclude in the response. 
// API name: _source_excludes -func (r *Get) SourceExcludes_(v string) *Get { - r.values.Set("_source_excludes", v) +func (r *Get) SourceExcludes_(fields ...string) *Get { + r.values.Set("_source_excludes", strings.Join(fields, ",")) return r } // SourceIncludes_ A comma-separated list of source fields to include in the response. // API name: _source_includes -func (r *Get) SourceIncludes_(v string) *Get { - r.values.Set("_source_includes", v) +func (r *Get) SourceIncludes_(fields ...string) *Get { + r.values.Set("_source_includes", strings.Join(fields, ",")) return r } -// StoredFields A comma-separated list of stored fields to return in the response +// StoredFields List of stored fields to return as part of a hit. +// If no fields are specified, no stored fields are included in the response. +// If this field is specified, the `_source` parameter defaults to false. // API name: stored_fields -func (r *Get) StoredFields(v string) *Get { - r.values.Set("stored_fields", v) +func (r *Get) StoredFields(fields ...string) *Get { + r.values.Set("stored_fields", strings.Join(fields, ",")) return r } @@ -307,16 +335,16 @@ func (r *Get) StoredFields(v string) *Get { // Version Explicit version number for concurrency control. The specified version must // match the current version of the document for the request to succeed. // API name: version -func (r *Get) Version(v string) *Get { - r.values.Set("version", v) +func (r *Get) Version(versionnumber string) *Get { + r.values.Set("version", versionnumber) return r } // VersionType Specific version type: internal, external, external_gte. 
// API name: version_type -func (r *Get) VersionType(enum versiontype.VersionType) *Get { - r.values.Set("version_type", enum.String()) +func (r *Get) VersionType(versiontype versiontype.VersionType) *Get { + r.values.Set("version_type", versiontype.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/get/response.go index fa7010b8d..081df7386 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/get/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/get/response.go @@ -16,15 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package get -import "encoding/json" +import ( + "encoding/json" +) // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/get/GetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/get/GetResponse.ts#L23-L34 type Response struct { Fields map[string]json.RawMessage `json:"fields,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscript/get_script.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscript/get_script.go index 741eff9be..6921f6d9b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscript/get_script.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscript/get_script.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns a script. package getscript @@ -67,7 +67,7 @@ func NewGetScriptFunc(tp elastictransport.Interface) NewGetScript { return func(id string) *GetScript { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -168,7 +168,6 @@ func (r GetScript) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -177,6 +176,10 @@ func (r GetScript) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -208,19 +211,19 @@ func (r *GetScript) Header(key, value string) *GetScript { return r } -// Id Script ID +// Id Identifier for the stored script or search template. // API Name: id -func (r *GetScript) Id(v string) *GetScript { +func (r *GetScript) _id(id string) *GetScript { r.paramSet |= idMask - r.id = v + r.id = id return r } // MasterTimeout Specify timeout for connection to master // API name: master_timeout -func (r *GetScript) MasterTimeout(v string) *GetScript { - r.values.Set("master_timeout", v) +func (r *GetScript) MasterTimeout(duration string) *GetScript { + r.values.Set("master_timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscript/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscript/response.go index f2f95082e..000cd800d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscript/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscript/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getscript @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getscript // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/get_script/GetScriptResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/get_script/GetScriptResponse.ts#L23-L29 type Response struct { Found bool `json:"found"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptcontext/get_script_context.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptcontext/get_script_context.go index 1f7d02967..34fc7ca94 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptcontext/get_script_context.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptcontext/get_script_context.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns all script contexts. 
package getscriptcontext @@ -157,7 +157,6 @@ func (r GetScriptContext) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -166,6 +165,10 @@ func (r GetScriptContext) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptcontext/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptcontext/response.go index 1661ace15..89d94828b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptcontext/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptcontext/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getscriptcontext @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getscriptcontext // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/get_script_context/GetScriptContextResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/get_script_context/GetScriptContextResponse.ts#L22-L26 type Response struct { Contexts []types.GetScriptContext `json:"contexts"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptlanguages/get_script_languages.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptlanguages/get_script_languages.go index 8e1d7dbe3..dbabe9f1d 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptlanguages/get_script_languages.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptlanguages/get_script_languages.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns available script types, languages and contexts package getscriptlanguages @@ -157,7 +157,6 @@ func (r GetScriptLanguages) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -166,6 +165,10 @@ func (r GetScriptLanguages) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptlanguages/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptlanguages/response.go index 6af664a46..c5822f738 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptlanguages/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptlanguages/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getscriptlanguages @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getscriptlanguages // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/get_script_languages/GetScriptLanguagesResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/get_script_languages/GetScriptLanguagesResponse.ts#L22-L27 type Response struct { LanguageContexts []types.LanguageContext `json:"language_contexts"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getsource/get_source.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getsource/get_source.go index 881f1b35a..a1cb3528d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getsource/get_source.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getsource/get_source.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns the source of a document. 
package getsource @@ -36,7 +36,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" ) @@ -73,9 +72,9 @@ func NewGetSourceFunc(tp elastictransport.Interface) NewGetSource { return func(index, id string) *GetSource { n := New(tp) - n.Id(id) + n._id(id) - n.Index(index) + n._index(index) return n } @@ -179,7 +178,6 @@ func (r GetSource) Do(ctx context.Context) (Response, error) { } return *response, nil - } errorResponse := types.NewElasticsearchError() @@ -188,6 +186,10 @@ func (r GetSource) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -221,18 +223,18 @@ func (r *GetSource) Header(key, value string) *GetSource { // Id Unique identifier of the document. // API Name: id -func (r *GetSource) Id(v string) *GetSource { +func (r *GetSource) _id(id string) *GetSource { r.paramSet |= idMask - r.id = v + r.id = id return r } // Index Name of the index that contains the document. // API Name: index -func (r *GetSource) Index(v string) *GetSource { +func (r *GetSource) _index(index string) *GetSource { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -240,16 +242,16 @@ func (r *GetSource) Index(v string) *GetSource { // Preference Specifies the node or shard the operation should be performed on. Random by // default. // API name: preference -func (r *GetSource) Preference(v string) *GetSource { - r.values.Set("preference", v) +func (r *GetSource) Preference(preference string) *GetSource { + r.values.Set("preference", preference) return r } // Realtime Boolean) If true, the request is real-time as opposed to near-real-time. 
// API name: realtime -func (r *GetSource) Realtime(b bool) *GetSource { - r.values.Set("realtime", strconv.FormatBool(b)) +func (r *GetSource) Realtime(realtime bool) *GetSource { + r.values.Set("realtime", strconv.FormatBool(realtime)) return r } @@ -257,16 +259,16 @@ func (r *GetSource) Realtime(b bool) *GetSource { // Refresh If true, Elasticsearch refreshes the affected shards to make this operation // visible to search. If false, do nothing with refreshes. // API name: refresh -func (r *GetSource) Refresh(b bool) *GetSource { - r.values.Set("refresh", strconv.FormatBool(b)) +func (r *GetSource) Refresh(refresh bool) *GetSource { + r.values.Set("refresh", strconv.FormatBool(refresh)) return r } // Routing Target the specified primary shard. // API name: routing -func (r *GetSource) Routing(v string) *GetSource { - r.values.Set("routing", v) +func (r *GetSource) Routing(routing string) *GetSource { + r.values.Set("routing", routing) return r } @@ -274,31 +276,31 @@ func (r *GetSource) Routing(v string) *GetSource { // Source_ True or false to return the _source field or not, or a list of fields to // return. // API name: _source -func (r *GetSource) Source_(v string) *GetSource { - r.values.Set("_source", v) +func (r *GetSource) Source_(sourceconfigparam string) *GetSource { + r.values.Set("_source", sourceconfigparam) return r } // SourceExcludes_ A comma-separated list of source fields to exclude in the response. // API name: _source_excludes -func (r *GetSource) SourceExcludes_(v string) *GetSource { - r.values.Set("_source_excludes", v) +func (r *GetSource) SourceExcludes_(fields ...string) *GetSource { + r.values.Set("_source_excludes", strings.Join(fields, ",")) return r } // SourceIncludes_ A comma-separated list of source fields to include in the response. 
// API name: _source_includes -func (r *GetSource) SourceIncludes_(v string) *GetSource { - r.values.Set("_source_includes", v) +func (r *GetSource) SourceIncludes_(fields ...string) *GetSource { + r.values.Set("_source_includes", strings.Join(fields, ",")) return r } // API name: stored_fields -func (r *GetSource) StoredFields(v string) *GetSource { - r.values.Set("stored_fields", v) +func (r *GetSource) StoredFields(fields ...string) *GetSource { + r.values.Set("stored_fields", strings.Join(fields, ",")) return r } @@ -306,16 +308,16 @@ func (r *GetSource) StoredFields(v string) *GetSource { // Version Explicit version number for concurrency control. The specified version must // match the current version of the document for the request to succeed. // API name: version -func (r *GetSource) Version(v string) *GetSource { - r.values.Set("version", v) +func (r *GetSource) Version(versionnumber string) *GetSource { + r.values.Set("version", versionnumber) return r } // VersionType Specific version type: internal, external, external_gte. // API name: version_type -func (r *GetSource) VersionType(enum versiontype.VersionType) *GetSource { - r.values.Set("version_type", enum.String()) +func (r *GetSource) VersionType(versiontype versiontype.VersionType) *GetSource { + r.values.Set("version_type", versiontype.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getsource/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getsource/response.go index 56a3a2194..5d62f0f1e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getsource/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/getsource/response.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getsource -import "encoding/json" +import ( + "encoding/json" +) // Response holds the response body struct for the package getsource // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/get_source/SourceResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/get_source/SourceResponse.ts#L20-L22 -type Response json.RawMessage +type Response = json.RawMessage + +func NewResponse() *Response { + return new(Response) +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/healthreport/health_report.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/healthreport/health_report.go new file mode 100644 index 000000000..c9876e2aa --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/healthreport/health_report.go @@ -0,0 +1,249 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Returns the health of the cluster. +package healthreport + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + featureMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type HealthReport struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + feature string +} + +// NewHealthReport type alias for index. +type NewHealthReport func() *HealthReport + +// NewHealthReportFunc returns a new instance of HealthReport with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewHealthReportFunc(tp elastictransport.Interface) NewHealthReport { + return func() *HealthReport { + n := New(tp) + + return n + } +} + +// Returns the health of the cluster. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/health-api.html +func New(tp elastictransport.Interface) *HealthReport { + r := &HealthReport{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *HealthReport) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_health_report") + + method = http.MethodGet + case r.paramSet == featureMask: + path.WriteString("/") + path.WriteString("_health_report") + path.WriteString("/") + + path.WriteString(r.feature) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r HealthReport) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the HealthReport query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a healthreport.Response +func (r HealthReport) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r HealthReport) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the HealthReport headers map. +func (r *HealthReport) Header(key, value string) *HealthReport { + r.headers.Set(key, value) + + return r +} + +// Feature A feature of the cluster, as returned by the top-level health report API. 
+// API Name: feature +func (r *HealthReport) Feature(features ...string) *HealthReport { + r.paramSet |= featureMask + r.feature = strings.Join(features, ",") + + return r +} + +// Timeout Explicit operation timeout. +// API name: timeout +func (r *HealthReport) Timeout(duration string) *HealthReport { + r.values.Set("timeout", duration) + + return r +} + +// Verbose Opt-in for more information about the health of the system. +// API name: verbose +func (r *HealthReport) Verbose(verbose bool) *HealthReport { + r.values.Set("verbose", strconv.FormatBool(verbose)) + + return r +} + +// Size Limit the number of affected resources the health report API returns. +// API name: size +func (r *HealthReport) Size(size int) *HealthReport { + r.values.Set("size", strconv.Itoa(size)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/healthreport/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/healthreport/response.go new file mode 100644 index 000000000..72cc127de --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/healthreport/response.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package healthreport + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indicatorhealthstatus" +) + +// Response holds the response body struct for the package healthreport +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/Response.ts#L22-L28 + +type Response struct { + ClusterName string `json:"cluster_name"` + Indicators types.Indicators `json:"indicators"` + Status *indicatorhealthstatus.IndicatorHealthStatus `json:"status,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/index/index.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/index/index.go index 21fd05bfb..dff33e360 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/index/index.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/index/index.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates or updates a document in an index. 
package index @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/optype" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" @@ -59,8 +58,9 @@ type Index struct { buf *gobytes.Buffer - req interface{} - raw io.Reader + req interface{} + deferred []func(request interface{}) error + raw io.Reader paramSet int @@ -77,7 +77,7 @@ func NewIndexFunc(tp elastictransport.Interface) NewIndex { return func(index string) *Index { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -112,6 +112,13 @@ func (r *Index) Request(req interface{}) *Index { return r } +// Document allows to set the request property with the appropriate payload. +func (r *Index) Document(document interface{}) *Index { + r.req = document + + return r +} + // HttpRequest returns the http.Request object built from the // given parameters. 
func (r *Index) HttpRequest(ctx context.Context) (*http.Request, error) { @@ -121,9 +128,19 @@ func (r *Index) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -131,6 +148,7 @@ func (r *Index) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -222,7 +240,6 @@ func (r Index) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -231,6 +248,10 @@ func (r Index) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -241,116 +262,128 @@ func (r *Index) Header(key, value string) *Index { return r } -// Id Document ID +// Id Unique identifier for the document. // API Name: id -func (r *Index) Id(v string) *Index { +func (r *Index) Id(id string) *Index { r.paramSet |= idMask - r.id = v + r.id = id return r } -// Index The name of the index +// Index Name of the data stream or index to target. // API Name: index -func (r *Index) Index(v string) *Index { +func (r *Index) _index(index string) *Index { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// IfPrimaryTerm only perform the index operation if the last operation that has changed the -// document has the specified primary term +// IfPrimaryTerm Only perform the operation if the document has this primary term. 
// API name: if_primary_term -func (r *Index) IfPrimaryTerm(v string) *Index { - r.values.Set("if_primary_term", v) +func (r *Index) IfPrimaryTerm(ifprimaryterm string) *Index { + r.values.Set("if_primary_term", ifprimaryterm) return r } -// IfSeqNo only perform the index operation if the last operation that has changed the -// document has the specified sequence number +// IfSeqNo Only perform the operation if the document has this sequence number. // API name: if_seq_no -func (r *Index) IfSeqNo(v string) *Index { - r.values.Set("if_seq_no", v) +func (r *Index) IfSeqNo(sequencenumber string) *Index { + r.values.Set("if_seq_no", sequencenumber) return r } -// OpType Explicit operation type. Defaults to `index` for requests with an explicit -// document ID, and to `create`for requests without an explicit document ID +// OpType Set to create to only index the document if it does not already exist (put if +// absent). +// If a document with the specified `_id` already exists, the indexing operation +// will fail. +// Same as using the `/_create` endpoint. +// Valid values: `index`, `create`. +// If document id is specified, it defaults to `index`. +// Otherwise, it defaults to `create`. // API name: op_type -func (r *Index) OpType(enum optype.OpType) *Index { - r.values.Set("op_type", enum.String()) +func (r *Index) OpType(optype optype.OpType) *Index { + r.values.Set("op_type", optype.String()) return r } -// Pipeline The pipeline id to preprocess incoming documents with +// Pipeline ID of the pipeline to use to preprocess incoming documents. +// If the index has a default ingest pipeline specified, then setting the value +// to `_none` disables the default ingest pipeline for this request. +// If a final pipeline is configured it will always run, regardless of the value +// of this parameter. 
// API name: pipeline -func (r *Index) Pipeline(v string) *Index { - r.values.Set("pipeline", v) +func (r *Index) Pipeline(pipeline string) *Index { + r.values.Set("pipeline", pipeline) return r } -// Refresh If `true` then refresh the affected shards to make this operation visible to -// search, if `wait_for` then wait for a refresh to make this operation visible -// to search, if `false` (the default) then do nothing with refreshes. +// Refresh If `true`, Elasticsearch refreshes the affected shards to make this operation +// visible to search, if `wait_for` then wait for a refresh to make this +// operation visible to search, if `false` do nothing with refreshes. +// Valid values: `true`, `false`, `wait_for`. // API name: refresh -func (r *Index) Refresh(enum refresh.Refresh) *Index { - r.values.Set("refresh", enum.String()) +func (r *Index) Refresh(refresh refresh.Refresh) *Index { + r.values.Set("refresh", refresh.String()) return r } -// Routing Specific routing value +// Routing Custom value used to route operations to a specific shard. // API name: routing -func (r *Index) Routing(v string) *Index { - r.values.Set("routing", v) +func (r *Index) Routing(routing string) *Index { + r.values.Set("routing", routing) return r } -// Timeout Explicit operation timeout +// Timeout Period the request waits for the following operations: automatic index +// creation, dynamic mapping updates, waiting for active shards. // API name: timeout -func (r *Index) Timeout(v string) *Index { - r.values.Set("timeout", v) +func (r *Index) Timeout(duration string) *Index { + r.values.Set("timeout", duration) return r } -// Version Explicit version number for concurrency control +// Version Explicit version number for concurrency control. +// The specified version must match the current version of the document for the +// request to succeed. 
// API name: version -func (r *Index) Version(v string) *Index { - r.values.Set("version", v) +func (r *Index) Version(versionnumber string) *Index { + r.values.Set("version", versionnumber) return r } -// VersionType Specific version type +// VersionType Specific version type: `external`, `external_gte`. // API name: version_type -func (r *Index) VersionType(enum versiontype.VersionType) *Index { - r.values.Set("version_type", enum.String()) +func (r *Index) VersionType(versiontype versiontype.VersionType) *Index { + r.values.Set("version_type", versiontype.String()) return r } -// WaitForActiveShards Sets the number of shard copies that must be active before proceeding with -// the index operation. Defaults to 1, meaning the primary shard only. Set to -// `all` for all shard copies, otherwise set to any non-negative value less than -// or equal to the total number of copies for the shard (number of replicas + 1) +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to all or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). // API name: wait_for_active_shards -func (r *Index) WaitForActiveShards(v string) *Index { - r.values.Set("wait_for_active_shards", v) +func (r *Index) WaitForActiveShards(waitforactiveshards string) *Index { + r.values.Set("wait_for_active_shards", waitforactiveshards) return r } -// RequireAlias When true, requires destination to be an alias. Default is false +// RequireAlias If `true`, the destination must be an index alias. 
// API name: require_alias -func (r *Index) RequireAlias(b bool) *Index { - r.values.Set("require_alias", strconv.FormatBool(b)) +func (r *Index) RequireAlias(requirealias bool) *Index { + r.values.Set("require_alias", strconv.FormatBool(requirealias)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/index/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/index/request.go new file mode 100644 index 000000000..7021db52a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/index/request.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package index + +import ( + "encoding/json" +) + +// Request holds the request body struct for the package index +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/index/IndexRequest.ts#L35-L117 +type Request = json.RawMessage diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/index/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/index/response.go index 98889c466..a9ee62303 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/index/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/index/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package index @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package index // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/index/IndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/index/IndexResponse.ts#L22-L24 type Response struct { ForcedRefresh *bool `json:"forced_refresh,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/info/info.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/info/info.go index 417bcdc1e..1b5c3e329 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/info/info.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/info/info.go @@ -16,7 +16,7 @@ // under the 
License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns basic information about the cluster. package info @@ -155,7 +155,6 @@ func (r Info) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -164,6 +163,10 @@ func (r Info) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/info/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/info/response.go index 7e17edf3b..a5fbcd2ed 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/info/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/info/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package info @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package info // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/info/RootNodeInfoResponse.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/info/RootNodeInfoResponse.ts#L23-L31 type Response struct { ClusterName string `json:"cluster_name"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/knnsearch/knn_search.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/knnsearch/knn_search.go index a8673f7f9..48e9f3aa0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/knnsearch/knn_search.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/knnsearch/knn_search.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Performs a kNN search. 
package knnsearch @@ -52,8 +52,9 @@ type KnnSearch struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewKnnSearchFunc(tp elastictransport.Interface) NewKnnSearch { return func(index string) *KnnSearch { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *KnnSearch { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *KnnSearch) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *KnnSearch) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -203,7 +217,6 @@ func (r KnnSearch) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -212,6 +225,10 @@ func (r KnnSearch) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -225,17 +242,79 @@ func (r *KnnSearch) Header(key, value string) *KnnSearch { // Index A comma-separated list of index names to search; // use `_all` or to perform the operation on all indices // API Name: index -func (r *KnnSearch) Index(v string) *KnnSearch { +func (r *KnnSearch) _index(index string) *KnnSearch { r.paramSet |= indexMask - r.index = v + r.index = index return r } // Routing A comma-separated list of specific routing values // API name: routing -func (r *KnnSearch) Routing(v string) *KnnSearch { - 
r.values.Set("routing", v) +func (r *KnnSearch) Routing(routing string) *KnnSearch { + r.values.Set("routing", routing) + + return r +} + +// DocvalueFields The request returns doc values for field names matching these patterns +// in the hits.fields property of the response. Accepts wildcard (*) patterns. +// API name: docvalue_fields +func (r *KnnSearch) DocvalueFields(docvaluefields ...types.FieldAndFormat) *KnnSearch { + r.req.DocvalueFields = docvaluefields + + return r +} + +// Fields The request returns values for field names matching these patterns +// in the hits.fields property of the response. Accepts wildcard (*) patterns. +// API name: fields +func (r *KnnSearch) Fields(fields ...string) *KnnSearch { + r.req.Fields = fields + + return r +} + +// Filter Query to filter the documents that can match. The kNN search will return the +// top +// `k` documents that also match this filter. The value can be a single query or +// a +// list of queries. If `filter` isn't provided, all documents are allowed to +// match. +// API name: filter +func (r *KnnSearch) Filter(filters ...types.Query) *KnnSearch { + r.req.Filter = filters + + return r +} + +// Knn kNN query to execute +// API name: knn +func (r *KnnSearch) Knn(knn *types.CoreKnnQuery) *KnnSearch { + + r.req.Knn = *knn + + return r +} + +// Source_ Indicates which source fields are returned for matching documents. These +// fields are returned in the hits._source property of the search response. +// API name: _source +func (r *KnnSearch) Source_(sourceconfig types.SourceConfig) *KnnSearch { + r.req.Source_ = sourceconfig + + return r +} + +// StoredFields List of stored fields to return as part of a hit. If no fields are specified, +// no stored fields are included in the response. If this field is specified, +// the _source +// parameter defaults to false. You can pass _source: true to return both source +// fields +// and stored fields in the search response. 
+// API name: stored_fields +func (r *KnnSearch) StoredFields(fields ...string) *KnnSearch { + r.req.StoredFields = fields return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/knnsearch/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/knnsearch/request.go index 0b35a9955..a82a308cb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/knnsearch/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/knnsearch/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package knnsearch @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package knnsearch // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/knn_search/KnnSearchRequest.ts#L27-L80 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/knn_search/KnnSearchRequest.ts#L27-L80 type Request struct { // DocvalueFields The request returns doc values for field names matching these patterns diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/knnsearch/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/knnsearch/response.go index 85714c0c8..0b4f3e775 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/knnsearch/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/knnsearch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package knnsearch @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package knnsearch // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/knn_search/KnnSearchResponse.ts#L26-L54 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/knn_search/KnnSearchResponse.ts#L26-L54 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mget/mget.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mget/mget.go index dc03f09a5..f60da7966 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mget/mget.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mget/mget.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Allows to get multiple documents in one request. 
package mget @@ -53,8 +53,9 @@ type Mget struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -83,6 +84,8 @@ func New(tp elastictransport.Interface) *Mget { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -112,9 +115,19 @@ func (r *Mget) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -122,6 +135,7 @@ func (r *Mget) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -207,7 +221,6 @@ func (r Mget) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -216,6 +229,10 @@ func (r Mget) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -229,9 +246,9 @@ func (r *Mget) Header(key, value string) *Mget { // Index Name of the index to retrieve documents from when `ids` are specified, or // when a document in the `docs` array does not specify an index. // API Name: index -func (r *Mget) Index(v string) *Mget { +func (r *Mget) Index(index string) *Mget { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -239,32 +256,32 @@ func (r *Mget) Index(v string) *Mget { // Preference Specifies the node or shard the operation should be performed on. Random by // default. 
// API name: preference -func (r *Mget) Preference(v string) *Mget { - r.values.Set("preference", v) +func (r *Mget) Preference(preference string) *Mget { + r.values.Set("preference", preference) return r } // Realtime If `true`, the request is real-time as opposed to near-real-time. // API name: realtime -func (r *Mget) Realtime(b bool) *Mget { - r.values.Set("realtime", strconv.FormatBool(b)) +func (r *Mget) Realtime(realtime bool) *Mget { + r.values.Set("realtime", strconv.FormatBool(realtime)) return r } // Refresh If `true`, the request refreshes relevant shards before retrieving documents. // API name: refresh -func (r *Mget) Refresh(b bool) *Mget { - r.values.Set("refresh", strconv.FormatBool(b)) +func (r *Mget) Refresh(refresh bool) *Mget { + r.values.Set("refresh", strconv.FormatBool(refresh)) return r } // Routing Custom value used to route operations to a specific shard. // API name: routing -func (r *Mget) Routing(v string) *Mget { - r.values.Set("routing", v) +func (r *Mget) Routing(routing string) *Mget { + r.values.Set("routing", routing) return r } @@ -272,8 +289,8 @@ func (r *Mget) Routing(v string) *Mget { // Source_ True or false to return the `_source` field or not, or a list of fields to // return. // API name: _source -func (r *Mget) Source_(v string) *Mget { - r.values.Set("_source", v) +func (r *Mget) Source_(sourceconfigparam string) *Mget { + r.values.Set("_source", sourceconfigparam) return r } @@ -282,8 +299,8 @@ func (r *Mget) Source_(v string) *Mget { // You can also use this parameter to exclude fields from the subset specified // in `_source_includes` query parameter. // API name: _source_excludes -func (r *Mget) SourceExcludes_(v string) *Mget { - r.values.Set("_source_excludes", v) +func (r *Mget) SourceExcludes_(fields ...string) *Mget { + r.values.Set("_source_excludes", strings.Join(fields, ",")) return r } @@ -294,8 +311,8 @@ func (r *Mget) SourceExcludes_(v string) *Mget { // parameter. 
// If the `_source` parameter is `false`, this parameter is ignored. // API name: _source_includes -func (r *Mget) SourceIncludes_(v string) *Mget { - r.values.Set("_source_includes", v) +func (r *Mget) SourceIncludes_(fields ...string) *Mget { + r.values.Set("_source_includes", strings.Join(fields, ",")) return r } @@ -303,8 +320,26 @@ func (r *Mget) SourceIncludes_(v string) *Mget { // StoredFields If `true`, retrieves the document fields stored in the index rather than the // document `_source`. // API name: stored_fields -func (r *Mget) StoredFields(v string) *Mget { - r.values.Set("stored_fields", v) +func (r *Mget) StoredFields(fields ...string) *Mget { + r.values.Set("stored_fields", strings.Join(fields, ",")) + + return r +} + +// Docs The documents you want to retrieve. Required if no index is specified in the +// request URI. +// API name: docs +func (r *Mget) Docs(docs ...types.MgetOperation) *Mget { + r.req.Docs = docs + + return r +} + +// Ids The IDs of the documents you want to retrieve. Allowed when the index is +// specified in the request URI. +// API name: ids +func (r *Mget) Ids(ids ...string) *Mget { + r.req.Ids = ids return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mget/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mget/request.go index 43f2df05f..5a9992a3e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mget/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mget/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package mget @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package mget // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/mget/MultiGetRequest.ts#L25-L91 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/mget/MultiGetRequest.ts#L25-L91 type Request struct { // Docs The documents you want to retrieve. Required if no index is specified in the diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mget/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mget/response.go index 01b012326..1586ce75f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mget/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mget/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package mget @@ -26,10 +26,10 @@ import ( // Response holds the response body struct for the package mget // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/mget/MultiGetResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/mget/MultiGetResponse.ts#L22-L26 type Response struct { - Docs []types.ResponseItem `json:"docs"` + Docs []types.MgetResponseItem `json:"docs"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalhits_unmarshaler.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearch/helpers.go similarity index 58% rename from vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalhits_unmarshaler.go rename to vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearch/helpers.go index 1f8dc8924..cbf8603a8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalhits_unmarshaler.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearch/helpers.go @@ -15,34 +15,31 @@ // specific language governing permissions and limitations // under the License. -package types +package msearch import ( - "bytes" "encoding/json" - "strconv" + "fmt" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/totalhitsrelation" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) -// UnmarshalJSON implements Unmarshaler interface, it handles the shortcut for total hits. 
-func (t *TotalHits) UnmarshalJSON(data []byte) error { - type stub TotalHits - tmp := stub{} - dec := json.NewDecoder(bytes.NewReader(data)) - if _, err := strconv.Atoi(string(data)); err == nil { - err := dec.Decode(&t.Value) - if err != nil { - return err - } - t.Relation = totalhitsrelation.Eq - } else { - err := dec.Decode(&tmp) - if err != nil { - return err - } - *t = TotalHits(tmp) +// AddSearch is a helper function to add a new search to the buffer of the current msearch request. +func (r *Msearch) AddSearch(header types.MultisearchHeader, body types.MultisearchBody) error { + h, err := json.Marshal(header) + if err != nil { + return fmt.Errorf("msearch.AddSearch cannot serialize header: %w", err) } + b, err := json.Marshal(body) + if err != nil { + return fmt.Errorf("msearch.AddSearch cannot serialize body: %w", err) + } + + r.buf.Write(h) + r.buf.Write([]byte("\n")) + r.buf.Write(b) + r.buf.Write([]byte("\n")) + return nil } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearch/msearch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearch/msearch.go new file mode 100644 index 000000000..062e950bf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearch/msearch.go @@ -0,0 +1,376 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Allows to execute several search operations in one request. +package msearch + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Msearch struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + req *Request + deferred []func(request *Request) error + raw io.Reader + + paramSet int + + index string +} + +// NewMsearch type alias for index. +type NewMsearch func() *Msearch + +// NewMsearchFunc returns a new instance of Msearch with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewMsearchFunc(tp elastictransport.Interface) NewMsearch { + return func() *Msearch { + n := New(tp) + + return n + } +} + +// Allows to execute several search operations in one request. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html +func New(tp elastictransport.Interface) *Msearch { + r := &Msearch{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Msearch) Raw(raw io.Reader) *Msearch { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Msearch) Request(req *Request) *Msearch { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Msearch) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw != nil { + r.buf.ReadFrom(r.raw) + } else if r.req != nil { + + for _, elem := range *r.req { + data, err := json.Marshal(elem) + if err != nil { + return nil, err + } + r.buf.Write(data) + r.buf.Write([]byte("\n")) + } + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Msearch: %w", err) + } + + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_msearch") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_msearch") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = 
http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+x-ndjson;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Msearch) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the Msearch query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a msearch.Response +func (r Msearch) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + r.TypedKeys(true) + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// Header set a key, value pair in the Msearch headers map. +func (r *Msearch) Header(key, value string) *Msearch { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and index aliases to search. 
+// API Name: index +func (r *Msearch) Index(index string) *Msearch { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If false, the request returns an error if any wildcard expression, index +// alias, or _all value targets only missing or closed indices. This behavior +// applies even if the request targets other open indices. For example, a +// request targeting foo*,bar* returns an error if an index starts with foo but +// no index starts with bar. +// API name: allow_no_indices +func (r *Msearch) AllowNoIndices(allownoindices bool) *Msearch { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// CcsMinimizeRoundtrips If true, network roundtrips between the coordinating node and remote clusters +// are minimized for cross-cluster search requests. +// API name: ccs_minimize_roundtrips +func (r *Msearch) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Msearch { + r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) + + return r +} + +// ExpandWildcards Type of index that wildcard expressions can match. If the request can target +// data streams, this argument determines whether wildcard expressions match +// hidden data streams. +// API name: expand_wildcards +func (r *Msearch) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Msearch { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreThrottled If true, concrete, expanded or aliased indices are ignored when frozen. +// API name: ignore_throttled +func (r *Msearch) IgnoreThrottled(ignorethrottled bool) *Msearch { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) + + return r +} + +// IgnoreUnavailable If true, missing or closed indices are not included in the response. 
+// API name: ignore_unavailable +func (r *Msearch) IgnoreUnavailable(ignoreunavailable bool) *Msearch { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// MaxConcurrentSearches Maximum number of concurrent searches the multi search API can execute. +// API name: max_concurrent_searches +func (r *Msearch) MaxConcurrentSearches(maxconcurrentsearches string) *Msearch { + r.values.Set("max_concurrent_searches", maxconcurrentsearches) + + return r +} + +// MaxConcurrentShardRequests Maximum number of concurrent shard requests that each sub-search request +// executes per node. +// API name: max_concurrent_shard_requests +func (r *Msearch) MaxConcurrentShardRequests(maxconcurrentshardrequests string) *Msearch { + r.values.Set("max_concurrent_shard_requests", maxconcurrentshardrequests) + + return r +} + +// PreFilterShardSize Defines a threshold that enforces a pre-filter roundtrip to prefilter search +// shards based on query rewriting if the number of shards the search request +// expands to exceeds the threshold. This filter roundtrip can limit the number +// of shards significantly if for instance a shard can not match any documents +// based on its rewrite method i.e., if date filters are mandatory to match but +// the shard bounds and the query are disjoint. +// API name: pre_filter_shard_size +func (r *Msearch) PreFilterShardSize(prefiltershardsize string) *Msearch { + r.values.Set("pre_filter_shard_size", prefiltershardsize) + + return r +} + +// RestTotalHitsAsInt If true, hits.total are returned as an integer in the response. Defaults to +// false, which returns an object. +// API name: rest_total_hits_as_int +func (r *Msearch) RestTotalHitsAsInt(resttotalhitsasint bool) *Msearch { + r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) + + return r +} + +// Routing Custom routing value used to route search operations to a specific shard. 
+// API name: routing +func (r *Msearch) Routing(routing string) *Msearch { + r.values.Set("routing", routing) + + return r +} + +// SearchType Indicates whether global term and document frequencies should be used when +// scoring returned documents. +// API name: search_type +func (r *Msearch) SearchType(searchtype searchtype.SearchType) *Msearch { + r.values.Set("search_type", searchtype.String()) + + return r +} + +// TypedKeys Specifies whether aggregation and suggester names should be prefixed by their +// respective types in the response. +// API name: typed_keys +func (r *Msearch) TypedKeys(typedkeys bool) *Msearch { + r.values.Set("typed_keys", strconv.FormatBool(typedkeys)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearch/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearch/request.go new file mode 100644 index 000000000..e146cf8d0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearch/request.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package msearch + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package msearch +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/msearch/MultiSearchRequest.ts#L25-L96 +type Request = []types.MsearchRequestItem diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearch/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearch/response.go new file mode 100644 index 000000000..064a970e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearch/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package msearch + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package msearch +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/msearch/MultiSearchResponse.ts#L25-L27 + +type Response struct { + Responses []types.MsearchResponseItem `json:"responses"` + Took int64 `json:"took"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearchtemplate/msearch_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearchtemplate/msearch_template.go new file mode 100644 index 000000000..c959f2de6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearchtemplate/msearch_template.go @@ -0,0 +1,309 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Allows to execute several search template operations in one request. +package msearchtemplate + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type MsearchTemplate struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + req *Request + deferred []func(request *Request) error + raw io.Reader + + paramSet int + + index string +} + +// NewMsearchTemplate type alias for index. +type NewMsearchTemplate func() *MsearchTemplate + +// NewMsearchTemplateFunc returns a new instance of MsearchTemplate with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewMsearchTemplateFunc(tp elastictransport.Interface) NewMsearchTemplate { + return func() *MsearchTemplate { + n := New(tp) + + return n + } +} + +// Allows to execute several search template operations in one request. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html +func New(tp elastictransport.Interface) *MsearchTemplate { + r := &MsearchTemplate{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. 
+func (r *MsearchTemplate) Raw(raw io.Reader) *MsearchTemplate { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *MsearchTemplate) Request(req *Request) *MsearchTemplate { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *MsearchTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw != nil { + r.buf.ReadFrom(r.raw) + } else if r.req != nil { + + for _, elem := range *r.req { + data, err := json.Marshal(elem) + if err != nil { + return nil, err + } + r.buf.Write(data) + r.buf.Write([]byte("\n")) + } + + if err != nil { + return nil, fmt.Errorf("could not serialise request for MsearchTemplate: %w", err) + } + + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_msearch") + path.WriteString("/") + path.WriteString("template") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_msearch") + path.WriteString("/") + path.WriteString("template") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+x-ndjson;compatible-with=8") + } + } + + if 
req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r MsearchTemplate) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the MsearchTemplate query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a msearchtemplate.Response +func (r MsearchTemplate) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + r.TypedKeys(true) + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// Header set a key, value pair in the MsearchTemplate headers map. +func (r *MsearchTemplate) Header(key, value string) *MsearchTemplate { + r.headers.Set(key, value) + + return r +} + +// Index Comma-separated list of data streams, indices, and aliases to search. +// Supports wildcards (`*`). +// To search all data streams and indices, omit this parameter or use `*`. 
+// API Name: index +func (r *MsearchTemplate) Index(index string) *MsearchTemplate { + r.paramSet |= indexMask + r.index = index + + return r +} + +// CcsMinimizeRoundtrips If `true`, network round-trips are minimized for cross-cluster search +// requests. +// API name: ccs_minimize_roundtrips +func (r *MsearchTemplate) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *MsearchTemplate { + r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) + + return r +} + +// MaxConcurrentSearches Maximum number of concurrent searches the API can run. +// API name: max_concurrent_searches +func (r *MsearchTemplate) MaxConcurrentSearches(maxconcurrentsearches string) *MsearchTemplate { + r.values.Set("max_concurrent_searches", maxconcurrentsearches) + + return r +} + +// SearchType The type of the search operation. +// Available options: `query_then_fetch`, `dfs_query_then_fetch`. +// API name: search_type +func (r *MsearchTemplate) SearchType(searchtype searchtype.SearchType) *MsearchTemplate { + r.values.Set("search_type", searchtype.String()) + + return r +} + +// RestTotalHitsAsInt If `true`, the response returns `hits.total` as an integer. +// If `false`, it returns `hits.total` as an object. +// API name: rest_total_hits_as_int +func (r *MsearchTemplate) RestTotalHitsAsInt(resttotalhitsasint bool) *MsearchTemplate { + r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) + + return r +} + +// TypedKeys If `true`, the response prefixes aggregation and suggester names with their +// respective types. 
+// API name: typed_keys +func (r *MsearchTemplate) TypedKeys(typedkeys bool) *MsearchTemplate { + r.values.Set("typed_keys", strconv.FormatBool(typedkeys)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearchtemplate/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearchtemplate/request.go new file mode 100644 index 000000000..a690eddea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearchtemplate/request.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package msearchtemplate + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package msearchtemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/msearch_template/MultiSearchTemplateRequest.ts#L25-L70 +type Request = []types.RequestItem diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearchtemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearchtemplate/response.go new file mode 100644 index 000000000..98f78995f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/msearchtemplate/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package msearchtemplate + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package msearchtemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/msearch_template/MultiSearchTemplateResponse.ts#L22-L24 + +type Response struct { + Responses []types.MsearchResponseItem `json:"responses"` + Took int64 `json:"took"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mtermvectors/mtermvectors.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mtermvectors/mtermvectors.go index 3d4b16eb5..5f9d6d353 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mtermvectors/mtermvectors.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mtermvectors/mtermvectors.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns multiple termvectors in one request. 
package mtermvectors @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" ) @@ -55,8 +54,9 @@ type Mtermvectors struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -85,6 +85,8 @@ func New(tp elastictransport.Interface) *Mtermvectors { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +116,19 @@ func (r *Mtermvectors) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +136,7 @@ func (r *Mtermvectors) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -209,7 +222,6 @@ func (r Mtermvectors) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -218,6 +230,10 @@ func (r Mtermvectors) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -228,120 +244,121 @@ func (r *Mtermvectors) Header(key, value string) *Mtermvectors { return r } -// Index The index in which the document resides. +// Index Name of the index that contains the documents. // API Name: index -func (r *Mtermvectors) Index(v string) *Mtermvectors { +func (r *Mtermvectors) Index(index string) *Mtermvectors { r.paramSet |= indexMask - r.index = v - - return r -} - -// Ids A comma-separated list of documents ids. 
You must define ids as parameter or -// set "ids" or "docs" in the request body -// API name: ids -func (r *Mtermvectors) Ids(v string) *Mtermvectors { - r.values.Set("ids", v) + r.index = index return r } -// Fields A comma-separated list of fields to return. Applies to all returned documents -// unless otherwise specified in body "params" or "docs". +// Fields Comma-separated list or wildcard expressions of fields to include in the +// statistics. +// Used as the default list unless a specific field list is provided in the +// `completion_fields` or `fielddata_fields` parameters. // API name: fields -func (r *Mtermvectors) Fields(v string) *Mtermvectors { - r.values.Set("fields", v) +func (r *Mtermvectors) Fields(fields ...string) *Mtermvectors { + r.values.Set("fields", strings.Join(fields, ",")) return r } -// FieldStatistics Specifies if document count, sum of document frequencies and sum of total -// term frequencies should be returned. Applies to all returned documents unless -// otherwise specified in body "params" or "docs". +// FieldStatistics If `true`, the response includes the document count, sum of document +// frequencies, and sum of total term frequencies. // API name: field_statistics -func (r *Mtermvectors) FieldStatistics(b bool) *Mtermvectors { - r.values.Set("field_statistics", strconv.FormatBool(b)) +func (r *Mtermvectors) FieldStatistics(fieldstatistics bool) *Mtermvectors { + r.values.Set("field_statistics", strconv.FormatBool(fieldstatistics)) return r } -// Offsets Specifies if term offsets should be returned. Applies to all returned -// documents unless otherwise specified in body "params" or "docs". +// Offsets If `true`, the response includes term offsets. 
// API name: offsets -func (r *Mtermvectors) Offsets(b bool) *Mtermvectors { - r.values.Set("offsets", strconv.FormatBool(b)) +func (r *Mtermvectors) Offsets(offsets bool) *Mtermvectors { + r.values.Set("offsets", strconv.FormatBool(offsets)) return r } -// Payloads Specifies if term payloads should be returned. Applies to all returned -// documents unless otherwise specified in body "params" or "docs". +// Payloads If `true`, the response includes term payloads. // API name: payloads -func (r *Mtermvectors) Payloads(b bool) *Mtermvectors { - r.values.Set("payloads", strconv.FormatBool(b)) +func (r *Mtermvectors) Payloads(payloads bool) *Mtermvectors { + r.values.Set("payloads", strconv.FormatBool(payloads)) return r } -// Positions Specifies if term positions should be returned. Applies to all returned -// documents unless otherwise specified in body "params" or "docs". +// Positions If `true`, the response includes term positions. // API name: positions -func (r *Mtermvectors) Positions(b bool) *Mtermvectors { - r.values.Set("positions", strconv.FormatBool(b)) +func (r *Mtermvectors) Positions(positions bool) *Mtermvectors { + r.values.Set("positions", strconv.FormatBool(positions)) return r } -// Preference Specify the node or shard the operation should be performed on (default: -// random) .Applies to all returned documents unless otherwise specified in body -// "params" or "docs". +// Preference Specifies the node or shard the operation should be performed on. +// Random by default. // API name: preference -func (r *Mtermvectors) Preference(v string) *Mtermvectors { - r.values.Set("preference", v) +func (r *Mtermvectors) Preference(preference string) *Mtermvectors { + r.values.Set("preference", preference) return r } -// Realtime Specifies if requests are real-time as opposed to near-real-time (default: -// true). +// Realtime If true, the request is real-time as opposed to near-real-time. 
// API name: realtime -func (r *Mtermvectors) Realtime(b bool) *Mtermvectors { - r.values.Set("realtime", strconv.FormatBool(b)) +func (r *Mtermvectors) Realtime(realtime bool) *Mtermvectors { + r.values.Set("realtime", strconv.FormatBool(realtime)) return r } -// Routing Specific routing value. Applies to all returned documents unless otherwise -// specified in body "params" or "docs". +// Routing Custom value used to route operations to a specific shard. // API name: routing -func (r *Mtermvectors) Routing(v string) *Mtermvectors { - r.values.Set("routing", v) +func (r *Mtermvectors) Routing(routing string) *Mtermvectors { + r.values.Set("routing", routing) return r } -// TermStatistics Specifies if total term frequency and document frequency should be returned. -// Applies to all returned documents unless otherwise specified in body "params" -// or "docs". +// TermStatistics If true, the response includes term frequency and document frequency. // API name: term_statistics -func (r *Mtermvectors) TermStatistics(b bool) *Mtermvectors { - r.values.Set("term_statistics", strconv.FormatBool(b)) +func (r *Mtermvectors) TermStatistics(termstatistics bool) *Mtermvectors { + r.values.Set("term_statistics", strconv.FormatBool(termstatistics)) return r } -// Version Explicit version number for concurrency control +// Version If `true`, returns the document version as part of a hit. // API name: version -func (r *Mtermvectors) Version(v string) *Mtermvectors { - r.values.Set("version", v) +func (r *Mtermvectors) Version(versionnumber string) *Mtermvectors { + r.values.Set("version", versionnumber) return r } -// VersionType Specific version type +// VersionType Specific version type. 
// API name: version_type -func (r *Mtermvectors) VersionType(enum versiontype.VersionType) *Mtermvectors { - r.values.Set("version_type", enum.String()) +func (r *Mtermvectors) VersionType(versiontype versiontype.VersionType) *Mtermvectors { + r.values.Set("version_type", versiontype.String()) + + return r +} + +// Docs Array of existing or artificial documents. +// API name: docs +func (r *Mtermvectors) Docs(docs ...types.MTermVectorsOperation) *Mtermvectors { + r.req.Docs = docs + + return r +} + +// Ids Simplified syntax to specify documents by their ID if they're in the same +// index. +// API name: ids +func (r *Mtermvectors) Ids(ids ...string) *Mtermvectors { + r.req.Ids = ids return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mtermvectors/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mtermvectors/request.go index 55cb92b68..10e2cb68b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mtermvectors/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mtermvectors/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package mtermvectors @@ -29,10 +29,14 @@ import ( // Request holds the request body struct for the package mtermvectors // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/mtermvectors/MultiTermVectorsRequest.ts#L31-L58 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/mtermvectors/MultiTermVectorsRequest.ts#L31-L109 type Request struct { + + // Docs Array of existing or artificial documents. 
Docs []types.MTermVectorsOperation `json:"docs,omitempty"` - Ids []string `json:"ids,omitempty"` + // Ids Simplified syntax to specify documents by their ID if they're in the same + // index. + Ids []string `json:"ids,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mtermvectors/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mtermvectors/response.go index e5ed2229b..be5533136 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mtermvectors/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/mtermvectors/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package mtermvectors @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mtermvectors // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/mtermvectors/MultiTermVectorsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/mtermvectors/MultiTermVectorsResponse.ts#L22-L24 type Response struct { Docs []types.TermVectorsResult `json:"docs"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/openpointintime/open_point_in_time.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/openpointintime/open_point_in_time.go index 7783e43f4..cd7514226 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/openpointintime/open_point_in_time.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/openpointintime/open_point_in_time.go @@ -16,7 +16,7 @@ // 
under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Open a point in time that can be used in subsequent searches package openpointintime @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -68,7 +69,7 @@ func NewOpenPointInTimeFunc(tp elastictransport.Interface) NewOpenPointInTime { return func(index string) *OpenPointInTime { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -169,7 +170,6 @@ func (r OpenPointInTime) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -178,6 +178,10 @@ func (r OpenPointInTime) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,52 +216,59 @@ func (r *OpenPointInTime) Header(key, value string) *OpenPointInTime { // Index A comma-separated list of index names to open point in time; use `_all` or // empty string to perform the operation on all indices // API Name: index -func (r *OpenPointInTime) Index(v string) *OpenPointInTime { +func (r *OpenPointInTime) _index(index string) *OpenPointInTime { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// KeepAlive Specific the time to live for the point in time +// KeepAlive Extends the time to live of the corresponding point in time. 
// API name: keep_alive -func (r *OpenPointInTime) KeepAlive(v string) *OpenPointInTime { - r.values.Set("keep_alive", v) +func (r *OpenPointInTime) KeepAlive(duration string) *OpenPointInTime { + r.values.Set("keep_alive", duration) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *OpenPointInTime) IgnoreUnavailable(b bool) *OpenPointInTime { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *OpenPointInTime) IgnoreUnavailable(ignoreunavailable bool) *OpenPointInTime { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// Preference Specify the node or shard the operation should be performed on (default: -// random) +// Preference Specifies the node or shard the operation should be performed on. +// Random by default. // API name: preference -func (r *OpenPointInTime) Preference(v string) *OpenPointInTime { - r.values.Set("preference", v) +func (r *OpenPointInTime) Preference(preference string) *OpenPointInTime { + r.values.Set("preference", preference) return r } -// Routing Specific routing value +// Routing Custom value used to route operations to a specific shard. // API name: routing -func (r *OpenPointInTime) Routing(v string) *OpenPointInTime { - r.values.Set("routing", v) +func (r *OpenPointInTime) Routing(routing string) *OpenPointInTime { + r.values.Set("routing", routing) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. 
Valid values are: +// `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *OpenPointInTime) ExpandWildcards(v string) *OpenPointInTime { - r.values.Set("expand_wildcards", v) +func (r *OpenPointInTime) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *OpenPointInTime { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/openpointintime/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/openpointintime/response.go index f894b4c14..1a942703f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/openpointintime/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/openpointintime/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package openpointintime // Response holds the response body struct for the package openpointintime // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/open_point_in_time/OpenPointInTimeResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/open_point_in_time/OpenPointInTimeResponse.ts#L22-L24 type Response struct { Id string `json:"id"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/ping/ping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/ping/ping.go index 2e6b44a6a..9e803334d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/ping/ping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/ping/ping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns whether the cluster is running. package ping @@ -24,7 +24,6 @@ package ping import ( gobytes "bytes" "context" - "encoding/json" "errors" "fmt" "io" @@ -34,7 +33,6 @@ import ( "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. 
@@ -138,33 +136,8 @@ func (r Ping) Perform(ctx context.Context) (*http.Response, error) { } // Do runs the request through the transport, handle the response and returns a ping.Response -func (r Ping) Do(ctx context.Context) (*Response, error) { - - response := NewResponse() - - res, err := r.Perform(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() - - if res.StatusCode < 299 { - err = json.NewDecoder(res.Body).Decode(response) - if err != nil { - return nil, err - } - - return response, nil - - } - - errorResponse := types.NewElasticsearchError() - err = json.NewDecoder(res.Body).Decode(errorResponse) - if err != nil { - return nil, err - } - - return nil, errorResponse +func (r Ping) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/putscript/put_script.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/putscript/put_script.go index 5f992735a..25eebde1d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/putscript/put_script.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/putscript/put_script.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates or updates a script. 
package putscript @@ -54,8 +54,9 @@ type PutScript struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -72,7 +73,7 @@ func NewPutScriptFunc(tp elastictransport.Interface) NewPutScript { return func(id string) *PutScript { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -87,6 +88,8 @@ func New(tp elastictransport.Interface) *PutScript { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -116,9 +119,19 @@ func (r *PutScript) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -126,6 +139,7 @@ func (r *PutScript) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -217,7 +231,6 @@ func (r PutScript) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -226,6 +239,10 @@ func (r PutScript) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -236,36 +253,52 @@ func (r *PutScript) Header(key, value string) *PutScript { return r } -// Id Script ID +// Id Identifier for the stored script or search template. +// Must be unique within the cluster. // API Name: id -func (r *PutScript) Id(v string) *PutScript { +func (r *PutScript) _id(id string) *PutScript { r.paramSet |= idMask - r.id = v + r.id = id return r } -// Context Script context +// Context Context in which the script or search template should run. 
+// To prevent errors, the API immediately compiles the script or template in +// this context. // API Name: context -func (r *PutScript) Context(v string) *PutScript { +func (r *PutScript) Context(context string) *PutScript { r.paramSet |= contextMask - r.context = v + r.context = context return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *PutScript) MasterTimeout(v string) *PutScript { - r.values.Set("master_timeout", v) +func (r *PutScript) MasterTimeout(duration string) *PutScript { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *PutScript) Timeout(v string) *PutScript { - r.values.Set("timeout", v) +func (r *PutScript) Timeout(duration string) *PutScript { + r.values.Set("timeout", duration) + + return r +} + +// Script Contains the script or search template, its parameters, and its language. +// API name: script +func (r *PutScript) Script(script *types.StoredScript) *PutScript { + + r.req.Script = *script return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/putscript/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/putscript/request.go index 1749d3f9e..051d959bb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/putscript/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/putscript/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putscript @@ -29,8 +29,10 @@ import ( // Request holds the request body struct for the package putscript // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/put_script/PutScriptRequest.ts#L25-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/put_script/PutScriptRequest.ts#L25-L64 type Request struct { + + // Script Contains the script or search template, its parameters, and its language. Script types.StoredScript `json:"script"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/putscript/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/putscript/response.go index f15236678..8baf86d2b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/putscript/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/putscript/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putscript // Response holds the response body struct for the package putscript // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/put_script/PutScriptResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/put_script/PutScriptResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rankeval/rank_eval.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rankeval/rank_eval.go index 8998c3eb8..5cfbd77d2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rankeval/rank_eval.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rankeval/rank_eval.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Allows to evaluate the quality of ranked search results over a set of typical // search queries @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -54,8 +55,9 @@ type RankEval struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -85,6 +87,8 @@ func New(tp elastictransport.Interface) *RankEval { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +118,19 @@ func (r *RankEval) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +138,7 @@ func (r *RankEval) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -209,7 +224,6 @@ func (r RankEval) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -218,6 +232,10 @@ func (r RankEval) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -233,9 +251,9 @@ func (r *RankEval) Header(key, value string) *RankEval { // To target all data streams and indices in a cluster, omit this parameter or // use `_all` or `*`. 
// API Name: index -func (r *RankEval) Index(v string) *RankEval { +func (r *RankEval) Index(index string) *RankEval { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -246,8 +264,8 @@ func (r *RankEval) Index(v string) *RankEval { // request targeting `foo*,bar*` returns an error if an index starts with `foo` // but no index starts with `bar`. // API name: allow_no_indices -func (r *RankEval) AllowNoIndices(b bool) *RankEval { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *RankEval) AllowNoIndices(allownoindices bool) *RankEval { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } @@ -255,24 +273,45 @@ func (r *RankEval) AllowNoIndices(b bool) *RankEval { // ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, // closed or both. // API name: expand_wildcards -func (r *RankEval) ExpandWildcards(v string) *RankEval { - r.values.Set("expand_wildcards", v) +func (r *RankEval) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *RankEval { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } // IgnoreUnavailable If `true`, missing or closed indices are not included in the response. // API name: ignore_unavailable -func (r *RankEval) IgnoreUnavailable(b bool) *RankEval { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *RankEval) IgnoreUnavailable(ignoreunavailable bool) *RankEval { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } // SearchType Search operation type // API name: search_type -func (r *RankEval) SearchType(v string) *RankEval { - r.values.Set("search_type", v) +func (r *RankEval) SearchType(searchtype string) *RankEval { + r.values.Set("search_type", searchtype) + + return r +} + +// Metric Definition of the evaluation metric to calculate. 
+// API name: metric +func (r *RankEval) Metric(metric *types.RankEvalMetric) *RankEval { + + r.req.Metric = metric + + return r +} + +// Requests A set of typical search requests, together with their provided ratings. +// API name: requests +func (r *RankEval) Requests(requests ...types.RankEvalRequestItem) *RankEval { + r.req.Requests = requests return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rankeval/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rankeval/request.go index 542449f95..98d8b21ae 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rankeval/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rankeval/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package rankeval @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package rankeval // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/rank_eval/RankEvalRequest.ts#L24-L61 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/rank_eval/RankEvalRequest.ts#L24-L61 type Request struct { // Metric Definition of the evaluation metric to calculate. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rankeval/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rankeval/response.go index 6cbf55354..b6d44daaf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rankeval/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rankeval/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package rankeval @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package rankeval // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/rank_eval/RankEvalResponse.ts#L26-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/rank_eval/RankEvalResponse.ts#L26-L34 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindex/reindex.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindex/reindex.go index 13e17011e..0205dcfe2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindex/reindex.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindex/reindex.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Allows to copy documents from one index to another, optionally filtering the // source @@ -39,6 +39,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conflicts" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -53,8 +54,9 @@ type Reindex struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -85,6 +87,8 @@ func New(tp elastictransport.Interface) *Reindex { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +118,19 @@ func (r *Reindex) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +138,7 @@ func (r *Reindex) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -201,7 +216,6 @@ func (r Reindex) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -210,6 +224,10 @@ func (r Reindex) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -220,71 +238,125 @@ func (r *Reindex) Header(key, value string) *Reindex { return r } -// Refresh Should the affected indexes be 
refreshed? +// Refresh If `true`, the request refreshes affected shards to make this operation +// visible to search. // API name: refresh -func (r *Reindex) Refresh(b bool) *Reindex { - r.values.Set("refresh", strconv.FormatBool(b)) +func (r *Reindex) Refresh(refresh bool) *Reindex { + r.values.Set("refresh", strconv.FormatBool(refresh)) return r } -// RequestsPerSecond The throttle to set on this request in sub-requests per second. -1 means no -// throttle. +// RequestsPerSecond The throttle for this request in sub-requests per second. +// Defaults to no throttle. // API name: requests_per_second -func (r *Reindex) RequestsPerSecond(v string) *Reindex { - r.values.Set("requests_per_second", v) +func (r *Reindex) RequestsPerSecond(requestspersecond string) *Reindex { + r.values.Set("requests_per_second", requestspersecond) return r } -// Scroll Control how long to keep the search context alive +// Scroll Specifies how long a consistent view of the index should be maintained for +// scrolled search. // API name: scroll -func (r *Reindex) Scroll(v string) *Reindex { - r.values.Set("scroll", v) +func (r *Reindex) Scroll(duration string) *Reindex { + r.values.Set("scroll", duration) return r } -// Slices The number of slices this task should be divided into. Defaults to 1, meaning -// the task isn't sliced into subtasks. Can be set to `auto`. +// Slices The number of slices this task should be divided into. +// Defaults to 1 slice, meaning the task isn’t sliced into subtasks. // API name: slices -func (r *Reindex) Slices(v string) *Reindex { - r.values.Set("slices", v) +func (r *Reindex) Slices(slices string) *Reindex { + r.values.Set("slices", slices) return r } -// Timeout Time each individual bulk request should wait for shards that are -// unavailable. +// Timeout Period each indexing waits for automatic index creation, dynamic mapping +// updates, and waiting for active shards. 
// API name: timeout -func (r *Reindex) Timeout(v string) *Reindex { - r.values.Set("timeout", v) +func (r *Reindex) Timeout(duration string) *Reindex { + r.values.Set("timeout", duration) return r } -// WaitForActiveShards Sets the number of shard copies that must be active before proceeding with -// the reindex operation. Defaults to 1, meaning the primary shard only. Set to -// `all` for all shard copies, otherwise set to any non-negative value less than -// or equal to the total number of copies for the shard (number of replicas + 1) +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). // API name: wait_for_active_shards -func (r *Reindex) WaitForActiveShards(v string) *Reindex { - r.values.Set("wait_for_active_shards", v) +func (r *Reindex) WaitForActiveShards(waitforactiveshards string) *Reindex { + r.values.Set("wait_for_active_shards", waitforactiveshards) return r } -// WaitForCompletion Should the request should block until the reindex is complete. +// WaitForCompletion If `true`, the request blocks until the operation is complete. // API name: wait_for_completion -func (r *Reindex) WaitForCompletion(b bool) *Reindex { - r.values.Set("wait_for_completion", strconv.FormatBool(b)) +func (r *Reindex) WaitForCompletion(waitforcompletion bool) *Reindex { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) return r } +// RequireAlias If `true`, the destination must be an index alias. // API name: require_alias -func (r *Reindex) RequireAlias(b bool) *Reindex { - r.values.Set("require_alias", strconv.FormatBool(b)) +func (r *Reindex) RequireAlias(requirealias bool) *Reindex { + r.values.Set("require_alias", strconv.FormatBool(requirealias)) + + return r +} + +// Conflicts Set to proceed to continue reindexing even if there are conflicts. 
+// API name: conflicts +func (r *Reindex) Conflicts(conflicts conflicts.Conflicts) *Reindex { + r.req.Conflicts = &conflicts + + return r +} + +// Dest The destination you are copying to. +// API name: dest +func (r *Reindex) Dest(dest *types.ReindexDestination) *Reindex { + + r.req.Dest = *dest + + return r +} + +// MaxDocs The maximum number of documents to reindex. +// API name: max_docs +func (r *Reindex) MaxDocs(maxdocs int64) *Reindex { + + r.req.MaxDocs = &maxdocs + + return r +} + +// Script The script to run to update the document source or metadata when reindexing. +// API name: script +func (r *Reindex) Script(script types.Script) *Reindex { + r.req.Script = script + + return r +} + +// API name: size +func (r *Reindex) Size(size int64) *Reindex { + + r.req.Size = &size + + return r +} + +// Source The source you are copying from. +// API name: source +func (r *Reindex) Source(source *types.ReindexSource) *Reindex { + + r.req.Source = *source return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindex/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindex/request.go index 00788b46d..a27b7781c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindex/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindex/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package reindex @@ -30,14 +30,20 @@ import ( // Request holds the request body struct for the package reindex // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/reindex/ReindexRequest.ts#L27-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/reindex/ReindexRequest.ts#L27-L101 type Request struct { - Conflicts *conflicts.Conflicts `json:"conflicts,omitempty"` - Dest types.ReindexDestination `json:"dest"` - MaxDocs *int64 `json:"max_docs,omitempty"` - Script types.Script `json:"script,omitempty"` - Size *int64 `json:"size,omitempty"` - Source types.ReindexSource `json:"source"` + + // Conflicts Set to proceed to continue reindexing even if there are conflicts. + Conflicts *conflicts.Conflicts `json:"conflicts,omitempty"` + // Dest The destination you are copying to. + Dest types.ReindexDestination `json:"dest"` + // MaxDocs The maximum number of documents to reindex. + MaxDocs *int64 `json:"max_docs,omitempty"` + // Script The script to run to update the document source or metadata when reindexing. + Script types.Script `json:"script,omitempty"` + Size *int64 `json:"size,omitempty"` + // Source The source you are copying from. 
+ Source types.ReindexSource `json:"source"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindex/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindex/response.go index bef4ee8c1..380636934 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindex/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindex/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package reindex @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package reindex // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/reindex/ReindexResponse.ts#L26-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/reindex/ReindexResponse.ts#L26-L45 type Response struct { Batches *int64 `json:"batches,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindexrethrottle/reindex_rethrottle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindexrethrottle/reindex_rethrottle.go index 35a50ba58..8a1d2487a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindexrethrottle/reindex_rethrottle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindexrethrottle/reindex_rethrottle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Changes the number of requests per second for a particular Reindex operation. package reindexrethrottle @@ -67,7 +67,7 @@ func NewReindexRethrottleFunc(tp elastictransport.Interface) NewReindexRethrottl return func(taskid string) *ReindexRethrottle { n := New(tp) - n.TaskId(taskid) + n._taskid(taskid) return n } @@ -170,7 +170,6 @@ func (r ReindexRethrottle) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r ReindexRethrottle) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -210,20 +213,19 @@ func (r *ReindexRethrottle) Header(key, value string) *ReindexRethrottle { return r } -// TaskId The task id to rethrottle +// TaskId Identifier for the task. // API Name: taskid -func (r *ReindexRethrottle) TaskId(v string) *ReindexRethrottle { +func (r *ReindexRethrottle) _taskid(taskid string) *ReindexRethrottle { r.paramSet |= taskidMask - r.taskid = v + r.taskid = taskid return r } -// RequestsPerSecond The throttle to set on this request in floating sub-requests per second. -1 -// means set no throttle. +// RequestsPerSecond The throttle for this request in sub-requests per second. 
// API name: requests_per_second -func (r *ReindexRethrottle) RequestsPerSecond(v string) *ReindexRethrottle { - r.values.Set("requests_per_second", v) +func (r *ReindexRethrottle) RequestsPerSecond(requestspersecond string) *ReindexRethrottle { + r.values.Set("requests_per_second", requestspersecond) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindexrethrottle/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindexrethrottle/response.go index 55e734a6c..f8fdcc43d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindexrethrottle/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/reindexrethrottle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package reindexrethrottle @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package reindexrethrottle // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/reindex_rethrottle/ReindexRethrottleResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/reindex_rethrottle/ReindexRethrottleResponse.ts#L23-L25 type Response struct { Nodes map[string]types.ReindexNode `json:"nodes"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rendersearchtemplate/render_search_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rendersearchtemplate/render_search_template.go index 524a2d90d..0dd7225ba 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rendersearchtemplate/render_search_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rendersearchtemplate/render_search_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Allows to use the Mustache language to pre-render a search definition. package rendersearchtemplate @@ -52,8 +52,9 @@ type RenderSearchTemplate struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -82,6 +83,8 @@ func New(tp elastictransport.Interface) *RenderSearchTemplate { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -111,9 +114,19 @@ func (r *RenderSearchTemplate) HttpRequest(ctx context.Context) (*http.Request, var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -121,6 +134,7 @@ func (r *RenderSearchTemplate) HttpRequest(ctx context.Context) (*http.Request, } r.buf.Write(data) + } r.path.Scheme = "http" @@ -210,7 +224,6 @@ func (r RenderSearchTemplate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -219,6 +232,10 @@ func (r RenderSearchTemplate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -229,11 +246,44 @@ func (r 
*RenderSearchTemplate) Header(key, value string) *RenderSearchTemplate { return r } -// Id The id of the stored search template +// Id ID of the search template to render. +// If no `source` is specified, this or the `id` request body parameter is +// required. // API Name: id -func (r *RenderSearchTemplate) Id(v string) *RenderSearchTemplate { +func (r *RenderSearchTemplate) Id(id string) *RenderSearchTemplate { r.paramSet |= idMask - r.id = v + r.id = id + + return r +} + +// API name: file +func (r *RenderSearchTemplate) File(file string) *RenderSearchTemplate { + + r.req.File = &file + + return r +} + +// Params Key-value pairs used to replace Mustache variables in the template. +// The key is the variable name. +// The value is the variable value. +// API name: params +func (r *RenderSearchTemplate) Params(params map[string]json.RawMessage) *RenderSearchTemplate { + + r.req.Params = params + + return r +} + +// Source An inline search template. +// Supports the same parameters as the search API's request body. +// These parameters also support Mustache variables. +// If no `id` or `` is specified, this parameter is required. +// API name: source +func (r *RenderSearchTemplate) Source(source string) *RenderSearchTemplate { + + r.req.Source = &source return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rendersearchtemplate/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rendersearchtemplate/request.go index bc4aced3d..142640337 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rendersearchtemplate/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rendersearchtemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package rendersearchtemplate @@ -27,11 +27,18 @@ import ( // Request holds the request body struct for the package rendersearchtemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/render_search_template/RenderSearchTemplateRequest.ts#L25-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/render_search_template/RenderSearchTemplateRequest.ts#L25-L55 type Request struct { - File *string `json:"file,omitempty"` + File *string `json:"file,omitempty"` + // Params Key-value pairs used to replace Mustache variables in the template. + // The key is the variable name. + // The value is the variable value. Params map[string]json.RawMessage `json:"params,omitempty"` - Source *string `json:"source,omitempty"` + // Source An inline search template. + // Supports the same parameters as the search API's request body. + // These parameters also support Mustache variables. + // If no `id` or `` is specified, this parameter is required. + Source *string `json:"source,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rendersearchtemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rendersearchtemplate/response.go index c216cbc98..548b2acc2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rendersearchtemplate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/rendersearchtemplate/response.go @@ -16,15 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package rendersearchtemplate -import "encoding/json" +import ( + "encoding/json" +) // Response holds the response body struct for the package rendersearchtemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/render_search_template/RenderSearchTemplateResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/render_search_template/RenderSearchTemplateResponse.ts#L23-L25 type Response struct { TemplateOutput map[string]json.RawMessage `json:"template_output"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scriptspainlessexecute/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scriptspainlessexecute/request.go index e6f3a8fce..81536b61f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scriptspainlessexecute/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scriptspainlessexecute/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package scriptspainlessexecute @@ -29,11 +29,15 @@ import ( // Request holds the request body struct for the package scriptspainlessexecute // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/scripts_painless_execute/ExecutePainlessScriptRequest.ts#L24-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/scripts_painless_execute/ExecutePainlessScriptRequest.ts#L24-L46 type Request struct { - Context *string `json:"context,omitempty"` + + // Context The context that the script should run in. + Context *string `json:"context,omitempty"` + // ContextSetup Additional parameters for the `context`. ContextSetup *types.PainlessContextSetup `json:"context_setup,omitempty"` - Script *types.InlineScript `json:"script,omitempty"` + // Script The Painless script to execute. + Script *types.InlineScript `json:"script,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scriptspainlessexecute/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scriptspainlessexecute/response.go index 7d1f55c67..6689b4e7a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scriptspainlessexecute/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scriptspainlessexecute/response.go @@ -16,15 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package scriptspainlessexecute -import "encoding/json" +import ( + "encoding/json" +) // Response holds the response body struct for the package scriptspainlessexecute // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/scripts_painless_execute/ExecutePainlessScriptResponse.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/scripts_painless_execute/ExecutePainlessScriptResponse.ts#L20-L24 type Response struct { Result json.RawMessage `json:"result,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scriptspainlessexecute/scripts_painless_execute.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scriptspainlessexecute/scripts_painless_execute.go index bc96b5da0..ab23a532c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scriptspainlessexecute/scripts_painless_execute.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scriptspainlessexecute/scripts_painless_execute.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Allows an arbitrary script to be executed and a result to be returned package scriptspainlessexecute @@ -48,8 +48,9 @@ type ScriptsPainlessExecute struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +77,8 @@ func New(tp elastictransport.Interface) *ScriptsPainlessExecute { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +108,19 @@ func (r *ScriptsPainlessExecute) HttpRequest(ctx context.Context) (*http.Request var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +128,7 @@ func (r *ScriptsPainlessExecute) HttpRequest(ctx context.Context) (*http.Request } r.buf.Write(data) + } r.path.Scheme = "http" @@ -196,7 +210,6 @@ func (r ScriptsPainlessExecute) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -205,6 +218,10 @@ func (r ScriptsPainlessExecute) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,3 +231,30 @@ func (r *ScriptsPainlessExecute) Header(key, value string) *ScriptsPainlessExecu return r } + +// Context The context that the script should run in. 
+// API name: context +func (r *ScriptsPainlessExecute) Context(context string) *ScriptsPainlessExecute { + + r.req.Context = &context + + return r +} + +// ContextSetup Additional parameters for the `context`. +// API name: context_setup +func (r *ScriptsPainlessExecute) ContextSetup(contextsetup *types.PainlessContextSetup) *ScriptsPainlessExecute { + + r.req.ContextSetup = contextsetup + + return r +} + +// Script The Painless script to execute. +// API name: script +func (r *ScriptsPainlessExecute) Script(script *types.InlineScript) *ScriptsPainlessExecute { + + r.req.Script = script + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scroll/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scroll/request.go index cab5d5238..1505ca565 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scroll/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scroll/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package scroll @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package scroll // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/scroll/ScrollRequest.ts#L24-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/scroll/ScrollRequest.ts#L24-L59 type Request struct { // Scroll Period to retain the search context for scrolling. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scroll/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scroll/response.go index 75f6f84c5..89ba3bcf8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scroll/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scroll/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package scroll @@ -25,6 +25,7 @@ import ( "encoding/json" "errors" "io" + "strconv" "strings" "github.com/elastic/go-elasticsearch/v8/typedapi/types" @@ -32,7 +33,7 @@ import ( // Response holds the response body struct for the package scroll // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/scroll/ScrollResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/scroll/ScrollResponse.ts#L22-L24 type Response struct { Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` @@ -76,6 +77,10 @@ func (s *Response) UnmarshalJSON(data []byte) error { switch t { case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + for dec.More() { tt, err := dec.Token() if err != nil { @@ -88,415 +93,494 @@ func (s *Response) UnmarshalJSON(data []byte) error { if strings.Contains(value, "#") { elems := strings.Split(value, "#") if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } switch elems[0] { + case "cardinality": o := types.NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { + if err := 
dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "hdr_percentiles": o := types.NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "hdr_percentile_ranks": o := types.NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "tdigest_percentiles": o := types.NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "tdigest_percentile_ranks": o := types.NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "percentiles_bucket": o := types.NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "median_absolute_deviation": o := types.NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "min": o := types.NewMinAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "max": o := types.NewMaxAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sum": o := types.NewSumAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "avg": o := types.NewAvgAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "weighted_avg": o := types.NewWeightedAvgAggregate() - if err := 
dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "value_count": o := types.NewValueCountAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "simple_value": o := types.NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "derivative": o := types.NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "bucket_metric_value": o := types.NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "stats": o := types.NewStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "stats_bucket": o := types.NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "extended_stats": o := types.NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "extended_stats_bucket": o := types.NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_bounds": o := types.NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_centroid": o := types.NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "histogram": o := 
types.NewHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "date_histogram": o := types.NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "auto_date_histogram": o := types.NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "variable_width_histogram": o := types.NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sterms": o := types.NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "lterms": o := types.NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "dterms": o := types.NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umterms": o := types.NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "lrareterms": o := types.NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "srareterms": o := types.NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umrareterms": o := types.NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } 
s.Aggregations[elems[1]] = o + case "multi_terms": o := types.NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "missing": o := types.NewMissingAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "nested": o := types.NewNestedAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "reverse_nested": o := types.NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "global": o := types.NewGlobalAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "filter": o := types.NewFilterAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "children": o := types.NewChildrenAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "parent": o := types.NewParentAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sampler": o := types.NewSamplerAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "unmapped_sampler": o := types.NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geohash_grid": o := types.NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case 
"geotile_grid": o := types.NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geohex_grid": o := types.NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "range": o := types.NewRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "date_range": o := types.NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_distance": o := types.NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "ip_range": o := types.NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "ip_prefix": o := types.NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "filters": o := types.NewFiltersAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "adjacency_matrix": o := types.NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "siglterms": o := types.NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sigsterms": o := types.NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case 
"umsigterms": o := types.NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "composite": o := types.NewCompositeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := types.NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := types.NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "top_hits": o := types.NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "inference": o := types.NewInferenceAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "string_stats": o := types.NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "box_plot": o := types.NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "top_metrics": o := types.NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "t_test": o := types.NewTTestAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "rate": o := types.NewRateAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "simple_long_value": o := 
types.NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "matrix_stats": o := types.NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_line": o := types.NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + default: o := make(map[string]interface{}, 0) if err := dec.Decode(&o); err != nil { @@ -523,6 +607,9 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } if err := dec.Decode(&s.Fields); err != nil { return err } @@ -533,13 +620,34 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "max_score": - if err := dec.Decode(&s.MaxScore); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := types.Float64(value) + s.MaxScore = &f + case float64: + f := types.Float64(v) + s.MaxScore = &f } case "num_reduce_phases": - if err := dec.Decode(&s.NumReducePhases); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumReducePhases = &value + case float64: + f := int64(v) + s.NumReducePhases = &f } case "pit_id": @@ -563,23 +671,109 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "suggest": - if err := dec.Decode(&s.Suggest); err != nil { - return err + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := 
tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + switch elems[0] { + + case "completion": + o := types.NewCompletionSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "phrase": + o := types.NewPhraseSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "term": + o := types.NewTermSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + } + } else { + return errors.New("cannot decode JSON for field Suggest") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[value] = append(s.Suggest[value], o) + } + } } case "terminated_early": - if err := dec.Decode(&s.TerminatedEarly); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TerminatedEarly = &value + case bool: + s.TerminatedEarly = &v } case "timed_out": - if err := dec.Decode(&s.TimedOut); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimedOut = value + case bool: + s.TimedOut = v } case "took": - if err := dec.Decode(&s.Took); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Took = value + case float64: + f 
:= int64(v) + s.Took = f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scroll/scroll.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scroll/scroll.go index a7a8c2861..9f3b0164e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scroll/scroll.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/scroll/scroll.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Allows to retrieve a large numbers of results from a single search request. package scroll @@ -53,8 +53,9 @@ type Scroll struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -83,6 +84,8 @@ func New(tp elastictransport.Interface) *Scroll { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -112,9 +115,19 @@ func (r *Scroll) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -122,6 +135,7 @@ func (r *Scroll) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -211,7 +225,6 @@ func (r Scroll) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -220,6 +233,10 @@ func (r Scroll) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + 
errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -232,26 +249,26 @@ func (r *Scroll) Header(key, value string) *Scroll { // ScrollId The scroll ID // API Name: scrollid -func (r *Scroll) ScrollId(v string) *Scroll { +func (r *Scroll) ScrollId(scrollid string) *Scroll { r.paramSet |= scrollidMask - r.scrollid = v + r.scrollid = scrollid return r } -// Scroll Period to retain the search context for scrolling. -// API name: scroll -func (r *Scroll) Scroll(v string) *Scroll { - r.values.Set("scroll", v) +// RestTotalHitsAsInt If true, the API response’s hit.total property is returned as an integer. If +// false, the API response’s hit.total property is returned as an object. +// API name: rest_total_hits_as_int +func (r *Scroll) RestTotalHitsAsInt(resttotalhitsasint bool) *Scroll { + r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) return r } -// RestTotalHitsAsInt If true, the API response’s hit.total property is returned as an integer. If -// false, the API response’s hit.total property is returned as an object. -// API name: rest_total_hits_as_int -func (r *Scroll) RestTotalHitsAsInt(b bool) *Scroll { - r.values.Set("rest_total_hits_as_int", strconv.FormatBool(b)) +// Scroll Period to retain the search context for scrolling. +// API name: scroll +func (r *Scroll) Scroll(duration types.Duration) *Scroll { + r.req.Scroll = duration return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/search/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/search/request.go index 61593ab87..a5990aecd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/search/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/search/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package search @@ -29,92 +29,122 @@ import ( // Request holds the request body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/SearchRequest.ts#L52-L245 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/SearchRequest.ts#L53-L506 type Request struct { + + // Aggregations Defines the aggregations that are run as part of the search request. Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` - Collapse *types.FieldCollapse `json:"collapse,omitempty"` - // DocvalueFields Array of wildcard (*) patterns. The request returns doc values for field - // names matching these patterns in the hits.fields property of the response. + // Collapse Collapses search results the values of the specified field. + Collapse *types.FieldCollapse `json:"collapse,omitempty"` + // DocvalueFields Array of wildcard (`*`) patterns. + // The request returns doc values for field names matching these patterns in the + // `hits.fields` property of the response. DocvalueFields []types.FieldAndFormat `json:"docvalue_fields,omitempty"` // Explain If true, returns detailed information about score computation as part of a // hit. Explain *bool `json:"explain,omitempty"` // Ext Configuration of search extensions defined by Elasticsearch plugins. Ext map[string]json.RawMessage `json:"ext,omitempty"` - // Fields Array of wildcard (*) patterns. The request returns values for field names - // matching these patterns in the hits.fields property of the response. + // Fields Array of wildcard (`*`) patterns. 
+ // The request returns values for field names matching these patterns in the + // `hits.fields` property of the response. Fields []types.FieldAndFormat `json:"fields,omitempty"` - // From Starting document offset. By default, you cannot page through more than - // 10,000 - // hits using the from and size parameters. To page through more hits, use the - // search_after parameter. - From *int `json:"from,omitempty"` + // From Starting document offset. + // Needs to be non-negative. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. + // To page through more hits, use the `search_after` parameter. + From *int `json:"from,omitempty"` + // Highlight Specifies the highlighter to use for retrieving highlighted snippets from one + // or more fields in your search results. Highlight *types.Highlight `json:"highlight,omitempty"` // IndicesBoost Boosts the _score of documents from specified indices. IndicesBoost []map[string]types.Float64 `json:"indices_boost,omitempty"` // Knn Defines the approximate kNN search to run. Knn []types.KnnQuery `json:"knn,omitempty"` - // MinScore Minimum _score for matching documents. Documents with a lower _score are - // not included in the search results. + // MinScore Minimum `_score` for matching documents. + // Documents with a lower `_score` are not included in the search results. MinScore *types.Float64 `json:"min_score,omitempty"` - // Pit Limits the search to a point in time (PIT). If you provide a PIT, you - // cannot specify an in the request path. - Pit *types.PointInTimeReference `json:"pit,omitempty"` - PostFilter *types.Query `json:"post_filter,omitempty"` - Profile *bool `json:"profile,omitempty"` + // Pit Limits the search to a point in time (PIT). + // If you provide a PIT, you cannot specify an `` in the request path. + Pit *types.PointInTimeReference `json:"pit,omitempty"` + // PostFilter Use the `post_filter` parameter to filter search results. 
+ // The search hits are filtered after the aggregations are calculated. + // A post filter has no impact on the aggregation results. + PostFilter *types.Query `json:"post_filter,omitempty"` + // Profile Set to `true` to return detailed timing information about the execution of + // individual components in a search request. + // NOTE: This is a debugging tool and adds significant overhead to search + // execution. + Profile *bool `json:"profile,omitempty"` // Query Defines the search definition using the Query DSL. - Query *types.Query `json:"query,omitempty"` + Query *types.Query `json:"query,omitempty"` + // Rank Defines the Reciprocal Rank Fusion (RRF) to use. + Rank *types.RankContainer `json:"rank,omitempty"` + // Rescore Can be used to improve precision by reordering just the top (for example 100 + // - 500) documents returned by the `query` and `post_filter` phases. Rescore []types.Rescore `json:"rescore,omitempty"` - // RuntimeMappings Defines one or more runtime fields in the search request. These fields take - // precedence over mapped fields with the same name. - RuntimeMappings map[string]types.RuntimeField `json:"runtime_mappings,omitempty"` + // RuntimeMappings Defines one or more runtime fields in the search request. + // These fields take precedence over mapped fields with the same name. + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` // ScriptFields Retrieve a script evaluation (based on different fields) for each hit. ScriptFields map[string]types.ScriptField `json:"script_fields,omitempty"` - SearchAfter []types.FieldValue `json:"search_after,omitempty"` - // SeqNoPrimaryTerm If true, returns sequence number and primary term of the last modification - // of each hit. See Optimistic concurrency control. + // SearchAfter Used to retrieve the next page of hits using a set of sort values from the + // previous page. 
+ SearchAfter []types.FieldValue `json:"search_after,omitempty"` + // SeqNoPrimaryTerm If `true`, returns sequence number and primary term of the last modification + // of each hit. SeqNoPrimaryTerm *bool `json:"seq_no_primary_term,omitempty"` - // Size The number of hits to return. By default, you cannot page through more - // than 10,000 hits using the from and size parameters. To page through more - // hits, use the search_after parameter. - Size *int `json:"size,omitempty"` - Slice *types.SlicedScroll `json:"slice,omitempty"` - Sort []types.SortCombinations `json:"sort,omitempty"` - // Source_ Indicates which source fields are returned for matching documents. These - // fields are returned in the hits._source property of the search response. + // Size The number of hits to return. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. + // To page through more hits, use the `search_after` parameter. + Size *int `json:"size,omitempty"` + // Slice Can be used to split a scrolled search into multiple slices that can be + // consumed independently. + Slice *types.SlicedScroll `json:"slice,omitempty"` + // Sort A comma-separated list of : pairs. + Sort []types.SortCombinations `json:"sort,omitempty"` + // Source_ Indicates which source fields are returned for matching documents. + // These fields are returned in the hits._source property of the search + // response. Source_ types.SourceConfig `json:"_source,omitempty"` - // Stats Stats groups to associate with the search. Each group maintains a statistics - // aggregation for its associated searches. You can retrieve these stats using - // the indices stats API. + // Stats Stats groups to associate with the search. + // Each group maintains a statistics aggregation for its associated searches. + // You can retrieve these stats using the indices stats API. Stats []string `json:"stats,omitempty"` - // StoredFields List of stored fields to return as part of a hit. 
If no fields are specified, - // no stored fields are included in the response. If this field is specified, - // the _source - // parameter defaults to false. You can pass _source: true to return both source - // fields - // and stored fields in the search response. - StoredFields []string `json:"stored_fields,omitempty"` - Suggest *types.Suggester `json:"suggest,omitempty"` - // TerminateAfter Maximum number of documents to collect for each shard. If a query reaches - // this - // limit, Elasticsearch terminates the query early. Elasticsearch collects - // documents - // before sorting. Defaults to 0, which does not terminate query execution - // early. + // StoredFields List of stored fields to return as part of a hit. + // If no fields are specified, no stored fields are included in the response. + // If this field is specified, the `_source` parameter defaults to `false`. + // You can pass `_source: true` to return both source fields and stored fields + // in the search response. + StoredFields []string `json:"stored_fields,omitempty"` + // Suggest Defines a suggester that provides similar looking terms based on a provided + // text. + Suggest *types.Suggester `json:"suggest,omitempty"` + // TerminateAfter Maximum number of documents to collect for each shard. + // If a query reaches this limit, Elasticsearch terminates the query early. + // Elasticsearch collects documents before sorting. + // Use with caution. + // Elasticsearch applies this parameter to each shard handling the request. + // When possible, let Elasticsearch perform early termination automatically. + // Avoid specifying this parameter for requests that target data streams with + // backing indices across multiple data tiers. + // If set to `0` (default), the query does not terminate early. TerminateAfter *int64 `json:"terminate_after,omitempty"` - // Timeout Specifies the period of time to wait for a response from each shard. 
If no - // response - // is received before the timeout expires, the request fails and returns an - // error. + // Timeout Specifies the period of time to wait for a response from each shard. + // If no response is received before the timeout expires, the request fails and + // returns an error. // Defaults to no timeout. Timeout *string `json:"timeout,omitempty"` // TrackScores If true, calculate and return document scores, even if the scores are not // used for sorting. TrackScores *bool `json:"track_scores,omitempty"` - // TrackTotalHits Number of hits matching the query to count accurately. If true, the exact - // number of hits is returned at the cost of some performance. If false, the - // response does not include the total number of hits matching the query. - // Defaults to 10,000 hits. + // TrackTotalHits Number of hits matching the query to count accurately. + // If `true`, the exact number of hits is returned at the cost of some + // performance. + // If `false`, the response does not include the total number of hits matching + // the query. TrackTotalHits types.TrackHits `json:"track_total_hits,omitempty"` // Version If true, returns document version as part of a hit. Version *bool `json:"version,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/search/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/search/response.go index ef73c6e91..a878d366f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/search/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/search/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package search @@ -25,6 +25,7 @@ import ( "encoding/json" "errors" "io" + "strconv" "strings" "github.com/elastic/go-elasticsearch/v8/typedapi/types" @@ -32,7 +33,7 @@ import ( // Response holds the response body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/SearchResponse.ts#L34-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/SearchResponse.ts#L34-L36 type Response struct { Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` @@ -76,6 +77,10 @@ func (s *Response) UnmarshalJSON(data []byte) error { switch t { case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + for dec.More() { tt, err := dec.Token() if err != nil { @@ -88,415 +93,494 @@ func (s *Response) UnmarshalJSON(data []byte) error { if strings.Contains(value, "#") { elems := strings.Split(value, "#") if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } switch elems[0] { + case "cardinality": o := types.NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "hdr_percentiles": o := types.NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "hdr_percentile_ranks": o := types.NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "tdigest_percentiles": o := 
types.NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "tdigest_percentile_ranks": o := types.NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "percentiles_bucket": o := types.NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "median_absolute_deviation": o := types.NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "min": o := types.NewMinAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "max": o := types.NewMaxAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sum": o := types.NewSumAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "avg": o := types.NewAvgAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "weighted_avg": o := types.NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "value_count": o := types.NewValueCountAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "simple_value": o := types.NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case 
"derivative": o := types.NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "bucket_metric_value": o := types.NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "stats": o := types.NewStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "stats_bucket": o := types.NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "extended_stats": o := types.NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "extended_stats_bucket": o := types.NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_bounds": o := types.NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_centroid": o := types.NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "histogram": o := types.NewHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "date_histogram": o := types.NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "auto_date_histogram": o := types.NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return 
err } s.Aggregations[elems[1]] = o + case "variable_width_histogram": o := types.NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sterms": o := types.NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "lterms": o := types.NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "dterms": o := types.NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umterms": o := types.NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "lrareterms": o := types.NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "srareterms": o := types.NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umrareterms": o := types.NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "multi_terms": o := types.NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "missing": o := types.NewMissingAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "nested": o := types.NewNestedAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return 
err } s.Aggregations[elems[1]] = o + case "reverse_nested": o := types.NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "global": o := types.NewGlobalAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "filter": o := types.NewFilterAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "children": o := types.NewChildrenAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "parent": o := types.NewParentAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sampler": o := types.NewSamplerAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "unmapped_sampler": o := types.NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geohash_grid": o := types.NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geotile_grid": o := types.NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geohex_grid": o := types.NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "range": o := types.NewRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + 
case "date_range": o := types.NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_distance": o := types.NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "ip_range": o := types.NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "ip_prefix": o := types.NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "filters": o := types.NewFiltersAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "adjacency_matrix": o := types.NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "siglterms": o := types.NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sigsterms": o := types.NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umsigterms": o := types.NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "composite": o := types.NewCompositeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := types.NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + case 
"scripted_metric": o := types.NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "top_hits": o := types.NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "inference": o := types.NewInferenceAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "string_stats": o := types.NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "box_plot": o := types.NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "top_metrics": o := types.NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "t_test": o := types.NewTTestAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "rate": o := types.NewRateAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "simple_long_value": o := types.NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "matrix_stats": o := types.NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_line": o := types.NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + default: o := 
make(map[string]interface{}, 0) if err := dec.Decode(&o); err != nil { @@ -523,6 +607,9 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } if err := dec.Decode(&s.Fields); err != nil { return err } @@ -533,13 +620,34 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "max_score": - if err := dec.Decode(&s.MaxScore); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := types.Float64(value) + s.MaxScore = &f + case float64: + f := types.Float64(v) + s.MaxScore = &f } case "num_reduce_phases": - if err := dec.Decode(&s.NumReducePhases); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumReducePhases = &value + case float64: + f := int64(v) + s.NumReducePhases = &f } case "pit_id": @@ -563,23 +671,109 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "suggest": - if err := dec.Decode(&s.Suggest); err != nil { - return err + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + switch elems[0] { + + case "completion": + o := types.NewCompletionSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "phrase": + o := types.NewPhraseSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = 
append(s.Suggest[elems[1]], o) + + case "term": + o := types.NewTermSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + } + } else { + return errors.New("cannot decode JSON for field Suggest") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[value] = append(s.Suggest[value], o) + } + } } case "terminated_early": - if err := dec.Decode(&s.TerminatedEarly); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TerminatedEarly = &value + case bool: + s.TerminatedEarly = &v } case "timed_out": - if err := dec.Decode(&s.TimedOut); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimedOut = value + case bool: + s.TimedOut = v } case "took": - if err := dec.Decode(&s.Took); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Took = value + case float64: + f := int64(v) + s.Took = f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/search/search.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/search/search.go index ec6fe235f..7b23d6044 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/search/search.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/search/search.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns results matching a query. package search @@ -35,7 +35,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestmode" @@ -57,8 +57,9 @@ type Search struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -87,6 +88,8 @@ func New(tp elastictransport.Interface) *Search { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -116,9 +119,19 @@ func (r *Search) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -126,6 +139,7 @@ func (r *Search) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -213,7 +227,6 @@ func (r Search) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -222,6 +235,10 @@ func (r Search) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -232,388 +249,656 @@ func (r *Search) 
Header(key, value string) *Search { return r } -// Index A comma-separated list of index names to search; use `_all` or empty string -// to perform the operation on all indices +// Index Comma-separated list of data streams, indices, and aliases to search. +// Supports wildcards (`*`). +// To search all data streams and indices, omit this parameter or use `*` or +// `_all`. // API Name: index -func (r *Search) Index(v string) *Search { +func (r *Search) Index(index string) *Search { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. // API name: allow_no_indices -func (r *Search) AllowNoIndices(b bool) *Search { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *Search) AllowNoIndices(allownoindices bool) *Search { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// AllowPartialSearchResults Indicate if an error should be returned if there is a partial search failure -// or timeout +// AllowPartialSearchResults If true, returns partial results if there are shard request timeouts or shard +// failures. If false, returns an error with no partial results. 
// API name: allow_partial_search_results -func (r *Search) AllowPartialSearchResults(b bool) *Search { - r.values.Set("allow_partial_search_results", strconv.FormatBool(b)) +func (r *Search) AllowPartialSearchResults(allowpartialsearchresults bool) *Search { + r.values.Set("allow_partial_search_results", strconv.FormatBool(allowpartialsearchresults)) return r } -// Analyzer The analyzer to use for the query string +// Analyzer Analyzer to use for the query string. +// This parameter can only be used when the q query string parameter is +// specified. // API name: analyzer -func (r *Search) Analyzer(v string) *Search { - r.values.Set("analyzer", v) +func (r *Search) Analyzer(analyzer string) *Search { + r.values.Set("analyzer", analyzer) return r } -// AnalyzeWildcard Specify whether wildcard and prefix queries should be analyzed (default: -// false) +// AnalyzeWildcard If true, wildcard and prefix queries are analyzed. +// This parameter can only be used when the q query string parameter is +// specified. // API name: analyze_wildcard -func (r *Search) AnalyzeWildcard(b bool) *Search { - r.values.Set("analyze_wildcard", strconv.FormatBool(b)) +func (r *Search) AnalyzeWildcard(analyzewildcard bool) *Search { + r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) return r } // BatchedReduceSize The number of shard results that should be reduced at once on the -// coordinating node. This value should be used as a protection mechanism to -// reduce the memory overhead per search request if the potential number of -// shards in the request can be large. +// coordinating node. +// This value should be used as a protection mechanism to reduce the memory +// overhead per search request if the potential number of shards in the request +// can be large. 
// API name: batched_reduce_size -func (r *Search) BatchedReduceSize(v string) *Search { - r.values.Set("batched_reduce_size", v) +func (r *Search) BatchedReduceSize(batchedreducesize string) *Search { + r.values.Set("batched_reduce_size", batchedreducesize) return r } -// CcsMinimizeRoundtrips Indicates whether network round-trips should be minimized as part of -// cross-cluster search requests execution +// CcsMinimizeRoundtrips If true, network round-trips between the coordinating node and the remote +// clusters are minimized when executing cross-cluster search (CCS) requests. // API name: ccs_minimize_roundtrips -func (r *Search) CcsMinimizeRoundtrips(b bool) *Search { - r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(b)) +func (r *Search) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Search { + r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) return r } -// DefaultOperator The default operator for query string query (AND or OR) +// DefaultOperator The default operator for query string query: AND or OR. +// This parameter can only be used when the `q` query string parameter is +// specified. // API name: default_operator -func (r *Search) DefaultOperator(enum operator.Operator) *Search { - r.values.Set("default_operator", enum.String()) +func (r *Search) DefaultOperator(defaultoperator operator.Operator) *Search { + r.values.Set("default_operator", defaultoperator.String()) return r } -// Df The field to use as default where no field prefix is given in the query -// string +// Df Field to use as default where no field prefix is given in the query string. +// This parameter can only be used when the q query string parameter is +// specified. 
// API name: df -func (r *Search) Df(v string) *Search { - r.values.Set("df", v) +func (r *Search) Df(df string) *Search { + r.values.Set("df", df) return r } -// DocvalueFields A comma-separated list of fields to return as the docvalue representation of -// a field for each hit -// API name: docvalue_fields -func (r *Search) DocvalueFields(v string) *Search { - r.values.Set("docvalue_fields", v) - - return r -} - -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. // API name: expand_wildcards -func (r *Search) ExpandWildcards(v string) *Search { - r.values.Set("expand_wildcards", v) - - return r -} - -// Explain Specify whether to return detailed information about score computation as -// part of a hit -// API name: explain -func (r *Search) Explain(b bool) *Search { - r.values.Set("explain", strconv.FormatBool(b)) +func (r *Search) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Search { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// IgnoreThrottled Whether specified concrete, expanded or aliased indices should be ignored -// when throttled +// IgnoreThrottled If `true`, concrete, expanded or aliased indices will be ignored when frozen. 
// API name: ignore_throttled -func (r *Search) IgnoreThrottled(b bool) *Search { - r.values.Set("ignore_throttled", strconv.FormatBool(b)) +func (r *Search) IgnoreThrottled(ignorethrottled bool) *Search { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *Search) IgnoreUnavailable(b bool) *Search { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Search) IgnoreUnavailable(ignoreunavailable bool) *Search { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// Lenient Specify whether format-based query failures (such as providing text to a -// numeric field) should be ignored +// Lenient If `true`, format-based query failures (such as providing text to a numeric +// field) in the query string will be ignored. +// This parameter can only be used when the `q` query string parameter is +// specified. // API name: lenient -func (r *Search) Lenient(b bool) *Search { - r.values.Set("lenient", strconv.FormatBool(b)) +func (r *Search) Lenient(lenient bool) *Search { + r.values.Set("lenient", strconv.FormatBool(lenient)) return r } -// MaxConcurrentShardRequests The number of concurrent shard requests per node this search executes -// concurrently. This value should be used to limit the impact of the search on -// the cluster in order to limit the number of concurrent shard requests +// MaxConcurrentShardRequests Defines the number of concurrent shard requests per node this search executes +// concurrently. +// This value should be used to limit the impact of the search on the cluster in +// order to limit the number of concurrent shard requests. 
// API name: max_concurrent_shard_requests -func (r *Search) MaxConcurrentShardRequests(v string) *Search { - r.values.Set("max_concurrent_shard_requests", v) +func (r *Search) MaxConcurrentShardRequests(maxconcurrentshardrequests string) *Search { + r.values.Set("max_concurrent_shard_requests", maxconcurrentshardrequests) return r } -// MinCompatibleShardNode The minimum compatible version that all shards involved in search should have -// for this request to be successful +// MinCompatibleShardNode The minimum version of the node that can handle the request +// Any handling node with a lower version will fail the request. // API name: min_compatible_shard_node -func (r *Search) MinCompatibleShardNode(v string) *Search { - r.values.Set("min_compatible_shard_node", v) - - return r -} - -// Preference Specify the node or shard the operation should be performed on (default: -// random) +func (r *Search) MinCompatibleShardNode(versionstring string) *Search { + r.values.Set("min_compatible_shard_node", versionstring) + + return r +} + +// Preference Nodes and shards used for the search. +// By default, Elasticsearch selects from eligible nodes and shards using +// adaptive replica selection, accounting for allocation awareness. 
Valid values +// are: +// `_only_local` to run the search only on shards on the local node; +// `_local` to, if possible, run the search on shards on the local node, or if +// not, select shards using the default method; +// `_only_nodes:,` to run the search on only the specified +// nodes IDs, where, if suitable shards exist on more than one selected node, +// use shards on those nodes using the default method, or if none of the +// specified nodes are available, select shards from any available node using +// the default method; +// `_prefer_nodes:,` to if possible, run the search on the +// specified nodes IDs, or if not, select shards using the default method; +// `_shards:,` to run the search only on the specified shards; +// `` (any string that does not start with `_`) to route searches +// with the same `` to the same shards in the same order. // API name: preference -func (r *Search) Preference(v string) *Search { - r.values.Set("preference", v) +func (r *Search) Preference(preference string) *Search { + r.values.Set("preference", preference) return r } -// PreFilterShardSize A threshold that enforces a pre-filter roundtrip to prefilter search shards -// based on query rewriting if the number of shards the search request expands -// to exceeds the threshold. This filter roundtrip can limit the number of -// shards significantly if for instance a shard can not match any documents -// based on its rewrite method ie. if date filters are mandatory to match but -// the shard bounds and the query are disjoint. +// PreFilterShardSize Defines a threshold that enforces a pre-filter roundtrip to prefilter search +// shards based on query rewriting if the number of shards the search request +// expands to exceeds the threshold. 
+// This filter roundtrip can limit the number of shards significantly if for +// instance a shard can not match any documents based on its rewrite method (if +// date filters are mandatory to match but the shard bounds and the query are +// disjoint). +// When unspecified, the pre-filter phase is executed if any of these conditions +// is met: +// the request targets more than 128 shards; +// the request targets one or more read-only index; +// the primary sort of the query targets an indexed field. // API name: pre_filter_shard_size -func (r *Search) PreFilterShardSize(v string) *Search { - r.values.Set("pre_filter_shard_size", v) +func (r *Search) PreFilterShardSize(prefiltershardsize string) *Search { + r.values.Set("pre_filter_shard_size", prefiltershardsize) return r } -// RequestCache Specify if request cache should be used for this request or not, defaults to -// index level setting +// RequestCache If `true`, the caching of search results is enabled for requests where `size` +// is `0`. +// Defaults to index level settings. // API name: request_cache -func (r *Search) RequestCache(b bool) *Search { - r.values.Set("request_cache", strconv.FormatBool(b)) +func (r *Search) RequestCache(requestcache bool) *Search { + r.values.Set("request_cache", strconv.FormatBool(requestcache)) return r } -// Routing A comma-separated list of specific routing values +// Routing Custom value used to route operations to a specific shard. // API name: routing -func (r *Search) Routing(v string) *Search { - r.values.Set("routing", v) +func (r *Search) Routing(routing string) *Search { + r.values.Set("routing", routing) return r } -// Scroll Specify how long a consistent view of the index should be maintained for -// scrolled search +// Scroll Period to retain the search context for scrolling. See Scroll search results. +// By default, this value cannot exceed `1d` (24 hours). +// You can change this limit using the `search.max_keep_alive` cluster-level +// setting. 
// API name: scroll -func (r *Search) Scroll(v string) *Search { - r.values.Set("scroll", v) +func (r *Search) Scroll(duration string) *Search { + r.values.Set("scroll", duration) return r } -// SearchType Search operation type +// SearchType How distributed term frequencies are calculated for relevance scoring. // API name: search_type -func (r *Search) SearchType(enum searchtype.SearchType) *Search { - r.values.Set("search_type", enum.String()) +func (r *Search) SearchType(searchtype searchtype.SearchType) *Search { + r.values.Set("search_type", searchtype.String()) return r } -// Stats Specific 'tag' of the request for logging and statistical purposes -// API name: stats -func (r *Search) Stats(v string) *Search { - r.values.Set("stats", v) +// SuggestField Specifies which field to use for suggestions. +// API name: suggest_field +func (r *Search) SuggestField(field string) *Search { + r.values.Set("suggest_field", field) return r } -// StoredFields A comma-separated list of stored fields to return as part of a hit -// API name: stored_fields -func (r *Search) StoredFields(v string) *Search { - r.values.Set("stored_fields", v) +// SuggestMode Specifies the suggest mode. +// This parameter can only be used when the `suggest_field` and `suggest_text` +// query string parameters are specified. +// API name: suggest_mode +func (r *Search) SuggestMode(suggestmode suggestmode.SuggestMode) *Search { + r.values.Set("suggest_mode", suggestmode.String()) return r } -// SuggestField Specifies which field to use for suggestions. -// API name: suggest_field -func (r *Search) SuggestField(v string) *Search { - r.values.Set("suggest_field", v) +// SuggestSize Number of suggestions to return. +// This parameter can only be used when the `suggest_field` and `suggest_text` +// query string parameters are specified. 
+// API name: suggest_size +func (r *Search) SuggestSize(suggestsize string) *Search { + r.values.Set("suggest_size", suggestsize) return r } -// SuggestMode Specify suggest mode -// API name: suggest_mode -func (r *Search) SuggestMode(enum suggestmode.SuggestMode) *Search { - r.values.Set("suggest_mode", enum.String()) +// SuggestText The source text for which the suggestions should be returned. +// This parameter can only be used when the `suggest_field` and `suggest_text` +// query string parameters are specified. +// API name: suggest_text +func (r *Search) SuggestText(suggesttext string) *Search { + r.values.Set("suggest_text", suggesttext) return r } -// SuggestSize How many suggestions to return in response -// API name: suggest_size -func (r *Search) SuggestSize(v string) *Search { - r.values.Set("suggest_size", v) +// TypedKeys If `true`, aggregation and suggester names are be prefixed by their +// respective types in the response. +// API name: typed_keys +func (r *Search) TypedKeys(typedkeys bool) *Search { + r.values.Set("typed_keys", strconv.FormatBool(typedkeys)) return r } -// SuggestText The source text for which the suggestions should be returned. -// API name: suggest_text -func (r *Search) SuggestText(v string) *Search { - r.values.Set("suggest_text", v) +// RestTotalHitsAsInt Indicates whether `hits.total` should be rendered as an integer or an object +// in the rest search response. +// API name: rest_total_hits_as_int +func (r *Search) RestTotalHitsAsInt(resttotalhitsasint bool) *Search { + r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) return r } -// TerminateAfter The maximum number of documents to collect for each shard, upon reaching -// which the query execution will terminate early. -// API name: terminate_after -func (r *Search) TerminateAfter(v string) *Search { - r.values.Set("terminate_after", v) +// SourceExcludes_ A comma-separated list of source fields to exclude from the response. 
+// You can also use this parameter to exclude fields from the subset specified +// in `_source_includes` query parameter. +// If the `_source` parameter is `false`, this parameter is ignored. +// API name: _source_excludes +func (r *Search) SourceExcludes_(fields ...string) *Search { + r.values.Set("_source_excludes", strings.Join(fields, ",")) return r } -// Timeout Explicit operation timeout -// API name: timeout -func (r *Search) Timeout(v string) *Search { - r.values.Set("timeout", v) +// SourceIncludes_ A comma-separated list of source fields to include in the response. +// If this parameter is specified, only these source fields are returned. +// You can exclude fields from this subset using the `_source_excludes` query +// parameter. +// If the `_source` parameter is `false`, this parameter is ignored. +// API name: _source_includes +func (r *Search) SourceIncludes_(fields ...string) *Search { + r.values.Set("_source_includes", strings.Join(fields, ",")) return r } -// TrackTotalHits Indicate if the number of documents that match the query should be tracked. A -// number can also be specified, to accurately track the total hit count up to -// the number. -// API name: track_total_hits -func (r *Search) TrackTotalHits(v string) *Search { - r.values.Set("track_total_hits", v) +// Q Query in the Lucene query string syntax using query parameter search. +// Query parameter searches do not support the full Elasticsearch Query DSL but +// are handy for testing. +// API name: q +func (r *Search) Q(q string) *Search { + r.values.Set("q", q) return r } -// TrackScores Whether to calculate and return scores even if they are not used for sorting -// API name: track_scores -func (r *Search) TrackScores(b bool) *Search { - r.values.Set("track_scores", strconv.FormatBool(b)) +// Aggregations Defines the aggregations that are run as part of the search request. 
+// API name: aggregations +func (r *Search) Aggregations(aggregations map[string]types.Aggregations) *Search { + + r.req.Aggregations = aggregations return r } -// TypedKeys Specify whether aggregation and suggester names should be prefixed by their -// respective types in the response -// API name: typed_keys -func (r *Search) TypedKeys(b bool) *Search { - r.values.Set("typed_keys", strconv.FormatBool(b)) +// Collapse Collapses search results the values of the specified field. +// API name: collapse +func (r *Search) Collapse(collapse *types.FieldCollapse) *Search { + + r.req.Collapse = collapse return r } -// RestTotalHitsAsInt Indicates whether hits.total should be rendered as an integer or an object in -// the rest search response -// API name: rest_total_hits_as_int -func (r *Search) RestTotalHitsAsInt(b bool) *Search { - r.values.Set("rest_total_hits_as_int", strconv.FormatBool(b)) +// DocvalueFields Array of wildcard (`*`) patterns. +// The request returns doc values for field names matching these patterns in the +// `hits.fields` property of the response. +// API name: docvalue_fields +func (r *Search) DocvalueFields(docvaluefields ...types.FieldAndFormat) *Search { + r.req.DocvalueFields = docvaluefields return r } -// Version Specify whether to return document version as part of a hit -// API name: version -func (r *Search) Version(b bool) *Search { - r.values.Set("version", strconv.FormatBool(b)) +// Explain If true, returns detailed information about score computation as part of a +// hit. +// API name: explain +func (r *Search) Explain(explain bool) *Search { + r.req.Explain = &explain return r } -// Source_ True or false to return the _source field or not, or a list of fields to -// return -// API name: _source -func (r *Search) Source_(v string) *Search { - r.values.Set("_source", v) +// Ext Configuration of search extensions defined by Elasticsearch plugins. 
+// API name: ext +func (r *Search) Ext(ext map[string]json.RawMessage) *Search { + + r.req.Ext = ext return r } -// SourceExcludes_ A list of fields to exclude from the returned _source field -// API name: _source_excludes -func (r *Search) SourceExcludes_(v string) *Search { - r.values.Set("_source_excludes", v) +// Fields Array of wildcard (`*`) patterns. +// The request returns values for field names matching these patterns in the +// `hits.fields` property of the response. +// API name: fields +func (r *Search) Fields(fields ...types.FieldAndFormat) *Search { + r.req.Fields = fields return r } -// SourceIncludes_ A list of fields to extract and return from the _source field -// API name: _source_includes -func (r *Search) SourceIncludes_(v string) *Search { - r.values.Set("_source_includes", v) +// From Starting document offset. +// Needs to be non-negative. +// By default, you cannot page through more than 10,000 hits using the `from` +// and `size` parameters. +// To page through more hits, use the `search_after` parameter. +// API name: from +func (r *Search) From(from int) *Search { + r.req.From = &from return r } -// SeqNoPrimaryTerm Specify whether to return sequence number and primary term of the last -// modification of each hit -// API name: seq_no_primary_term -func (r *Search) SeqNoPrimaryTerm(b bool) *Search { - r.values.Set("seq_no_primary_term", strconv.FormatBool(b)) +// Highlight Specifies the highlighter to use for retrieving highlighted snippets from one +// or more fields in your search results. +// API name: highlight +func (r *Search) Highlight(highlight *types.Highlight) *Search { + + r.req.Highlight = highlight return r } -// Q Query in the Lucene query string syntax -// API name: q -func (r *Search) Q(v string) *Search { - r.values.Set("q", v) +// IndicesBoost Boosts the _score of documents from specified indices. 
+// API name: indices_boost +func (r *Search) IndicesBoost(indicesboosts ...map[string]types.Float64) *Search { + r.req.IndicesBoost = indicesboosts return r } -// Size Number of hits to return (default: 10) +// Knn Defines the approximate kNN search to run. +// API name: knn +func (r *Search) Knn(knns ...types.KnnQuery) *Search { + r.req.Knn = knns + + return r +} + +// MinScore Minimum `_score` for matching documents. +// Documents with a lower `_score` are not included in the search results. +// API name: min_score +func (r *Search) MinScore(minscore types.Float64) *Search { + + r.req.MinScore = &minscore + + return r +} + +// Pit Limits the search to a point in time (PIT). +// If you provide a PIT, you cannot specify an `` in the request path. +// API name: pit +func (r *Search) Pit(pit *types.PointInTimeReference) *Search { + + r.req.Pit = pit + + return r +} + +// PostFilter Use the `post_filter` parameter to filter search results. +// The search hits are filtered after the aggregations are calculated. +// A post filter has no impact on the aggregation results. +// API name: post_filter +func (r *Search) PostFilter(postfilter *types.Query) *Search { + + r.req.PostFilter = postfilter + + return r +} + +// Profile Set to `true` to return detailed timing information about the execution of +// individual components in a search request. +// NOTE: This is a debugging tool and adds significant overhead to search +// execution. +// API name: profile +func (r *Search) Profile(profile bool) *Search { + r.req.Profile = &profile + + return r +} + +// Query Defines the search definition using the Query DSL. +// API name: query +func (r *Search) Query(query *types.Query) *Search { + + r.req.Query = query + + return r +} + +// Rank Defines the Reciprocal Rank Fusion (RRF) to use. 
+// API name: rank +func (r *Search) Rank(rank *types.RankContainer) *Search { + + r.req.Rank = rank + + return r +} + +// Rescore Can be used to improve precision by reordering just the top (for example 100 +// - 500) documents returned by the `query` and `post_filter` phases. +// API name: rescore +func (r *Search) Rescore(rescores ...types.Rescore) *Search { + r.req.Rescore = rescores + + return r +} + +// RuntimeMappings Defines one or more runtime fields in the search request. +// These fields take precedence over mapped fields with the same name. +// API name: runtime_mappings +func (r *Search) RuntimeMappings(runtimefields types.RuntimeFields) *Search { + r.req.RuntimeMappings = runtimefields + + return r +} + +// ScriptFields Retrieve a script evaluation (based on different fields) for each hit. +// API name: script_fields +func (r *Search) ScriptFields(scriptfields map[string]types.ScriptField) *Search { + + r.req.ScriptFields = scriptfields + + return r +} + +// SearchAfter Used to retrieve the next page of hits using a set of sort values from the +// previous page. +// API name: search_after +func (r *Search) SearchAfter(sortresults ...types.FieldValue) *Search { + r.req.SearchAfter = sortresults + + return r +} + +// SeqNoPrimaryTerm If `true`, returns sequence number and primary term of the last modification +// of each hit. +// API name: seq_no_primary_term +func (r *Search) SeqNoPrimaryTerm(seqnoprimaryterm bool) *Search { + r.req.SeqNoPrimaryTerm = &seqnoprimaryterm + + return r +} + +// Size The number of hits to return. +// By default, you cannot page through more than 10,000 hits using the `from` +// and `size` parameters. +// To page through more hits, use the `search_after` parameter. 
// API name: size -func (r *Search) Size(i int) *Search { - r.values.Set("size", strconv.Itoa(i)) +func (r *Search) Size(size int) *Search { + r.req.Size = &size return r } -// From Starting offset (default: 0) -// API name: from -func (r *Search) From(i int) *Search { - r.values.Set("from", strconv.Itoa(i)) +// Slice Can be used to split a scrolled search into multiple slices that can be +// consumed independently. +// API name: slice +func (r *Search) Slice(slice *types.SlicedScroll) *Search { + + r.req.Slice = slice return r } -// Sort A comma-separated list of : pairs +// Sort A comma-separated list of : pairs. // API name: sort -func (r *Search) Sort(v string) *Search { - r.values.Set("sort", v) +func (r *Search) Sort(sorts ...types.SortCombinations) *Search { + r.req.Sort = sorts + + return r +} + +// Source_ Indicates which source fields are returned for matching documents. +// These fields are returned in the hits._source property of the search +// response. +// API name: _source +func (r *Search) Source_(sourceconfig types.SourceConfig) *Search { + r.req.Source_ = sourceconfig + + return r +} + +// Stats Stats groups to associate with the search. +// Each group maintains a statistics aggregation for its associated searches. +// You can retrieve these stats using the indices stats API. +// API name: stats +func (r *Search) Stats(stats ...string) *Search { + r.req.Stats = stats + + return r +} + +// StoredFields List of stored fields to return as part of a hit. +// If no fields are specified, no stored fields are included in the response. +// If this field is specified, the `_source` parameter defaults to `false`. +// You can pass `_source: true` to return both source fields and stored fields +// in the search response. +// API name: stored_fields +func (r *Search) StoredFields(fields ...string) *Search { + r.req.StoredFields = fields + + return r +} + +// Suggest Defines a suggester that provides similar looking terms based on a provided +// text. 
+// API name: suggest +func (r *Search) Suggest(suggest *types.Suggester) *Search { + + r.req.Suggest = suggest + + return r +} + +// TerminateAfter Maximum number of documents to collect for each shard. +// If a query reaches this limit, Elasticsearch terminates the query early. +// Elasticsearch collects documents before sorting. +// Use with caution. +// Elasticsearch applies this parameter to each shard handling the request. +// When possible, let Elasticsearch perform early termination automatically. +// Avoid specifying this parameter for requests that target data streams with +// backing indices across multiple data tiers. +// If set to `0` (default), the query does not terminate early. +// API name: terminate_after +func (r *Search) TerminateAfter(terminateafter int64) *Search { + + r.req.TerminateAfter = &terminateafter + + return r +} + +// Timeout Specifies the period of time to wait for a response from each shard. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// Defaults to no timeout. +// API name: timeout +func (r *Search) Timeout(timeout string) *Search { + + r.req.Timeout = &timeout + + return r +} + +// TrackScores If true, calculate and return document scores, even if the scores are not +// used for sorting. +// API name: track_scores +func (r *Search) TrackScores(trackscores bool) *Search { + r.req.TrackScores = &trackscores + + return r +} + +// TrackTotalHits Number of hits matching the query to count accurately. +// If `true`, the exact number of hits is returned at the cost of some +// performance. +// If `false`, the response does not include the total number of hits matching +// the query. +// API name: track_total_hits +func (r *Search) TrackTotalHits(trackhits types.TrackHits) *Search { + r.req.TrackTotalHits = trackhits + + return r +} + +// Version If true, returns document version as part of a hit. 
+// API name: version +func (r *Search) Version(version bool) *Search { + r.req.Version = &version return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchmvt/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchmvt/request.go index 606d9ff01..15441a3ad 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchmvt/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchmvt/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package searchmvt @@ -31,7 +31,7 @@ import ( // Request holds the request body struct for the package searchmvt // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search_mvt/SearchMvtRequest.ts#L33-L188 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search_mvt/SearchMvtRequest.ts#L33-L188 type Request struct { // Aggs Sub-aggregations for the geotile_grid. @@ -80,7 +80,7 @@ type Request struct { Query *types.Query `json:"query,omitempty"` // RuntimeMappings Defines one or more runtime fields in the search request. These fields take // precedence over mapped fields with the same name. - RuntimeMappings map[string]types.RuntimeField `json:"runtime_mappings,omitempty"` + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` // Size Maximum number of features to return in the hits layer. Accepts 0-10000. // If 0, results don’t include the hits layer. 
Size *int `json:"size,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchmvt/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchmvt/response.go index 51bc948cb..7562ad8f5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchmvt/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchmvt/response.go @@ -16,15 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package searchmvt // Response holds the response body struct for the package searchmvt // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search_mvt/SearchMvtResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search_mvt/SearchMvtResponse.ts#L22-L24 -type Response []byte +type Response = []byte // NewResponse returns a Response func NewResponse() Response { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchmvt/search_mvt.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchmvt/search_mvt.go index 86efd91c9..e5242acfc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchmvt/search_mvt.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchmvt/search_mvt.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Searches a vector tile for geospatial values. Returns results as a binary // Mapbox vector tile. @@ -31,12 +31,10 @@ import ( "io" "net/http" "net/url" - "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gridaggregationtype" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gridtype" ) @@ -65,8 +63,9 @@ type SearchMvt struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -86,15 +85,15 @@ func NewSearchMvtFunc(tp elastictransport.Interface) NewSearchMvt { return func(index, field, zoom, x, y string) *SearchMvt { n := New(tp) - n.Index(index) + n._index(index) - n.Field(field) + n._field(field) - n.Zoom(zoom) + n._zoom(zoom) - n.X(x) + n._x(x) - n.Y(y) + n._y(y) return n } @@ -110,6 +109,8 @@ func New(tp elastictransport.Interface) *SearchMvt { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -139,9 +140,19 @@ func (r *SearchMvt) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -149,6 +160,7 @@ func (r *SearchMvt) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -241,7 +253,6 @@ func (r SearchMvt) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := 
types.NewElasticsearchError() @@ -250,6 +261,10 @@ func (r SearchMvt) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -262,45 +277,71 @@ func (r *SearchMvt) Header(key, value string) *SearchMvt { // Index Comma-separated list of data streams, indices, or aliases to search // API Name: index -func (r *SearchMvt) Index(v string) *SearchMvt { +func (r *SearchMvt) _index(index string) *SearchMvt { r.paramSet |= indexMask - r.index = v + r.index = index return r } // Field Field containing geospatial data to return // API Name: field -func (r *SearchMvt) Field(v string) *SearchMvt { +func (r *SearchMvt) _field(field string) *SearchMvt { r.paramSet |= fieldMask - r.field = v + r.field = field return r } // Zoom Zoom level for the vector tile to search // API Name: zoom -func (r *SearchMvt) Zoom(v string) *SearchMvt { +func (r *SearchMvt) _zoom(zoom string) *SearchMvt { r.paramSet |= zoomMask - r.zoom = v + r.zoom = zoom return r } // X X coordinate for the vector tile to search // API Name: x -func (r *SearchMvt) X(v string) *SearchMvt { +func (r *SearchMvt) _x(x string) *SearchMvt { r.paramSet |= xMask - r.x = v + r.x = x return r } // Y Y coordinate for the vector tile to search // API Name: y -func (r *SearchMvt) Y(v string) *SearchMvt { +func (r *SearchMvt) _y(y string) *SearchMvt { r.paramSet |= yMask - r.y = v + r.y = y + + return r +} + +// Aggs Sub-aggregations for the geotile_grid. +// +// Supports the following aggregation types: +// - avg +// - cardinality +// - max +// - min +// - sum +// API name: aggs +func (r *SearchMvt) Aggs(aggs map[string]types.Aggregations) *SearchMvt { + + r.req.Aggs = aggs + + return r +} + +// Buffer Size, in pixels, of a clipping buffer outside the tile. This allows renderers +// to avoid outline artifacts from geometries that extend past the extent of the +// tile. 
+// API name: buffer +func (r *SearchMvt) Buffer(buffer int) *SearchMvt { + r.req.Buffer = &buffer return r } @@ -311,8 +352,8 @@ func (r *SearchMvt) Y(v string) *SearchMvt { // the // tile with wrap_longitude set to false. The resulting // bounding box may be larger than the vector tile. // API name: exact_bounds -func (r *SearchMvt) ExactBounds(b bool) *SearchMvt { - r.values.Set("exact_bounds", strconv.FormatBool(b)) +func (r *SearchMvt) ExactBounds(exactbounds bool) *SearchMvt { + r.req.ExactBounds = &exactbounds return r } @@ -320,16 +361,26 @@ func (r *SearchMvt) ExactBounds(b bool) *SearchMvt { // Extent Size, in pixels, of a side of the tile. Vector tiles are square with equal // sides. // API name: extent -func (r *SearchMvt) Extent(i int) *SearchMvt { - r.values.Set("extent", strconv.Itoa(i)) +func (r *SearchMvt) Extent(extent int) *SearchMvt { + r.req.Extent = &extent return r } -// GridAgg Aggregation used to create a grid for `field`. +// Fields Fields to return in the `hits` layer. Supports wildcards (`*`). +// This parameter does not support fields with array values. Fields with array +// values may return inconsistent results. +// API name: fields +func (r *SearchMvt) Fields(fields ...string) *SearchMvt { + r.req.Fields = fields + + return r +} + +// GridAgg Aggregation used to create a grid for the `field`. // API name: grid_agg -func (r *SearchMvt) GridAgg(enum gridaggregationtype.GridAggregationType) *SearchMvt { - r.values.Set("grid_agg", enum.String()) +func (r *SearchMvt) GridAgg(gridagg gridaggregationtype.GridAggregationType) *SearchMvt { + r.req.GridAgg = &gridagg return r } @@ -340,8 +391,8 @@ func (r *SearchMvt) GridAgg(enum gridaggregationtype.GridAggregationType) *Searc // results // don’t include the aggs layer. 
// API name: grid_precision -func (r *SearchMvt) GridPrecision(i int) *SearchMvt { - r.values.Set("grid_precision", strconv.Itoa(i)) +func (r *SearchMvt) GridPrecision(gridprecision int) *SearchMvt { + r.req.GridPrecision = &gridprecision return r } @@ -354,8 +405,26 @@ func (r *SearchMvt) GridPrecision(i int) *SearchMvt { // centroid // of the cell. // API name: grid_type -func (r *SearchMvt) GridType(enum gridtype.GridType) *SearchMvt { - r.values.Set("grid_type", enum.String()) +func (r *SearchMvt) GridType(gridtype gridtype.GridType) *SearchMvt { + r.req.GridType = &gridtype + + return r +} + +// Query Query DSL used to filter documents for the search. +// API name: query +func (r *SearchMvt) Query(query *types.Query) *SearchMvt { + + r.req.Query = query + + return r +} + +// RuntimeMappings Defines one or more runtime fields in the search request. These fields take +// precedence over mapped fields with the same name. +// API name: runtime_mappings +func (r *SearchMvt) RuntimeMappings(runtimefields types.RuntimeFields) *SearchMvt { + r.req.RuntimeMappings = runtimefields return r } @@ -363,8 +432,30 @@ func (r *SearchMvt) GridType(enum gridtype.GridType) *SearchMvt { // Size Maximum number of features to return in the hits layer. Accepts 0-10000. // If 0, results don’t include the hits layer. // API name: size -func (r *SearchMvt) Size(i int) *SearchMvt { - r.values.Set("size", strconv.Itoa(i)) +func (r *SearchMvt) Size(size int) *SearchMvt { + r.req.Size = &size + + return r +} + +// Sort Sorts features in the hits layer. By default, the API calculates a bounding +// box for each feature. It sorts features based on this box’s diagonal length, +// from longest to shortest. +// API name: sort +func (r *SearchMvt) Sort(sorts ...types.SortCombinations) *SearchMvt { + r.req.Sort = sorts + + return r +} + +// TrackTotalHits Number of hits matching the query to count accurately. If `true`, the exact +// number +// of hits is returned at the cost of some performance. 
If `false`, the response +// does +// not include the total number of hits matching the query. +// API name: track_total_hits +func (r *SearchMvt) TrackTotalHits(trackhits types.TrackHits) *SearchMvt { + r.req.TrackTotalHits = trackhits return r } @@ -373,8 +464,8 @@ func (r *SearchMvt) Size(i int) *SearchMvt { // representing // suggested label positions for the original features. // API name: with_labels -func (r *SearchMvt) WithLabels(b bool) *SearchMvt { - r.values.Set("with_labels", strconv.FormatBool(b)) +func (r *SearchMvt) WithLabels(withlabels bool) *SearchMvt { + r.req.WithLabels = &withlabels return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchshards/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchshards/response.go index 2d7d086e4..3bc90389a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchshards/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchshards/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package searchshards @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package searchshards // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search_shards/SearchShardsResponse.ts#L25-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search_shards/SearchShardsResponse.ts#L25-L31 type Response struct { Indices map[string]types.ShardStoreIndex `json:"indices"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchshards/search_shards.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchshards/search_shards.go index a7fa72cf0..3d014b8d5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchshards/search_shards.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchshards/search_shards.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about the indices and shards that a search request would // be executed against. 
@@ -37,6 +37,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -174,7 +175,6 @@ func (r SearchShards) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -183,6 +183,10 @@ func (r SearchShards) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,65 +218,74 @@ func (r *SearchShards) Header(key, value string) *SearchShards { return r } -// Index A comma-separated list of index names to search; use `_all` or empty string -// to perform the operation on all indices +// Index Returns the indices and shards that a search request would be executed +// against. // API Name: index -func (r *SearchShards) Index(v string) *SearchShards { +func (r *SearchShards) Index(index string) *SearchShards { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. 
// API name: allow_no_indices -func (r *SearchShards) AllowNoIndices(b bool) *SearchShards { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *SearchShards) AllowNoIndices(allownoindices bool) *SearchShards { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *SearchShards) ExpandWildcards(v string) *SearchShards { - r.values.Set("expand_wildcards", v) +func (r *SearchShards) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *SearchShards { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *SearchShards) IgnoreUnavailable(b bool) *SearchShards { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *SearchShards) IgnoreUnavailable(ignoreunavailable bool) *SearchShards { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// Local Return local information, do not retrieve the state from master node -// (default: false) +// Local If `true`, the request retrieves information from the local node only. 
// API name: local -func (r *SearchShards) Local(b bool) *SearchShards { - r.values.Set("local", strconv.FormatBool(b)) +func (r *SearchShards) Local(local bool) *SearchShards { + r.values.Set("local", strconv.FormatBool(local)) return r } -// Preference Specify the node or shard the operation should be performed on (default: -// random) +// Preference Specifies the node or shard the operation should be performed on. +// Random by default. // API name: preference -func (r *SearchShards) Preference(v string) *SearchShards { - r.values.Set("preference", v) +func (r *SearchShards) Preference(preference string) *SearchShards { + r.values.Set("preference", preference) return r } -// Routing Specific routing value +// Routing Custom value used to route operations to a specific shard. // API name: routing -func (r *SearchShards) Routing(v string) *SearchShards { - r.values.Set("routing", v) +func (r *SearchShards) Routing(routing string) *SearchShards { + r.values.Set("routing", routing) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchtemplate/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchtemplate/request.go index 0b43e0308..9221fbaba 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchtemplate/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchtemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package searchtemplate @@ -27,14 +27,21 @@ import ( // Request holds the request body struct for the package searchtemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search_template/SearchTemplateRequest.ts#L32-L96 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search_template/SearchTemplateRequest.ts#L32-L134 type Request struct { + + // Explain If `true`, returns detailed information about score calculation as part of + // each hit. Explain *bool `json:"explain,omitempty"` // Id ID of the search template to use. If no source is specified, // this parameter is required. - Id *string `json:"id,omitempty"` - Params map[string]json.RawMessage `json:"params,omitempty"` - Profile *bool `json:"profile,omitempty"` + Id *string `json:"id,omitempty"` + // Params Key-value pairs used to replace Mustache variables in the template. + // The key is the variable name. + // The value is the variable value. + Params map[string]json.RawMessage `json:"params,omitempty"` + // Profile If `true`, the query execution is profiled. + Profile *bool `json:"profile,omitempty"` // Source An inline search template. Supports the same parameters as the search API's // request body. Also supports Mustache variables. If no id is specified, this // parameter is required. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchtemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchtemplate/response.go index c6693f8ba..56953b305 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchtemplate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchtemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package searchtemplate @@ -25,6 +25,7 @@ import ( "encoding/json" "errors" "io" + "strconv" "strings" "github.com/elastic/go-elasticsearch/v8/typedapi/types" @@ -32,7 +33,7 @@ import ( // Response holds the response body struct for the package searchtemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search_template/SearchTemplateResponse.ts#L30-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search_template/SearchTemplateResponse.ts#L30-L48 type Response struct { Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` @@ -76,6 +77,10 @@ func (s *Response) UnmarshalJSON(data []byte) error { switch t { case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + for dec.More() { tt, err := dec.Token() if err != nil { @@ -88,415 +93,494 @@ func (s *Response) UnmarshalJSON(data []byte) error { if strings.Contains(value, "#") { elems := strings.Split(value, "#") if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } switch elems[0] { + case "cardinality": o := 
types.NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "hdr_percentiles": o := types.NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "hdr_percentile_ranks": o := types.NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "tdigest_percentiles": o := types.NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "tdigest_percentile_ranks": o := types.NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "percentiles_bucket": o := types.NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "median_absolute_deviation": o := types.NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "min": o := types.NewMinAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "max": o := types.NewMaxAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sum": o := types.NewSumAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "avg": o := types.NewAvgAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } 
s.Aggregations[elems[1]] = o + case "weighted_avg": o := types.NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "value_count": o := types.NewValueCountAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "simple_value": o := types.NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "derivative": o := types.NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "bucket_metric_value": o := types.NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "stats": o := types.NewStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "stats_bucket": o := types.NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "extended_stats": o := types.NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "extended_stats_bucket": o := types.NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_bounds": o := types.NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_centroid": o := types.NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); 
err != nil { return err } s.Aggregations[elems[1]] = o + case "histogram": o := types.NewHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "date_histogram": o := types.NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "auto_date_histogram": o := types.NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "variable_width_histogram": o := types.NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sterms": o := types.NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "lterms": o := types.NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "dterms": o := types.NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umterms": o := types.NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "lrareterms": o := types.NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "srareterms": o := types.NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umrareterms": o := types.NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != 
nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "multi_terms": o := types.NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "missing": o := types.NewMissingAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "nested": o := types.NewNestedAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "reverse_nested": o := types.NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "global": o := types.NewGlobalAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "filter": o := types.NewFilterAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "children": o := types.NewChildrenAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "parent": o := types.NewParentAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sampler": o := types.NewSamplerAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "unmapped_sampler": o := types.NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geohash_grid": o := types.NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil 
{ return err } s.Aggregations[elems[1]] = o + case "geotile_grid": o := types.NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geohex_grid": o := types.NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "range": o := types.NewRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "date_range": o := types.NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_distance": o := types.NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "ip_range": o := types.NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "ip_prefix": o := types.NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "filters": o := types.NewFiltersAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "adjacency_matrix": o := types.NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "siglterms": o := types.NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sigsterms": o := types.NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { 
return err } s.Aggregations[elems[1]] = o + case "umsigterms": o := types.NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "composite": o := types.NewCompositeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := types.NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := types.NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "top_hits": o := types.NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "inference": o := types.NewInferenceAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "string_stats": o := types.NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "box_plot": o := types.NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "top_metrics": o := types.NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "t_test": o := types.NewTTestAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "rate": o := types.NewRateAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + 
case "simple_long_value": o := types.NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "matrix_stats": o := types.NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_line": o := types.NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + default: o := make(map[string]interface{}, 0) if err := dec.Decode(&o); err != nil { @@ -523,6 +607,9 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } if err := dec.Decode(&s.Fields); err != nil { return err } @@ -533,13 +620,34 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "max_score": - if err := dec.Decode(&s.MaxScore); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := types.Float64(value) + s.MaxScore = &f + case float64: + f := types.Float64(v) + s.MaxScore = &f } case "num_reduce_phases": - if err := dec.Decode(&s.NumReducePhases); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumReducePhases = &value + case float64: + f := int64(v) + s.NumReducePhases = &f } case "pit_id": @@ -563,23 +671,109 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "suggest": - if err := dec.Decode(&s.Suggest); err != nil { - return err + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + 
return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + switch elems[0] { + + case "completion": + o := types.NewCompletionSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "phrase": + o := types.NewPhraseSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "term": + o := types.NewTermSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + } + } else { + return errors.New("cannot decode JSON for field Suggest") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[value] = append(s.Suggest[value], o) + } + } } case "terminated_early": - if err := dec.Decode(&s.TerminatedEarly); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TerminatedEarly = &value + case bool: + s.TerminatedEarly = &v } case "timed_out": - if err := dec.Decode(&s.TimedOut); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimedOut = value + case bool: + s.TimedOut = v } case "took": - if err := dec.Decode(&s.Took); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + 
s.Took = value + case float64: + f := int64(v) + s.Took = f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchtemplate/search_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchtemplate/search_template.go index a5e91693f..73fc45e2c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchtemplate/search_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/searchtemplate/search_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Allows to use the Mustache language to pre-render a search definition. package searchtemplate @@ -35,7 +35,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype" ) @@ -55,8 +55,9 @@ type SearchTemplate struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -85,6 +86,8 @@ func New(tp elastictransport.Interface) *SearchTemplate { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +117,19 @@ func (r *SearchTemplate) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +137,7 @@ 
func (r *SearchTemplate) HttpRequest(ctx context.Context) (*http.Request, error) } r.buf.Write(data) + } r.path.Scheme = "http" @@ -215,7 +229,6 @@ func (r SearchTemplate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -224,6 +237,10 @@ func (r SearchTemplate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -237,88 +254,81 @@ func (r *SearchTemplate) Header(key, value string) *SearchTemplate { // Index Comma-separated list of data streams, indices, // and aliases to search. Supports wildcards (*). // API Name: index -func (r *SearchTemplate) Index(v string) *SearchTemplate { +func (r *SearchTemplate) Index(index string) *SearchTemplate { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. 
// API name: allow_no_indices -func (r *SearchTemplate) AllowNoIndices(b bool) *SearchTemplate { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *SearchTemplate) AllowNoIndices(allownoindices bool) *SearchTemplate { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// CcsMinimizeRoundtrips Indicates whether network round-trips should be minimized as part of -// cross-cluster search requests execution +// CcsMinimizeRoundtrips If `true`, network round-trips are minimized for cross-cluster search +// requests. // API name: ccs_minimize_roundtrips -func (r *SearchTemplate) CcsMinimizeRoundtrips(b bool) *SearchTemplate { - r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(b)) +func (r *SearchTemplate) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *SearchTemplate { + r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. 
// API name: expand_wildcards -func (r *SearchTemplate) ExpandWildcards(v string) *SearchTemplate { - r.values.Set("expand_wildcards", v) - - return r -} - -// Explain Specify whether to return detailed information about score computation as -// part of a hit -// API name: explain -func (r *SearchTemplate) Explain(b bool) *SearchTemplate { - r.values.Set("explain", strconv.FormatBool(b)) +func (r *SearchTemplate) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *SearchTemplate { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// IgnoreThrottled Whether specified concrete, expanded or aliased indices should be ignored -// when throttled +// IgnoreThrottled If `true`, specified concrete, expanded, or aliased indices are not included +// in the response when throttled. // API name: ignore_throttled -func (r *SearchTemplate) IgnoreThrottled(b bool) *SearchTemplate { - r.values.Set("ignore_throttled", strconv.FormatBool(b)) +func (r *SearchTemplate) IgnoreThrottled(ignorethrottled bool) *SearchTemplate { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *SearchTemplate) IgnoreUnavailable(b bool) *SearchTemplate { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *SearchTemplate) IgnoreUnavailable(ignoreunavailable bool) *SearchTemplate { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// Preference Specify the node or shard the operation should be performed on (default: -// random) +// Preference Specifies the node or shard the operation should be performed on. 
+// Random by default. // API name: preference -func (r *SearchTemplate) Preference(v string) *SearchTemplate { - r.values.Set("preference", v) - - return r -} - -// Profile Specify whether to profile the query execution -// API name: profile -func (r *SearchTemplate) Profile(b bool) *SearchTemplate { - r.values.Set("profile", strconv.FormatBool(b)) +func (r *SearchTemplate) Preference(preference string) *SearchTemplate { + r.values.Set("preference", preference) return r } // Routing Custom value used to route operations to a specific shard. // API name: routing -func (r *SearchTemplate) Routing(v string) *SearchTemplate { - r.values.Set("routing", v) +func (r *SearchTemplate) Routing(routing string) *SearchTemplate { + r.values.Set("routing", routing) return r } @@ -326,33 +336,81 @@ func (r *SearchTemplate) Routing(v string) *SearchTemplate { // Scroll Specifies how long a consistent view of the index // should be maintained for scrolled search. // API name: scroll -func (r *SearchTemplate) Scroll(v string) *SearchTemplate { - r.values.Set("scroll", v) +func (r *SearchTemplate) Scroll(duration string) *SearchTemplate { + r.values.Set("scroll", duration) return r } // SearchType The type of the search operation. // API name: search_type -func (r *SearchTemplate) SearchType(enum searchtype.SearchType) *SearchTemplate { - r.values.Set("search_type", enum.String()) +func (r *SearchTemplate) SearchType(searchtype searchtype.SearchType) *SearchTemplate { + r.values.Set("search_type", searchtype.String()) return r } // RestTotalHitsAsInt If true, hits.total are rendered as an integer in the response. 
// API name: rest_total_hits_as_int -func (r *SearchTemplate) RestTotalHitsAsInt(b bool) *SearchTemplate { - r.values.Set("rest_total_hits_as_int", strconv.FormatBool(b)) +func (r *SearchTemplate) RestTotalHitsAsInt(resttotalhitsasint bool) *SearchTemplate { + r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) return r } -// TypedKeys Specify whether aggregation and suggester names should be prefixed by their -// respective types in the response +// TypedKeys If `true`, the response prefixes aggregation and suggester names with their +// respective types. // API name: typed_keys -func (r *SearchTemplate) TypedKeys(b bool) *SearchTemplate { - r.values.Set("typed_keys", strconv.FormatBool(b)) +func (r *SearchTemplate) TypedKeys(typedkeys bool) *SearchTemplate { + r.values.Set("typed_keys", strconv.FormatBool(typedkeys)) + + return r +} + +// Explain If `true`, returns detailed information about score calculation as part of +// each hit. +// API name: explain +func (r *SearchTemplate) Explain(explain bool) *SearchTemplate { + r.req.Explain = &explain + + return r +} + +// Id ID of the search template to use. If no source is specified, +// this parameter is required. +// API name: id +func (r *SearchTemplate) Id(id string) *SearchTemplate { + r.req.Id = &id + + return r +} + +// Params Key-value pairs used to replace Mustache variables in the template. +// The key is the variable name. +// The value is the variable value. +// API name: params +func (r *SearchTemplate) Params(params map[string]json.RawMessage) *SearchTemplate { + + r.req.Params = params + + return r +} + +// Profile If `true`, the query execution is profiled. +// API name: profile +func (r *SearchTemplate) Profile(profile bool) *SearchTemplate { + r.req.Profile = &profile + + return r +} + +// Source An inline search template. Supports the same parameters as the search API's +// request body. Also supports Mustache variables. 
If no id is specified, this +// parameter is required. +// API name: source +func (r *SearchTemplate) Source(source string) *SearchTemplate { + + r.req.Source = &source return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termsenum/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termsenum/request.go index a93c5a98d..f8561ac0b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termsenum/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termsenum/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package termsenum @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package termsenum // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/terms_enum/TermsEnumRequest.ts#L26-L65 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/terms_enum/TermsEnumRequest.ts#L26-L65 type Request struct { // CaseInsensitive When true the provided search string is matched against index terms without diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termsenum/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termsenum/response.go index 693591ee7..8e7419f84 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termsenum/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termsenum/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package termsenum @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package termsenum // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/terms_enum/TermsEnumResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/terms_enum/TermsEnumResponse.ts#L22-L28 type Response struct { Complete bool `json:"complete"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termsenum/terms_enum.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termsenum/terms_enum.go index 0e773af0a..3c355c6e9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termsenum/terms_enum.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termsenum/terms_enum.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // The terms enum API can be used to discover terms in the index that begin // with the provided string. 
It is designed for low-latency look-ups used in @@ -54,8 +54,9 @@ type TermsEnum struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -71,7 +72,7 @@ func NewTermsEnumFunc(tp elastictransport.Interface) NewTermsEnum { return func(index string) *TermsEnum { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -88,6 +89,8 @@ func New(tp elastictransport.Interface) *TermsEnum { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -117,9 +120,19 @@ func (r *TermsEnum) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -127,6 +140,7 @@ func (r *TermsEnum) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -207,7 +221,6 @@ func (r TermsEnum) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -216,6 +229,10 @@ func (r TermsEnum) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -229,9 +246,73 @@ func (r *TermsEnum) Header(key, value string) *TermsEnum { // Index Comma-separated list of data streams, indices, and index aliases to search. // Wildcard (*) expressions are supported. 
// API Name: index -func (r *TermsEnum) Index(v string) *TermsEnum { +func (r *TermsEnum) _index(index string) *TermsEnum { r.paramSet |= indexMask - r.index = v + r.index = index + + return r +} + +// CaseInsensitive When true the provided search string is matched against index terms without +// case sensitivity. +// API name: case_insensitive +func (r *TermsEnum) CaseInsensitive(caseinsensitive bool) *TermsEnum { + r.req.CaseInsensitive = &caseinsensitive + + return r +} + +// Field The string to match at the start of indexed terms. If not provided, all terms +// in the field are considered. +// API name: field +func (r *TermsEnum) Field(field string) *TermsEnum { + r.req.Field = field + + return r +} + +// IndexFilter Allows to filter an index shard if the provided query rewrites to match_none. +// API name: index_filter +func (r *TermsEnum) IndexFilter(indexfilter *types.Query) *TermsEnum { + + r.req.IndexFilter = indexfilter + + return r +} + +// API name: search_after +func (r *TermsEnum) SearchAfter(searchafter string) *TermsEnum { + + r.req.SearchAfter = &searchafter + + return r +} + +// Size How many matching terms to return. +// API name: size +func (r *TermsEnum) Size(size int) *TermsEnum { + r.req.Size = &size + + return r +} + +// String The string after which terms in the index should be returned. Allows for a +// form of pagination if the last result from one request is passed as the +// search_after parameter for a subsequent request. +// API name: string +func (r *TermsEnum) String(string string) *TermsEnum { + + r.req.String = &string + + return r +} + +// Timeout The maximum length of time to spend collecting results. Defaults to "1s" (one +// second). If the timeout is exceeded the complete flag set to false in the +// response and the results may be partial or empty. 
+// API name: timeout +func (r *TermsEnum) Timeout(duration types.Duration) *TermsEnum { + r.req.Timeout = duration return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termvectors/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termvectors/request.go index 3dd41ba55..f3b6672df 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termvectors/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termvectors/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package termvectors @@ -29,11 +29,16 @@ import ( // Request holds the request body struct for the package termvectors // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/termvectors/TermVectorsRequest.ts#L33-L61 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/termvectors/TermVectorsRequest.ts#L33-L118 type Request struct { - Doc json.RawMessage `json:"doc,omitempty"` - Filter *types.TermVectorsFilter `json:"filter,omitempty"` - PerFieldAnalyzer map[string]string `json:"per_field_analyzer,omitempty"` + + // Doc An artificial document (a document not present in the index) for which you + // want to retrieve term vectors. + Doc json.RawMessage `json:"doc,omitempty"` + // Filter Filter terms based on their tf-idf scores. + Filter *types.TermVectorsFilter `json:"filter,omitempty"` + // PerFieldAnalyzer Overrides the default per-field analyzer. 
+ PerFieldAnalyzer map[string]string `json:"per_field_analyzer,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termvectors/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termvectors/response.go index f7cc2b70c..b994c76be 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termvectors/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termvectors/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package termvectors @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package termvectors // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/termvectors/TermVectorsResponse.ts#L25-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/termvectors/TermVectorsResponse.ts#L25-L34 type Response struct { Found bool `json:"found"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termvectors/termvectors.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termvectors/termvectors.go index 7def5e7f4..638bf1af8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termvectors/termvectors.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/termvectors/termvectors.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information and statistics about terms in the fields of a particular // document. @@ -36,7 +36,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" ) @@ -58,8 +57,9 @@ type Termvectors struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -76,7 +76,7 @@ func NewTermvectorsFunc(tp elastictransport.Interface) NewTermvectors { return func(index string) *Termvectors { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -92,6 +92,8 @@ func New(tp elastictransport.Interface) *Termvectors { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -121,9 +123,19 @@ func (r *Termvectors) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -131,6 +143,7 @@ func (r *Termvectors) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -222,7 +235,6 @@ func (r Termvectors) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -231,6 +243,10 @@ func (r Termvectors) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ 
-241,111 +257,156 @@ func (r *Termvectors) Header(key, value string) *Termvectors { return r } -// Index The index in which the document resides. +// Index Name of the index that contains the document. // API Name: index -func (r *Termvectors) Index(v string) *Termvectors { +func (r *Termvectors) _index(index string) *Termvectors { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// Id The id of the document, when not specified a doc param should be supplied. +// Id Unique identifier of the document. // API Name: id -func (r *Termvectors) Id(v string) *Termvectors { +func (r *Termvectors) Id(id string) *Termvectors { r.paramSet |= idMask - r.id = v + r.id = id return r } -// Fields A comma-separated list of fields to return. +// Fields Comma-separated list or wildcard expressions of fields to include in the +// statistics. +// Used as the default list unless a specific field list is provided in the +// `completion_fields` or `fielddata_fields` parameters. // API name: fields -func (r *Termvectors) Fields(v string) *Termvectors { - r.values.Set("fields", v) +func (r *Termvectors) Fields(fields ...string) *Termvectors { + r.values.Set("fields", strings.Join(fields, ",")) return r } -// FieldStatistics Specifies if document count, sum of document frequencies and sum of total -// term frequencies should be returned. +// FieldStatistics If `true`, the response includes the document count, sum of document +// frequencies, and sum of total term frequencies. // API name: field_statistics -func (r *Termvectors) FieldStatistics(b bool) *Termvectors { - r.values.Set("field_statistics", strconv.FormatBool(b)) +func (r *Termvectors) FieldStatistics(fieldstatistics bool) *Termvectors { + r.values.Set("field_statistics", strconv.FormatBool(fieldstatistics)) return r } -// Offsets Specifies if term offsets should be returned. +// Offsets If `true`, the response includes term offsets. 
// API name: offsets -func (r *Termvectors) Offsets(b bool) *Termvectors { - r.values.Set("offsets", strconv.FormatBool(b)) +func (r *Termvectors) Offsets(offsets bool) *Termvectors { + r.values.Set("offsets", strconv.FormatBool(offsets)) return r } -// Payloads Specifies if term payloads should be returned. +// Payloads If `true`, the response includes term payloads. // API name: payloads -func (r *Termvectors) Payloads(b bool) *Termvectors { - r.values.Set("payloads", strconv.FormatBool(b)) +func (r *Termvectors) Payloads(payloads bool) *Termvectors { + r.values.Set("payloads", strconv.FormatBool(payloads)) return r } -// Positions Specifies if term positions should be returned. +// Positions If `true`, the response includes term positions. // API name: positions -func (r *Termvectors) Positions(b bool) *Termvectors { - r.values.Set("positions", strconv.FormatBool(b)) +func (r *Termvectors) Positions(positions bool) *Termvectors { + r.values.Set("positions", strconv.FormatBool(positions)) return r } -// Preference Specify the node or shard the operation should be performed on (default: -// random). +// Preference Specifies the node or shard the operation should be performed on. +// Random by default. // API name: preference -func (r *Termvectors) Preference(v string) *Termvectors { - r.values.Set("preference", v) +func (r *Termvectors) Preference(preference string) *Termvectors { + r.values.Set("preference", preference) return r } -// Realtime Specifies if request is real-time as opposed to near-real-time (default: -// true). +// Realtime If true, the request is real-time as opposed to near-real-time. // API name: realtime -func (r *Termvectors) Realtime(b bool) *Termvectors { - r.values.Set("realtime", strconv.FormatBool(b)) +func (r *Termvectors) Realtime(realtime bool) *Termvectors { + r.values.Set("realtime", strconv.FormatBool(realtime)) return r } -// Routing Specific routing value. +// Routing Custom value used to route operations to a specific shard. 
// API name: routing -func (r *Termvectors) Routing(v string) *Termvectors { - r.values.Set("routing", v) +func (r *Termvectors) Routing(routing string) *Termvectors { + r.values.Set("routing", routing) return r } -// TermStatistics Specifies if total term frequency and document frequency should be returned. +// TermStatistics If `true`, the response includes term frequency and document frequency. // API name: term_statistics -func (r *Termvectors) TermStatistics(b bool) *Termvectors { - r.values.Set("term_statistics", strconv.FormatBool(b)) +func (r *Termvectors) TermStatistics(termstatistics bool) *Termvectors { + r.values.Set("term_statistics", strconv.FormatBool(termstatistics)) return r } -// Version Explicit version number for concurrency control +// Version If `true`, returns the document version as part of a hit. // API name: version -func (r *Termvectors) Version(v string) *Termvectors { - r.values.Set("version", v) +func (r *Termvectors) Version(versionnumber string) *Termvectors { + r.values.Set("version", versionnumber) return r } -// VersionType Specific version type +// VersionType Specific version type. // API name: version_type -func (r *Termvectors) VersionType(enum versiontype.VersionType) *Termvectors { - r.values.Set("version_type", enum.String()) +func (r *Termvectors) VersionType(versiontype versiontype.VersionType) *Termvectors { + r.values.Set("version_type", versiontype.String()) + + return r +} + +// Doc An artificial document (a document not present in the index) for which you +// want to retrieve term vectors. +// API name: doc +// +// doc should be a json.RawMessage or a structure +// if a structure is provided, the client will defer a json serialization +// prior to sending the payload to Elasticsearch. 
+func (r *Termvectors) Doc(doc interface{}) *Termvectors { + switch casted := doc.(type) { + case json.RawMessage: + r.req.Doc = casted + default: + r.deferred = append(r.deferred, func(request *Request) error { + data, err := json.Marshal(doc) + if err != nil { + return err + } + r.req.Doc = data + return nil + }) + } + + return r +} + +// Filter Filter terms based on their tf-idf scores. +// API name: filter +func (r *Termvectors) Filter(filter *types.TermVectorsFilter) *Termvectors { + + r.req.Filter = filter + + return r +} + +// PerFieldAnalyzer Overrides the default per-field analyzer. +// API name: per_field_analyzer +func (r *Termvectors) PerFieldAnalyzer(perfieldanalyzer map[string]string) *Termvectors { + + r.req.PerFieldAnalyzer = perfieldanalyzer return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/update/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/update/request.go index 507528e8f..a96daf8f0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/update/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/update/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package update @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package update // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/update/UpdateRequest.ts#L38-L151 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/update/UpdateRequest.ts#L38-L151 type Request struct { // DetectNoop Set to false to disable setting 'result' in the response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/update/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/update/response.go index 8798038f6..5b781b658 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/update/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/update/response.go @@ -16,20 +16,29 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package update import ( "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/result" ) // Response holds the response body struct for the package update // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/update/UpdateResponse.ts#L27-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/update/UpdateResponse.ts#L27-L29 type Response struct { - Get *types.InlineGet `json:"get,omitempty"` + ForcedRefresh *bool `json:"forced_refresh,omitempty"` + Get *types.InlineGet `json:"get,omitempty"` + Id_ string `json:"_id"` + Index_ string `json:"_index"` + PrimaryTerm_ int64 `json:"_primary_term"` + Result result.Result `json:"result"` + SeqNo_ int64 `json:"_seq_no"` + Shards_ types.ShardStatistics `json:"_shards"` + Version_ int64 `json:"_version"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/update/update.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/update/update.go index 96af92344..ccc3fe4b5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/update/update.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/update/update.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Updates a document with a script or partial document. 
package update @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -57,8 +56,9 @@ type Update struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -75,9 +75,9 @@ func NewUpdateFunc(tp elastictransport.Interface) NewUpdate { return func(index, id string) *Update { n := New(tp) - n.Id(id) + n._id(id) - n.Index(index) + n._index(index) return n } @@ -92,6 +92,8 @@ func New(tp elastictransport.Interface) *Update { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -121,9 +123,19 @@ func (r *Update) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -131,6 +143,7 @@ func (r *Update) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -214,7 +227,6 @@ func (r Update) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -223,6 +235,10 @@ func (r Update) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -235,42 +251,42 @@ func (r *Update) Header(key, value string) *Update { // Id Document ID // API Name: id -func (r *Update) Id(v string) *Update { +func (r *Update) _id(id string) *Update { r.paramSet |= idMask - r.id = v + r.id = id return r } // Index The name of the index // API Name: index -func (r *Update) 
Index(v string) *Update { +func (r *Update) _index(index string) *Update { r.paramSet |= indexMask - r.index = v + r.index = index return r } // IfPrimaryTerm Only perform the operation if the document has this primary term. // API name: if_primary_term -func (r *Update) IfPrimaryTerm(v string) *Update { - r.values.Set("if_primary_term", v) +func (r *Update) IfPrimaryTerm(ifprimaryterm string) *Update { + r.values.Set("if_primary_term", ifprimaryterm) return r } // IfSeqNo Only perform the operation if the document has this sequence number. // API name: if_seq_no -func (r *Update) IfSeqNo(v string) *Update { - r.values.Set("if_seq_no", v) +func (r *Update) IfSeqNo(sequencenumber string) *Update { + r.values.Set("if_seq_no", sequencenumber) return r } // Lang The script language. // API name: lang -func (r *Update) Lang(v string) *Update { - r.values.Set("lang", v) +func (r *Update) Lang(lang string) *Update { + r.values.Set("lang", lang) return r } @@ -280,16 +296,16 @@ func (r *Update) Lang(v string) *Update { // operation // visible to search, if 'false' do nothing with refreshes. // API name: refresh -func (r *Update) Refresh(enum refresh.Refresh) *Update { - r.values.Set("refresh", enum.String()) +func (r *Update) Refresh(refresh refresh.Refresh) *Update { + r.values.Set("refresh", refresh.String()) return r } // RequireAlias If true, the destination must be an index alias. // API name: require_alias -func (r *Update) RequireAlias(b bool) *Update { - r.values.Set("require_alias", strconv.FormatBool(b)) +func (r *Update) RequireAlias(requirealias bool) *Update { + r.values.Set("require_alias", strconv.FormatBool(requirealias)) return r } @@ -297,16 +313,16 @@ func (r *Update) RequireAlias(b bool) *Update { // RetryOnConflict Specify how many times should the operation be retried when a conflict // occurs. 
// API name: retry_on_conflict -func (r *Update) RetryOnConflict(i int) *Update { - r.values.Set("retry_on_conflict", strconv.Itoa(i)) +func (r *Update) RetryOnConflict(retryonconflict int) *Update { + r.values.Set("retry_on_conflict", strconv.Itoa(retryonconflict)) return r } // Routing Custom value used to route operations to a specific shard. // API name: routing -func (r *Update) Routing(v string) *Update { - r.values.Set("routing", v) +func (r *Update) Routing(routing string) *Update { + r.values.Set("routing", routing) return r } @@ -315,8 +331,8 @@ func (r *Update) Routing(v string) *Update { // This guarantees Elasticsearch waits for at least the timeout before failing. // The actual wait time could be longer, particularly when multiple waits occur. // API name: timeout -func (r *Update) Timeout(v string) *Update { - r.values.Set("timeout", v) +func (r *Update) Timeout(duration string) *Update { + r.values.Set("timeout", duration) return r } @@ -327,34 +343,117 @@ func (r *Update) Timeout(v string) *Update { // index // (number_of_replicas+1). Defaults to 1 meaning the primary shard. // API name: wait_for_active_shards -func (r *Update) WaitForActiveShards(v string) *Update { - r.values.Set("wait_for_active_shards", v) - - return r -} - -// Source_ Set to false to disable source retrieval. You can also specify a -// comma-separated -// list of the fields you want to retrieve. -// API name: _source -func (r *Update) Source_(v string) *Update { - r.values.Set("_source", v) +func (r *Update) WaitForActiveShards(waitforactiveshards string) *Update { + r.values.Set("wait_for_active_shards", waitforactiveshards) return r } // SourceExcludes_ Specify the source fields you want to exclude. 
// API name: _source_excludes -func (r *Update) SourceExcludes_(v string) *Update { - r.values.Set("_source_excludes", v) +func (r *Update) SourceExcludes_(fields ...string) *Update { + r.values.Set("_source_excludes", strings.Join(fields, ",")) return r } // SourceIncludes_ Specify the source fields you want to retrieve. // API name: _source_includes -func (r *Update) SourceIncludes_(v string) *Update { - r.values.Set("_source_includes", v) +func (r *Update) SourceIncludes_(fields ...string) *Update { + r.values.Set("_source_includes", strings.Join(fields, ",")) + + return r +} + +// DetectNoop Set to false to disable setting 'result' in the response +// to 'noop' if no change to the document occurred. +// API name: detect_noop +func (r *Update) DetectNoop(detectnoop bool) *Update { + r.req.DetectNoop = &detectnoop + + return r +} + +// Doc A partial update to an existing document. +// API name: doc +// +// doc should be a json.RawMessage or a structure +// if a structure is provided, the client will defer a json serialization +// prior to sending the payload to Elasticsearch. +func (r *Update) Doc(doc interface{}) *Update { + switch casted := doc.(type) { + case json.RawMessage: + r.req.Doc = casted + default: + r.deferred = append(r.deferred, func(request *Request) error { + data, err := json.Marshal(doc) + if err != nil { + return err + } + r.req.Doc = data + return nil + }) + } + + return r +} + +// DocAsUpsert Set to true to use the contents of 'doc' as the value of 'upsert' +// API name: doc_as_upsert +func (r *Update) DocAsUpsert(docasupsert bool) *Update { + r.req.DocAsUpsert = &docasupsert + + return r +} + +// Script Script to execute to update the document. +// API name: script +func (r *Update) Script(script types.Script) *Update { + r.req.Script = script + + return r +} + +// ScriptedUpsert Set to true to execute the script whether or not the document exists. 
+// API name: scripted_upsert +func (r *Update) ScriptedUpsert(scriptedupsert bool) *Update { + r.req.ScriptedUpsert = &scriptedupsert + + return r +} + +// Source_ Set to false to disable source retrieval. You can also specify a +// comma-separated +// list of the fields you want to retrieve. +// API name: _source +func (r *Update) Source_(sourceconfig types.SourceConfig) *Update { + r.req.Source_ = sourceconfig + + return r +} + +// Upsert If the document does not already exist, the contents of 'upsert' are inserted +// as a +// new document. If the document exists, the 'script' is executed. +// API name: upsert +// +// upsert should be a json.RawMessage or a structure +// if a structure is provided, the client will defer a json serialization +// prior to sending the payload to Elasticsearch. +func (r *Update) Upsert(upsert interface{}) *Update { + switch casted := upsert.(type) { + case json.RawMessage: + r.req.Upsert = casted + default: + r.deferred = append(r.deferred, func(request *Request) error { + data, err := json.Marshal(upsert) + if err != nil { + return err + } + r.req.Upsert = data + return nil + }) + } return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyquery/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyquery/request.go index 8529fa3cb..80460e8b6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyquery/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyquery/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatebyquery @@ -30,13 +30,20 @@ import ( // Request holds the request body struct for the package updatebyquery // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/update_by_query/UpdateByQueryRequest.ts#L37-L85 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/update_by_query/UpdateByQueryRequest.ts#L37-L221 type Request struct { + + // Conflicts What to do if update by query hits version conflicts: `abort` or `proceed`. Conflicts *conflicts.Conflicts `json:"conflicts,omitempty"` - MaxDocs *int64 `json:"max_docs,omitempty"` - Query *types.Query `json:"query,omitempty"` - Script types.Script `json:"script,omitempty"` - Slice *types.SlicedScroll `json:"slice,omitempty"` + // MaxDocs The maximum number of documents to update. + MaxDocs *int64 `json:"max_docs,omitempty"` + // Query Specifies the documents to update using the Query DSL. + Query *types.Query `json:"query,omitempty"` + // Script The script to run to update the document source or metadata when updating. + Script types.Script `json:"script,omitempty"` + // Slice Slice the request manually using the provided slice ID and total number of + // slices. 
+ Slice *types.SlicedScroll `json:"slice,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyquery/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyquery/response.go index 5d2f35219..b8d8d2ca3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyquery/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyquery/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatebyquery @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatebyquery // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/update_by_query/UpdateByQueryResponse.ts#L26-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/update_by_query/UpdateByQueryResponse.ts#L26-L45 type Response struct { Batches *int64 `json:"batches,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyquery/update_by_query.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyquery/update_by_query.go index e27351974..8f6edaf81 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyquery/update_by_query.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyquery/update_by_query.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Performs an update on every document in the index without changing the // source, @@ -37,8 +37,8 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conflicts" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype" ) @@ -59,8 +59,9 @@ type UpdateByQuery struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -76,7 +77,7 @@ func NewUpdateByQueryFunc(tp elastictransport.Interface) NewUpdateByQuery { return func(index string) *UpdateByQuery { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -93,6 +94,8 @@ func New(tp elastictransport.Interface) *UpdateByQuery { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -122,9 +125,19 @@ func (r *UpdateByQuery) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -132,6 +145,7 @@ func (r *UpdateByQuery) HttpRequest(ctx context.Context) (*http.Request, error) } r.buf.Write(data) + } r.path.Scheme = "http" @@ -212,7 +226,6 @@ func (r UpdateByQuery) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := 
types.NewElasticsearchError() @@ -221,6 +234,10 @@ func (r UpdateByQuery) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -231,241 +248,251 @@ func (r *UpdateByQuery) Header(key, value string) *UpdateByQuery { return r } -// Index A comma-separated list of index names to search; use `_all` or empty string -// to perform the operation on all indices +// Index Comma-separated list of data streams, indices, and aliases to search. +// Supports wildcards (`*`). +// To search all data streams or indices, omit this parameter or use `*` or +// `_all`. // API Name: index -func (r *UpdateByQuery) Index(v string) *UpdateByQuery { +func (r *UpdateByQuery) _index(index string) *UpdateByQuery { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. // API name: allow_no_indices -func (r *UpdateByQuery) AllowNoIndices(b bool) *UpdateByQuery { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *UpdateByQuery) AllowNoIndices(allownoindices bool) *UpdateByQuery { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// Analyzer The analyzer to use for the query string +// Analyzer Analyzer to use for the query string. 
// API name: analyzer -func (r *UpdateByQuery) Analyzer(v string) *UpdateByQuery { - r.values.Set("analyzer", v) +func (r *UpdateByQuery) Analyzer(analyzer string) *UpdateByQuery { + r.values.Set("analyzer", analyzer) return r } -// AnalyzeWildcard Specify whether wildcard and prefix queries should be analyzed (default: -// false) +// AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. // API name: analyze_wildcard -func (r *UpdateByQuery) AnalyzeWildcard(b bool) *UpdateByQuery { - r.values.Set("analyze_wildcard", strconv.FormatBool(b)) - - return r -} - -// Conflicts What to do when the update by query hits version conflicts? -// API name: conflicts -func (r *UpdateByQuery) Conflicts(enum conflicts.Conflicts) *UpdateByQuery { - r.values.Set("conflicts", enum.String()) +func (r *UpdateByQuery) AnalyzeWildcard(analyzewildcard bool) *UpdateByQuery { + r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) return r } -// DefaultOperator The default operator for query string query (AND or OR) +// DefaultOperator The default operator for query string query: `AND` or `OR`. // API name: default_operator -func (r *UpdateByQuery) DefaultOperator(enum operator.Operator) *UpdateByQuery { - r.values.Set("default_operator", enum.String()) +func (r *UpdateByQuery) DefaultOperator(defaultoperator operator.Operator) *UpdateByQuery { + r.values.Set("default_operator", defaultoperator.String()) return r } -// Df The field to use as default where no field prefix is given in the query -// string +// Df Field to use as default where no field prefix is given in the query string. // API name: df -func (r *UpdateByQuery) Df(v string) *UpdateByQuery { - r.values.Set("df", v) +func (r *UpdateByQuery) Df(df string) *UpdateByQuery { + r.values.Set("df", df) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. 
+// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *UpdateByQuery) ExpandWildcards(v string) *UpdateByQuery { - r.values.Set("expand_wildcards", v) +func (r *UpdateByQuery) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *UpdateByQuery { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } // From Starting offset (default: 0) // API name: from -func (r *UpdateByQuery) From(v string) *UpdateByQuery { - r.values.Set("from", v) +func (r *UpdateByQuery) From(from string) *UpdateByQuery { + r.values.Set("from", from) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *UpdateByQuery) IgnoreUnavailable(b bool) *UpdateByQuery { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *UpdateByQuery) IgnoreUnavailable(ignoreunavailable bool) *UpdateByQuery { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// Lenient Specify whether format-based query failures (such as providing text to a -// numeric field) should be ignored +// Lenient If `true`, format-based query failures (such as providing text to a numeric +// field) in the query string will be ignored. 
// API name: lenient -func (r *UpdateByQuery) Lenient(b bool) *UpdateByQuery { - r.values.Set("lenient", strconv.FormatBool(b)) - - return r -} - -// MaxDocs Maximum number of documents to process (default: all documents) -// API name: max_docs -func (r *UpdateByQuery) MaxDocs(v string) *UpdateByQuery { - r.values.Set("max_docs", v) +func (r *UpdateByQuery) Lenient(lenient bool) *UpdateByQuery { + r.values.Set("lenient", strconv.FormatBool(lenient)) return r } -// Pipeline Ingest pipeline to set on index requests made by this action. (default: none) +// Pipeline ID of the pipeline to use to preprocess incoming documents. +// If the index has a default ingest pipeline specified, then setting the value +// to `_none` disables the default ingest pipeline for this request. +// If a final pipeline is configured it will always run, regardless of the value +// of this parameter. // API name: pipeline -func (r *UpdateByQuery) Pipeline(v string) *UpdateByQuery { - r.values.Set("pipeline", v) +func (r *UpdateByQuery) Pipeline(pipeline string) *UpdateByQuery { + r.values.Set("pipeline", pipeline) return r } -// Preference Specify the node or shard the operation should be performed on (default: -// random) +// Preference Specifies the node or shard the operation should be performed on. +// Random by default. // API name: preference -func (r *UpdateByQuery) Preference(v string) *UpdateByQuery { - r.values.Set("preference", v) +func (r *UpdateByQuery) Preference(preference string) *UpdateByQuery { + r.values.Set("preference", preference) return r } -// Refresh Should the affected indexes be refreshed? +// Refresh If `true`, Elasticsearch refreshes affected shards to make the operation +// visible to search. 
// API name: refresh -func (r *UpdateByQuery) Refresh(b bool) *UpdateByQuery { - r.values.Set("refresh", strconv.FormatBool(b)) +func (r *UpdateByQuery) Refresh(refresh bool) *UpdateByQuery { + r.values.Set("refresh", strconv.FormatBool(refresh)) return r } -// RequestCache Specify if request cache should be used for this request or not, defaults to -// index level setting +// RequestCache If `true`, the request cache is used for this request. // API name: request_cache -func (r *UpdateByQuery) RequestCache(b bool) *UpdateByQuery { - r.values.Set("request_cache", strconv.FormatBool(b)) +func (r *UpdateByQuery) RequestCache(requestcache bool) *UpdateByQuery { + r.values.Set("request_cache", strconv.FormatBool(requestcache)) return r } -// RequestsPerSecond The throttle to set on this request in sub-requests per second. -1 means no -// throttle. +// RequestsPerSecond The throttle for this request in sub-requests per second. // API name: requests_per_second -func (r *UpdateByQuery) RequestsPerSecond(v string) *UpdateByQuery { - r.values.Set("requests_per_second", v) +func (r *UpdateByQuery) RequestsPerSecond(requestspersecond string) *UpdateByQuery { + r.values.Set("requests_per_second", requestspersecond) return r } -// Routing A comma-separated list of specific routing values +// Routing Custom value used to route operations to a specific shard. // API name: routing -func (r *UpdateByQuery) Routing(v string) *UpdateByQuery { - r.values.Set("routing", v) +func (r *UpdateByQuery) Routing(routing string) *UpdateByQuery { + r.values.Set("routing", routing) return r } -// Scroll Specify how long a consistent view of the index should be maintained for -// scrolled search +// Scroll Period to retain the search context for scrolling. 
// API name: scroll -func (r *UpdateByQuery) Scroll(v string) *UpdateByQuery { - r.values.Set("scroll", v) +func (r *UpdateByQuery) Scroll(duration string) *UpdateByQuery { + r.values.Set("scroll", duration) return r } -// ScrollSize Size on the scroll request powering the update by query +// ScrollSize Size of the scroll request that powers the operation. // API name: scroll_size -func (r *UpdateByQuery) ScrollSize(v string) *UpdateByQuery { - r.values.Set("scroll_size", v) +func (r *UpdateByQuery) ScrollSize(scrollsize string) *UpdateByQuery { + r.values.Set("scroll_size", scrollsize) return r } -// SearchTimeout Explicit timeout for each search request. Defaults to no timeout. +// SearchTimeout Explicit timeout for each search request. // API name: search_timeout -func (r *UpdateByQuery) SearchTimeout(v string) *UpdateByQuery { - r.values.Set("search_timeout", v) +func (r *UpdateByQuery) SearchTimeout(duration string) *UpdateByQuery { + r.values.Set("search_timeout", duration) return r } -// SearchType Search operation type +// SearchType The type of the search operation. Available options: `query_then_fetch`, +// `dfs_query_then_fetch`. // API name: search_type -func (r *UpdateByQuery) SearchType(enum searchtype.SearchType) *UpdateByQuery { - r.values.Set("search_type", enum.String()) +func (r *UpdateByQuery) SearchType(searchtype searchtype.SearchType) *UpdateByQuery { + r.values.Set("search_type", searchtype.String()) return r } -// Slices The number of slices this task should be divided into. Defaults to 1, meaning -// the task isn't sliced into subtasks. Can be set to `auto`. +// Slices The number of slices this task should be divided into. // API name: slices -func (r *UpdateByQuery) Slices(v string) *UpdateByQuery { - r.values.Set("slices", v) +func (r *UpdateByQuery) Slices(slices string) *UpdateByQuery { + r.values.Set("slices", slices) return r } -// Sort A comma-separated list of : pairs +// Sort A comma-separated list of : pairs. 
// API name: sort -func (r *UpdateByQuery) Sort(v string) *UpdateByQuery { - r.values.Set("sort", v) +func (r *UpdateByQuery) Sort(sorts ...string) *UpdateByQuery { + tmp := []string{} + for _, item := range sorts { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("sort", strings.Join(tmp, ",")) return r } -// Stats Specific 'tag' of the request for logging and statistical purposes +// Stats Specific `tag` of the request for logging and statistical purposes. // API name: stats -func (r *UpdateByQuery) Stats(v string) *UpdateByQuery { - r.values.Set("stats", v) +func (r *UpdateByQuery) Stats(stats ...string) *UpdateByQuery { + tmp := []string{} + for _, item := range stats { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("stats", strings.Join(tmp, ",")) return r } -// TerminateAfter The maximum number of documents to collect for each shard, upon reaching -// which the query execution will terminate early. +// TerminateAfter Maximum number of documents to collect for each shard. +// If a query reaches this limit, Elasticsearch terminates the query early. +// Elasticsearch collects documents before sorting. +// Use with caution. +// Elasticsearch applies this parameter to each shard handling the request. +// When possible, let Elasticsearch perform early termination automatically. +// Avoid specifying this parameter for requests that target data streams with +// backing indices across multiple data tiers. // API name: terminate_after -func (r *UpdateByQuery) TerminateAfter(v string) *UpdateByQuery { - r.values.Set("terminate_after", v) +func (r *UpdateByQuery) TerminateAfter(terminateafter string) *UpdateByQuery { + r.values.Set("terminate_after", terminateafter) return r } -// Timeout Time each individual bulk request should wait for shards that are -// unavailable. +// Timeout Period each update request waits for the following operations: dynamic +// mapping updates, waiting for active shards. 
// API name: timeout -func (r *UpdateByQuery) Timeout(v string) *UpdateByQuery { - r.values.Set("timeout", v) +func (r *UpdateByQuery) Timeout(duration string) *UpdateByQuery { + r.values.Set("timeout", duration) return r } -// Version Specify whether to return document version as part of a hit +// Version If `true`, returns the document version as part of a hit. // API name: version -func (r *UpdateByQuery) Version(b bool) *UpdateByQuery { - r.values.Set("version", strconv.FormatBool(b)) +func (r *UpdateByQuery) Version(version bool) *UpdateByQuery { + r.values.Set("version", strconv.FormatBool(version)) return r } @@ -473,29 +500,71 @@ func (r *UpdateByQuery) Version(b bool) *UpdateByQuery { // VersionType Should the document increment the version number (internal) on hit or not // (reindex) // API name: version_type -func (r *UpdateByQuery) VersionType(b bool) *UpdateByQuery { - r.values.Set("version_type", strconv.FormatBool(b)) +func (r *UpdateByQuery) VersionType(versiontype bool) *UpdateByQuery { + r.values.Set("version_type", strconv.FormatBool(versiontype)) return r } -// WaitForActiveShards Sets the number of shard copies that must be active before proceeding with -// the update by query operation. Defaults to 1, meaning the primary shard only. -// Set to `all` for all shard copies, otherwise set to any non-negative value -// less than or equal to the total number of copies for the shard (number of -// replicas + 1) +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). 
// API name: wait_for_active_shards -func (r *UpdateByQuery) WaitForActiveShards(v string) *UpdateByQuery { - r.values.Set("wait_for_active_shards", v) +func (r *UpdateByQuery) WaitForActiveShards(waitforactiveshards string) *UpdateByQuery { + r.values.Set("wait_for_active_shards", waitforactiveshards) return r } -// WaitForCompletion Should the request should block until the update by query operation is -// complete. +// WaitForCompletion If `true`, the request blocks until the operation is complete. // API name: wait_for_completion -func (r *UpdateByQuery) WaitForCompletion(b bool) *UpdateByQuery { - r.values.Set("wait_for_completion", strconv.FormatBool(b)) +func (r *UpdateByQuery) WaitForCompletion(waitforcompletion bool) *UpdateByQuery { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// Conflicts What to do if update by query hits version conflicts: `abort` or `proceed`. +// API name: conflicts +func (r *UpdateByQuery) Conflicts(conflicts conflicts.Conflicts) *UpdateByQuery { + r.req.Conflicts = &conflicts + + return r +} + +// MaxDocs The maximum number of documents to update. +// API name: max_docs +func (r *UpdateByQuery) MaxDocs(maxdocs int64) *UpdateByQuery { + + r.req.MaxDocs = &maxdocs + + return r +} + +// Query Specifies the documents to update using the Query DSL. +// API name: query +func (r *UpdateByQuery) Query(query *types.Query) *UpdateByQuery { + + r.req.Query = query + + return r +} + +// Script The script to run to update the document source or metadata when updating. +// API name: script +func (r *UpdateByQuery) Script(script types.Script) *UpdateByQuery { + r.req.Script = script + + return r +} + +// Slice Slice the request manually using the provided slice ID and total number of +// slices. 
+// API name: slice +func (r *UpdateByQuery) Slice(slice *types.SlicedScroll) *UpdateByQuery { + + r.req.Slice = slice return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyqueryrethrottle/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyqueryrethrottle/response.go index a16053e49..d70d1c849 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyqueryrethrottle/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyqueryrethrottle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatebyqueryrethrottle @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatebyqueryrethrottle // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/update_by_query_rethrottle/UpdateByQueryRethrottleResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/update_by_query_rethrottle/UpdateByQueryRethrottleResponse.ts#L23-L25 type Response struct { Nodes map[string]types.UpdateByQueryRethrottleNode `json:"nodes"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyqueryrethrottle/update_by_query_rethrottle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyqueryrethrottle/update_by_query_rethrottle.go index c34741236..047f9e9f7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyqueryrethrottle/update_by_query_rethrottle.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/updatebyqueryrethrottle/update_by_query_rethrottle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Changes the number of requests per second for a particular Update By Query // operation. @@ -68,7 +68,7 @@ func NewUpdateByQueryRethrottleFunc(tp elastictransport.Interface) NewUpdateByQu return func(taskid string) *UpdateByQueryRethrottle { n := New(tp) - n.TaskId(taskid) + n._taskid(taskid) return n } @@ -172,7 +172,6 @@ func (r UpdateByQueryRethrottle) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r UpdateByQueryRethrottle) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,20 +215,19 @@ func (r *UpdateByQueryRethrottle) Header(key, value string) *UpdateByQueryRethro return r } -// TaskId The task id to rethrottle +// TaskId The ID for the task. // API Name: taskid -func (r *UpdateByQueryRethrottle) TaskId(v string) *UpdateByQueryRethrottle { +func (r *UpdateByQueryRethrottle) _taskid(taskid string) *UpdateByQueryRethrottle { r.paramSet |= taskidMask - r.taskid = v + r.taskid = taskid return r } -// RequestsPerSecond The throttle to set on this request in floating sub-requests per second. -1 -// means set no throttle. +// RequestsPerSecond The throttle for this request in sub-requests per second. 
// API name: requests_per_second -func (r *UpdateByQueryRethrottle) RequestsPerSecond(v string) *UpdateByQueryRethrottle { - r.values.Set("requests_per_second", v) +func (r *UpdateByQueryRethrottle) RequestsPerSecond(requestspersecond string) *UpdateByQueryRethrottle { + r.values.Set("requests_per_second", requestspersecond) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/deletedanglingindex/delete_dangling_index.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/deletedanglingindex/delete_dangling_index.go index b8719e4b7..99e5b9178 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/deletedanglingindex/delete_dangling_index.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/deletedanglingindex/delete_dangling_index.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes the specified dangling index package deletedanglingindex @@ -68,7 +68,7 @@ func NewDeleteDanglingIndexFunc(tp elastictransport.Interface) NewDeleteDangling return func(indexuuid string) *DeleteDanglingIndex { n := New(tp) - n.IndexUuid(indexuuid) + n._indexuuid(indexuuid) return n } @@ -169,7 +169,6 @@ func (r DeleteDanglingIndex) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -178,6 +177,10 @@ func (r DeleteDanglingIndex) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -211,33 +214,33 @@ func (r *DeleteDanglingIndex) Header(key, value string) *DeleteDanglingIndex { // IndexUuid The UUID of the 
dangling index // API Name: indexuuid -func (r *DeleteDanglingIndex) IndexUuid(v string) *DeleteDanglingIndex { +func (r *DeleteDanglingIndex) _indexuuid(indexuuid string) *DeleteDanglingIndex { r.paramSet |= indexuuidMask - r.indexuuid = v + r.indexuuid = indexuuid return r } // AcceptDataLoss Must be set to true in order to delete the dangling index // API name: accept_data_loss -func (r *DeleteDanglingIndex) AcceptDataLoss(b bool) *DeleteDanglingIndex { - r.values.Set("accept_data_loss", strconv.FormatBool(b)) +func (r *DeleteDanglingIndex) AcceptDataLoss(acceptdataloss bool) *DeleteDanglingIndex { + r.values.Set("accept_data_loss", strconv.FormatBool(acceptdataloss)) return r } // MasterTimeout Specify timeout for connection to master // API name: master_timeout -func (r *DeleteDanglingIndex) MasterTimeout(v string) *DeleteDanglingIndex { - r.values.Set("master_timeout", v) +func (r *DeleteDanglingIndex) MasterTimeout(duration string) *DeleteDanglingIndex { + r.values.Set("master_timeout", duration) return r } // Timeout Explicit operation timeout // API name: timeout -func (r *DeleteDanglingIndex) Timeout(v string) *DeleteDanglingIndex { - r.values.Set("timeout", v) +func (r *DeleteDanglingIndex) Timeout(duration string) *DeleteDanglingIndex { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/deletedanglingindex/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/deletedanglingindex/response.go index 3c3f512a6..c172e27aa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/deletedanglingindex/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/deletedanglingindex/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletedanglingindex // Response holds the response body struct for the package deletedanglingindex // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/dangling_indices/delete_dangling_index/DeleteDanglingIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/dangling_indices/delete_dangling_index/DeleteDanglingIndexResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/importdanglingindex/import_dangling_index.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/importdanglingindex/import_dangling_index.go index 8da760261..9dda9b418 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/importdanglingindex/import_dangling_index.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/importdanglingindex/import_dangling_index.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Imports the specified dangling index package importdanglingindex @@ -68,7 +68,7 @@ func NewImportDanglingIndexFunc(tp elastictransport.Interface) NewImportDangling return func(indexuuid string) *ImportDanglingIndex { n := New(tp) - n.IndexUuid(indexuuid) + n._indexuuid(indexuuid) return n } @@ -169,7 +169,6 @@ func (r ImportDanglingIndex) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -178,6 +177,10 @@ func (r ImportDanglingIndex) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -211,33 +214,33 @@ func (r *ImportDanglingIndex) Header(key, value string) *ImportDanglingIndex { // IndexUuid The UUID of the dangling index // API Name: indexuuid -func (r *ImportDanglingIndex) IndexUuid(v string) *ImportDanglingIndex { +func (r *ImportDanglingIndex) _indexuuid(indexuuid string) *ImportDanglingIndex { r.paramSet |= indexuuidMask - r.indexuuid = v + r.indexuuid = indexuuid return r } // AcceptDataLoss Must be set to true in order to import the dangling index // API name: accept_data_loss -func (r *ImportDanglingIndex) AcceptDataLoss(b bool) *ImportDanglingIndex { - r.values.Set("accept_data_loss", strconv.FormatBool(b)) +func (r *ImportDanglingIndex) AcceptDataLoss(acceptdataloss bool) *ImportDanglingIndex { + r.values.Set("accept_data_loss", strconv.FormatBool(acceptdataloss)) return r } // MasterTimeout Specify timeout for connection to master // API name: master_timeout -func (r *ImportDanglingIndex) MasterTimeout(v string) *ImportDanglingIndex { - r.values.Set("master_timeout", v) +func (r *ImportDanglingIndex) MasterTimeout(duration string) *ImportDanglingIndex { 
+ r.values.Set("master_timeout", duration) return r } // Timeout Explicit operation timeout // API name: timeout -func (r *ImportDanglingIndex) Timeout(v string) *ImportDanglingIndex { - r.values.Set("timeout", v) +func (r *ImportDanglingIndex) Timeout(duration string) *ImportDanglingIndex { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/importdanglingindex/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/importdanglingindex/response.go index 2affbe1b2..fc1de5422 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/importdanglingindex/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/importdanglingindex/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package importdanglingindex // Response holds the response body struct for the package importdanglingindex // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/dangling_indices/import_dangling_index/ImportDanglingIndexResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/dangling_indices/import_dangling_index/ImportDanglingIndexResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/listdanglingindices/list_dangling_indices.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/listdanglingindices/list_dangling_indices.go index 2c7935c73..5952e28d5 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/listdanglingindices/list_dangling_indices.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/listdanglingindices/list_dangling_indices.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns all dangling indices. package listdanglingindices @@ -157,7 +157,6 @@ func (r ListDanglingIndices) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -166,6 +165,10 @@ func (r ListDanglingIndices) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/listdanglingindices/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/listdanglingindices/response.go index 1564b8d0f..9ff07079c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/listdanglingindices/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/danglingindices/listdanglingindices/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package listdanglingindices @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package listdanglingindices // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponse.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponse.ts#L23-L27 type Response struct { DanglingIndices []types.DanglingIndex `json:"dangling_indices"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/deletepolicy/delete_policy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/deletepolicy/delete_policy.go index 8508f27b2..91657f7d9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/deletepolicy/delete_policy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/deletepolicy/delete_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an existing enrich policy and its enrich index. 
package deletepolicy @@ -67,7 +67,7 @@ func NewDeletePolicyFunc(tp elastictransport.Interface) NewDeletePolicy { return func(name string) *DeletePolicy { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -170,7 +170,6 @@ func (r DeletePolicy) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r DeletePolicy) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -210,11 +213,11 @@ func (r *DeletePolicy) Header(key, value string) *DeletePolicy { return r } -// Name The name of the enrich policy +// Name Enrich policy to delete. // API Name: name -func (r *DeletePolicy) Name(v string) *DeletePolicy { +func (r *DeletePolicy) _name(name string) *DeletePolicy { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/deletepolicy/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/deletepolicy/response.go index 14bf41ce4..1597c2986 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/deletepolicy/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/deletepolicy/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletepolicy // Response holds the response body struct for the package deletepolicy // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/enrich/delete_policy/DeleteEnrichPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/enrich/delete_policy/DeleteEnrichPolicyResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/executepolicy/execute_policy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/executepolicy/execute_policy.go index f149c60c9..a8b311d1b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/executepolicy/execute_policy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/executepolicy/execute_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates the enrich index for an existing enrich policy. package executepolicy @@ -68,7 +68,7 @@ func NewExecutePolicyFunc(tp elastictransport.Interface) NewExecutePolicy { return func(name string) *ExecutePolicy { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -76,7 +76,7 @@ func NewExecutePolicyFunc(tp elastictransport.Interface) NewExecutePolicy { // Creates the enrich index for an existing enrich policy. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/execute-enrich-policy-api.html +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/execute-enrich-policy-api.html func New(tp elastictransport.Interface) *ExecutePolicy { r := &ExecutePolicy{ transport: tp, @@ -173,7 +173,6 @@ func (r ExecutePolicy) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -182,6 +181,10 @@ func (r ExecutePolicy) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -213,19 +216,20 @@ func (r *ExecutePolicy) Header(key, value string) *ExecutePolicy { return r } -// Name The name of the enrich policy +// Name Enrich policy to execute. // API Name: name -func (r *ExecutePolicy) Name(v string) *ExecutePolicy { +func (r *ExecutePolicy) _name(name string) *ExecutePolicy { r.paramSet |= nameMask - r.name = v + r.name = name return r } -// WaitForCompletion Should the request should block until the execution is complete. +// WaitForCompletion If `true`, the request blocks other enrich policy execution requests until +// complete. 
// API name: wait_for_completion -func (r *ExecutePolicy) WaitForCompletion(b bool) *ExecutePolicy { - r.values.Set("wait_for_completion", strconv.FormatBool(b)) +func (r *ExecutePolicy) WaitForCompletion(waitforcompletion bool) *ExecutePolicy { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/executepolicy/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/executepolicy/response.go index eb5efeb99..d8d9ff986 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/executepolicy/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/executepolicy/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package executepolicy @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package executepolicy // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/enrich/execute_policy/ExecuteEnrichPolicyResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/enrich/execute_policy/ExecuteEnrichPolicyResponse.ts#L23-L28 type Response struct { Status types.ExecuteEnrichPolicyStatus `json:"status"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/getpolicy/get_policy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/getpolicy/get_policy.go index 44be7a8c6..1d2d3834f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/getpolicy/get_policy.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/getpolicy/get_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Gets information about an enrich policy. package getpolicy @@ -175,7 +175,6 @@ func (r GetPolicy) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -184,6 +183,10 @@ func (r GetPolicy) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -215,11 +218,12 @@ func (r *GetPolicy) Header(key, value string) *GetPolicy { return r } -// Name A comma-separated list of enrich policy names +// Name Comma-separated list of enrich policy names used to limit the request. +// To return information for all enrich policies, omit this parameter. // API Name: name -func (r *GetPolicy) Name(v string) *GetPolicy { +func (r *GetPolicy) Name(name string) *GetPolicy { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/getpolicy/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/getpolicy/response.go index 906f4083e..4831e17b3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/getpolicy/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/getpolicy/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getpolicy @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/enrich/get_policy/GetEnrichPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/enrich/get_policy/GetEnrichPolicyResponse.ts#L22-L24 type Response struct { Policies []types.Summary `json:"policies"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/putpolicy/put_policy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/putpolicy/put_policy.go index 4ef15c108..e57125575 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/putpolicy/put_policy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/putpolicy/put_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates a new enrich policy. package putpolicy @@ -52,8 +52,9 @@ type PutPolicy struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewPutPolicyFunc(tp elastictransport.Interface) NewPutPolicy { return func(name string) *PutPolicy { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -77,13 +78,15 @@ func NewPutPolicyFunc(tp elastictransport.Interface) NewPutPolicy { // Creates a new enrich policy. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/current/put-enrich-policy-api.html +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/put-enrich-policy-api.html func New(tp elastictransport.Interface) *PutPolicy { r := &PutPolicy{ transport: tp, values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *PutPolicy) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *PutPolicy) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -205,7 +219,6 @@ func (r PutPolicy) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -214,6 +227,10 @@ func (r PutPolicy) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -224,11 +241,39 @@ func (r *PutPolicy) Header(key, value string) *PutPolicy { return r } -// Name The name of the enrich policy +// Name Name of the enrich policy to create or update. // API Name: name -func (r *PutPolicy) Name(v string) *PutPolicy { +func (r *PutPolicy) _name(name string) *PutPolicy { r.paramSet |= nameMask - r.name = v + r.name = name + + return r +} + +// GeoMatch Matches enrich data to incoming documents based on a `geo_shape` query. +// API name: geo_match +func (r *PutPolicy) GeoMatch(geomatch *types.EnrichPolicy) *PutPolicy { + + r.req.GeoMatch = geomatch + + return r +} + +// Match Matches enrich data to incoming documents based on a `term` query. 
+// API name: match +func (r *PutPolicy) Match(match *types.EnrichPolicy) *PutPolicy { + + r.req.Match = match + + return r +} + +// Range Matches a number, date, or IP address in incoming documents to a range in the +// enrich index based on a `term` query. +// API name: range +func (r *PutPolicy) Range(range_ *types.EnrichPolicy) *PutPolicy { + + r.req.Range = range_ return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/putpolicy/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/putpolicy/request.go index 93ffa3599..20d24d125 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/putpolicy/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/putpolicy/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putpolicy @@ -29,11 +29,16 @@ import ( // Request holds the request body struct for the package putpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/enrich/put_policy/PutEnrichPolicyRequest.ts#L24-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/enrich/put_policy/PutEnrichPolicyRequest.ts#L24-L52 type Request struct { + + // GeoMatch Matches enrich data to incoming documents based on a `geo_shape` query. GeoMatch *types.EnrichPolicy `json:"geo_match,omitempty"` - Match *types.EnrichPolicy `json:"match,omitempty"` - Range *types.EnrichPolicy `json:"range,omitempty"` + // Match Matches enrich data to incoming documents based on a `term` query. 
+ Match *types.EnrichPolicy `json:"match,omitempty"` + // Range Matches a number, date, or IP address in incoming documents to a range in the + // enrich index based on a `term` query. + Range *types.EnrichPolicy `json:"range,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/putpolicy/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/putpolicy/response.go index b37b96d12..1104bb832 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/putpolicy/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/putpolicy/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putpolicy // Response holds the response body struct for the package putpolicy // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/enrich/put_policy/PutEnrichPolicyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/enrich/put_policy/PutEnrichPolicyResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/stats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/stats/response.go index a49efbbed..374f8aa99 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/stats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package stats @@ -26,12 +26,19 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/enrich/stats/EnrichStatsResponse.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/enrich/stats/EnrichStatsResponse.ts#L22-L39 type Response struct { - CacheStats []types.CacheStats `json:"cache_stats,omitempty"` - CoordinatorStats []types.CoordinatorStats `json:"coordinator_stats"` - ExecutingPolicies []types.ExecutingPolicy `json:"executing_policies"` + + // CacheStats Objects containing information about the enrich cache stats on each ingest + // node. + CacheStats []types.CacheStats `json:"cache_stats,omitempty"` + // CoordinatorStats Objects containing information about each coordinating ingest node for + // configured enrich processors. + CoordinatorStats []types.CoordinatorStats `json:"coordinator_stats"` + // ExecutingPolicies Objects containing information about each enrich policy that is currently + // executing. + ExecutingPolicies []types.ExecutingPolicy `json:"executing_policies"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/stats/stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/stats/stats.go index b93308cfa..3d2c78347 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/stats/stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/enrich/stats/stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Gets enrich coordinator statistics and information about enrich policies that // are currently executing. @@ -161,7 +161,6 @@ func (r Stats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -170,6 +169,10 @@ func (r Stats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/delete/delete.go index a93c55326..c61610b8e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/delete/delete.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/delete/delete.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an async EQL search by ID. If the search is still running, the search // request will be cancelled. Otherwise, the saved search results are deleted. 
@@ -68,7 +68,7 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { return func(id string) *Delete { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -172,7 +172,6 @@ func (r Delete) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r Delete) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -213,10 +216,13 @@ func (r *Delete) Header(key, value string) *Delete { } // Id Identifier for the search to delete. +// A search ID is provided in the EQL search API's response for an async search. +// A search ID is also provided if the request’s `keep_on_completion` parameter +// is `true`. // API Name: id -func (r *Delete) Id(v string) *Delete { +func (r *Delete) _id(id string) *Delete { r.paramSet |= idMask - r.id = v + r.id = id return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/delete/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/delete/response.go index 87029dcb0..3b6baf4cd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/delete/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/delete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/eql/delete/EqlDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/eql/delete/EqlDeleteResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/get/get.go index f122ef900..fe5009bc3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/get/get.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/get/get.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns async results from previously executed Event Query Language (EQL) // search @@ -68,7 +68,7 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { return func(id string) *Get { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -77,7 +77,7 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { // Returns async results from previously executed Event Query Language (EQL) // search // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/get-async-eql-search-api.html func New(tp elastictransport.Interface) *Get { r := &Get{ transport: tp, @@ -172,7 +172,6 @@ func (r Get) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r Get) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +217,9 @@ func (r *Get) Header(key, value string) *Get { // Id Identifier for the search. // API Name: id -func (r *Get) Id(v string) *Get { +func (r *Get) _id(id string) *Get { r.paramSet |= idMask - r.id = v + r.id = id return r } @@ -224,17 +227,18 @@ func (r *Get) Id(v string) *Get { // KeepAlive Period for which the search and its results are stored on the cluster. // Defaults to the keep_alive value set by the search’s EQL search API request. // API name: keep_alive -func (r *Get) KeepAlive(v string) *Get { - r.values.Set("keep_alive", v) +func (r *Get) KeepAlive(duration string) *Get { + r.values.Set("keep_alive", duration) return r } -// WaitForCompletionTimeout Timeout duration to wait for the request to finish. 
Defaults to no timeout, -// meaning the request waits for complete search results. +// WaitForCompletionTimeout Timeout duration to wait for the request to finish. +// Defaults to no timeout, meaning the request waits for complete search +// results. // API name: wait_for_completion_timeout -func (r *Get) WaitForCompletionTimeout(v string) *Get { - r.values.Set("wait_for_completion_timeout", v) +func (r *Get) WaitForCompletionTimeout(duration string) *Get { + r.values.Set("wait_for_completion_timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/get/response.go index ae22b9ef3..8f48eb8ac 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/get/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package get @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/eql/get/EqlGetResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/eql/get/EqlGetResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/getstatus/get_status.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/getstatus/get_status.go index 997fd31bf..6c2f73ad5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/getstatus/get_status.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/getstatus/get_status.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns the status of a previously submitted async or stored Event Query // Language (EQL) search @@ -68,7 +68,7 @@ func NewGetStatusFunc(tp elastictransport.Interface) NewGetStatus { return func(id string) *GetStatus { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -77,7 +77,7 @@ func NewGetStatusFunc(tp elastictransport.Interface) NewGetStatus { // Returns the status of a previously submitted async or stored Event Query // Language (EQL) search // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/get-async-eql-status-api.html func New(tp elastictransport.Interface) *GetStatus { r := &GetStatus{ transport: tp, @@ -174,7 +174,6 @@ func (r GetStatus) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -183,6 +182,10 @@ func (r GetStatus) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,9 +219,9 @@ func (r *GetStatus) Header(key, value string) *GetStatus { // Id Identifier for the search. 
// API Name: id -func (r *GetStatus) Id(v string) *GetStatus { +func (r *GetStatus) _id(id string) *GetStatus { r.paramSet |= idMask - r.id = v + r.id = id return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/getstatus/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/getstatus/response.go index abedb766a..69b246c51 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/getstatus/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/getstatus/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getstatus // Response holds the response body struct for the package getstatus // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/eql/get_status/EqlGetStatusResponse.ts#L24-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/eql/get_status/EqlGetStatusResponse.ts#L24-L51 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/search/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/search/request.go index 50b5f97a6..d6ec226eb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/search/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/search/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package search @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/eql/search/EqlSearchRequest.ts#L28-L115 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/eql/search/EqlSearchRequest.ts#L28-L118 type Request struct { CaseSensitive *bool `json:"case_sensitive,omitempty"` // EventCategoryField Field containing the event classification, such as process, file, or network. @@ -48,7 +48,7 @@ type Request struct { // Query EQL query you wish to run. Query string `json:"query"` ResultPosition *resultposition.ResultPosition `json:"result_position,omitempty"` - RuntimeMappings map[string]types.RuntimeField `json:"runtime_mappings,omitempty"` + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` // Size For basic queries, the maximum number of matching events to return. Defaults // to 10 Size *uint `json:"size,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/search/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/search/response.go index 46d37d544..561474d2c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/search/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/search/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package search @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/eql/search/EqlSearchResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/eql/search/EqlSearchResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/search/search.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/search/search.go index 84a54610d..35562398a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/search/search.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/eql/search/search.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns results matching a query expressed in Event Query Language (EQL) package search @@ -35,6 +35,8 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/resultposition" ) const ( @@ -53,8 +55,9 @@ type Search struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +73,7 @@ func NewSearchFunc(tp elastictransport.Interface) NewSearch { return func(index string) *Search { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -85,6 +88,8 @@ func New(tp elastictransport.Interface) *Search { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +119,19 @@ func (r *Search) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +139,7 @@ func (r *Search) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -206,7 +222,6 @@ func (r Search) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -215,6 +230,10 @@ func (r Search) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + 
} + return nil, errorResponse } @@ -227,56 +246,147 @@ func (r *Search) Header(key, value string) *Search { // Index The name of the index to scope the operation // API Name: index -func (r *Search) Index(v string) *Search { +func (r *Search) _index(index string) *Search { r.paramSet |= indexMask - r.index = v + r.index = index return r } // API name: allow_no_indices -func (r *Search) AllowNoIndices(b bool) *Search { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *Search) AllowNoIndices(allownoindices bool) *Search { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } // API name: expand_wildcards -func (r *Search) ExpandWildcards(v string) *Search { - r.values.Set("expand_wildcards", v) +func (r *Search) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Search { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } // IgnoreUnavailable If true, missing or closed indices are not included in the response. // API name: ignore_unavailable -func (r *Search) IgnoreUnavailable(b bool) *Search { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Search) IgnoreUnavailable(ignoreunavailable bool) *Search { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// API name: case_sensitive +func (r *Search) CaseSensitive(casesensitive bool) *Search { + r.req.CaseSensitive = &casesensitive + + return r +} + +// EventCategoryField Field containing the event classification, such as process, file, or network. +// API name: event_category_field +func (r *Search) EventCategoryField(field string) *Search { + r.req.EventCategoryField = &field + + return r +} + +// FetchSize Maximum number of events to search at a time for sequence queries. 
+// API name: fetch_size +func (r *Search) FetchSize(fetchsize uint) *Search { + + r.req.FetchSize = &fetchsize + + return r +} + +// Fields Array of wildcard (*) patterns. The response returns values for field names +// matching these patterns in the fields property of each hit. +// API name: fields +func (r *Search) Fields(fields ...types.FieldAndFormat) *Search { + r.req.Fields = fields + + return r +} + +// Filter Query, written in Query DSL, used to filter the events on which the EQL query +// runs. +// API name: filter +func (r *Search) Filter(filters ...types.Query) *Search { + r.req.Filter = filters return r } -// KeepAlive Period for which the search and its results are stored on the cluster. // API name: keep_alive -func (r *Search) KeepAlive(v string) *Search { - r.values.Set("keep_alive", v) +func (r *Search) KeepAlive(duration types.Duration) *Search { + r.req.KeepAlive = duration return r } -// KeepOnCompletion If true, the search and its results are stored on the cluster. // API name: keep_on_completion -func (r *Search) KeepOnCompletion(b bool) *Search { - r.values.Set("keep_on_completion", strconv.FormatBool(b)) +func (r *Search) KeepOnCompletion(keeponcompletion bool) *Search { + r.req.KeepOnCompletion = &keeponcompletion + + return r +} + +// Query EQL query you wish to run. +// API name: query +func (r *Search) Query(query string) *Search { + + r.req.Query = query + + return r +} + +// API name: result_position +func (r *Search) ResultPosition(resultposition resultposition.ResultPosition) *Search { + r.req.ResultPosition = &resultposition + + return r +} + +// API name: runtime_mappings +func (r *Search) RuntimeMappings(runtimefields types.RuntimeFields) *Search { + r.req.RuntimeMappings = runtimefields + + return r +} + +// Size For basic queries, the maximum number of matching events to return. 
Defaults +// to 10 +// API name: size +func (r *Search) Size(size uint) *Search { + + r.req.Size = &size + + return r +} + +// TiebreakerField Field used to sort hits with the same timestamp in ascending order +// API name: tiebreaker_field +func (r *Search) TiebreakerField(field string) *Search { + r.req.TiebreakerField = &field + + return r +} + +// TimestampField Field containing event timestamp. Default "@timestamp" +// API name: timestamp_field +func (r *Search) TimestampField(field string) *Search { + r.req.TimestampField = &field return r } -// WaitForCompletionTimeout Timeout duration to wait for the request to finish. Defaults to no timeout, -// meaning the request waits for complete search results. // API name: wait_for_completion_timeout -func (r *Search) WaitForCompletionTimeout(v string) *Search { - r.values.Set("wait_for_completion_timeout", v) +func (r *Search) WaitForCompletionTimeout(duration types.Duration) *Search { + r.req.WaitForCompletionTimeout = duration return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/features/getfeatures/get_features.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/features/getfeatures/get_features.go index 68650effc..b5c3ba990 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/features/getfeatures/get_features.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/features/getfeatures/get_features.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Gets a list of features which can be included in snapshots using the // feature_states field when creating a snapshot @@ -159,7 +159,6 @@ func (r GetFeatures) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r GetFeatures) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/features/getfeatures/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/features/getfeatures/response.go index 3743f1a4d..87343c215 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/features/getfeatures/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/features/getfeatures/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getfeatures @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getfeatures // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/features/get_features/GetFeaturesResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/features/get_features/GetFeaturesResponse.ts#L22-L26 type Response struct { Features []types.Feature `json:"features"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/features/resetfeatures/reset_features.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/features/resetfeatures/reset_features.go index 5662157eb..c62fd4758 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/features/resetfeatures/reset_features.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/features/resetfeatures/reset_features.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Resets the internal state of features, usually by deleting system indices package resetfeatures @@ -159,7 +159,6 @@ func (r ResetFeatures) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r ResetFeatures) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/features/resetfeatures/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/features/resetfeatures/response.go index d739efcb3..adbcf42e1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/features/resetfeatures/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/features/resetfeatures/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package resetfeatures @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package resetfeatures // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/features/reset_features/ResetFeaturesResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/features/reset_features/ResetFeaturesResponse.ts#L22-L26 type Response struct { Features []types.Feature `json:"features"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/globalcheckpoints/global_checkpoints.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/globalcheckpoints/global_checkpoints.go index 323f9a8d6..2c7dd8c77 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/globalcheckpoints/global_checkpoints.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/globalcheckpoints/global_checkpoints.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns the current global checkpoints for an index. This API is design for // internal use by the fleet server project. 
@@ -69,7 +69,7 @@ func NewGlobalCheckpointsFunc(tp elastictransport.Interface) NewGlobalCheckpoint return func(index string) *GlobalCheckpoints { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -179,7 +179,6 @@ func (r GlobalCheckpoints) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -188,6 +187,10 @@ func (r GlobalCheckpoints) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -221,9 +224,9 @@ func (r *GlobalCheckpoints) Header(key, value string) *GlobalCheckpoints { // Index A single index or index alias that resolves to a single index. // API Name: index -func (r *GlobalCheckpoints) Index(v string) *GlobalCheckpoints { +func (r *GlobalCheckpoints) _index(index string) *GlobalCheckpoints { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -232,8 +235,8 @@ func (r *GlobalCheckpoints) Index(v string) *GlobalCheckpoints { // global checkpoints // to advance past the provided `checkpoints`. // API name: wait_for_advance -func (r *GlobalCheckpoints) WaitForAdvance(b bool) *GlobalCheckpoints { - r.values.Set("wait_for_advance", strconv.FormatBool(b)) +func (r *GlobalCheckpoints) WaitForAdvance(waitforadvance bool) *GlobalCheckpoints { + r.values.Set("wait_for_advance", strconv.FormatBool(waitforadvance)) return r } @@ -243,8 +246,8 @@ func (r *GlobalCheckpoints) WaitForAdvance(b bool) *GlobalCheckpoints { // and all primary shards be active. Can only be true when `wait_for_advance` is // true. 
// API name: wait_for_index -func (r *GlobalCheckpoints) WaitForIndex(b bool) *GlobalCheckpoints { - r.values.Set("wait_for_index", strconv.FormatBool(b)) +func (r *GlobalCheckpoints) WaitForIndex(waitforindex bool) *GlobalCheckpoints { + r.values.Set("wait_for_index", strconv.FormatBool(waitforindex)) return r } @@ -256,16 +259,20 @@ func (r *GlobalCheckpoints) WaitForIndex(b bool) *GlobalCheckpoints { // will cause Elasticsearch to immediately return the current global // checkpoints. // API name: checkpoints -func (r *GlobalCheckpoints) Checkpoints(v string) *GlobalCheckpoints { - r.values.Set("checkpoints", v) +func (r *GlobalCheckpoints) Checkpoints(checkpoints ...int64) *GlobalCheckpoints { + tmp := []string{} + for _, item := range checkpoints { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("checkpoints", strings.Join(tmp, ",")) return r } // Timeout Period to wait for a global checkpoints to advance past `checkpoints`. // API name: timeout -func (r *GlobalCheckpoints) Timeout(v string) *GlobalCheckpoints { - r.values.Set("timeout", v) +func (r *GlobalCheckpoints) Timeout(duration string) *GlobalCheckpoints { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/globalcheckpoints/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/globalcheckpoints/response.go index 15a85a3f8..4e08659c7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/globalcheckpoints/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/globalcheckpoints/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package globalcheckpoints // Response holds the response body struct for the package globalcheckpoints // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/fleet/global_checkpoints/GlobalCheckpointsResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/fleet/global_checkpoints/GlobalCheckpointsResponse.ts#L22-L27 type Response struct { GlobalCheckpoints []int64 `json:"global_checkpoints"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/msearch/msearch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/msearch/msearch.go new file mode 100644 index 000000000..a722e9618 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/msearch/msearch.go @@ -0,0 +1,407 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Multi Search API where the search will only be executed after specified +// checkpoints are available due to a refresh. This API is designed for internal +// use by the fleet server project. +package msearch + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Msearch struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + req *Request + deferred []func(request *Request) error + raw io.Reader + + paramSet int + + index string +} + +// NewMsearch type alias for index. +type NewMsearch func() *Msearch + +// NewMsearchFunc returns a new instance of Msearch with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewMsearchFunc(tp elastictransport.Interface) NewMsearch { + return func() *Msearch { + n := New(tp) + + return n + } +} + +// Multi Search API where the search will only be executed after specified +// checkpoints are available due to a refresh. This API is designed for internal +// use by the fleet server project. 
+// +// +func New(tp elastictransport.Interface) *Msearch { + r := &Msearch{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Msearch) Raw(raw io.Reader) *Msearch { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Msearch) Request(req *Request) *Msearch { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Msearch) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw != nil { + r.buf.ReadFrom(r.raw) + } else if r.req != nil { + + for _, elem := range *r.req { + data, err := json.Marshal(elem) + if err != nil { + return nil, err + } + r.buf.Write(data) + r.buf.Write([]byte("\n")) + } + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Msearch: %w", err) + } + + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_fleet") + path.WriteString("/") + path.WriteString("_fleet_msearch") + + method = http.MethodPost + case r.paramSet == indexMask: + path.WriteString("/") + + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_fleet") + path.WriteString("/") + path.WriteString("_fleet_msearch") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else 
{ + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+x-ndjson;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Msearch) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the Msearch query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a msearch.Response +func (r Msearch) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + r.TypedKeys(true) + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// Header set a key, value pair in the Msearch headers map. +func (r *Msearch) Header(key, value string) *Msearch { + r.headers.Set(key, value) + + return r +} + +// Index A single target to search. If the target is an index alias, it must resolve +// to a single index. 
+// API Name: index +func (r *Msearch) Index(index string) *Msearch { + r.paramSet |= indexMask + r.index = index + + return r +} + +// AllowNoIndices If false, the request returns an error if any wildcard expression, index +// alias, or _all value targets only missing or closed indices. This behavior +// applies even if the request targets other open indices. For example, a +// request targeting foo*,bar* returns an error if an index starts with foo but +// no index starts with bar. +// API name: allow_no_indices +func (r *Msearch) AllowNoIndices(allownoindices bool) *Msearch { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) + + return r +} + +// CcsMinimizeRoundtrips If true, network roundtrips between the coordinating node and remote clusters +// are minimized for cross-cluster search requests. +// API name: ccs_minimize_roundtrips +func (r *Msearch) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Msearch { + r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) + + return r +} + +// ExpandWildcards Type of index that wildcard expressions can match. If the request can target +// data streams, this argument determines whether wildcard expressions match +// hidden data streams. +// API name: expand_wildcards +func (r *Msearch) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Msearch { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IgnoreThrottled If true, concrete, expanded or aliased indices are ignored when frozen. +// API name: ignore_throttled +func (r *Msearch) IgnoreThrottled(ignorethrottled bool) *Msearch { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) + + return r +} + +// IgnoreUnavailable If true, missing or closed indices are not included in the response. 
+// API name: ignore_unavailable +func (r *Msearch) IgnoreUnavailable(ignoreunavailable bool) *Msearch { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// MaxConcurrentSearches Maximum number of concurrent searches the multi search API can execute. +// API name: max_concurrent_searches +func (r *Msearch) MaxConcurrentSearches(maxconcurrentsearches string) *Msearch { + r.values.Set("max_concurrent_searches", maxconcurrentsearches) + + return r +} + +// MaxConcurrentShardRequests Maximum number of concurrent shard requests that each sub-search request +// executes per node. +// API name: max_concurrent_shard_requests +func (r *Msearch) MaxConcurrentShardRequests(maxconcurrentshardrequests string) *Msearch { + r.values.Set("max_concurrent_shard_requests", maxconcurrentshardrequests) + + return r +} + +// PreFilterShardSize Defines a threshold that enforces a pre-filter roundtrip to prefilter search +// shards based on query rewriting if the number of shards the search request +// expands to exceeds the threshold. This filter roundtrip can limit the number +// of shards significantly if for instance a shard can not match any documents +// based on its rewrite method i.e., if date filters are mandatory to match but +// the shard bounds and the query are disjoint. +// API name: pre_filter_shard_size +func (r *Msearch) PreFilterShardSize(prefiltershardsize string) *Msearch { + r.values.Set("pre_filter_shard_size", prefiltershardsize) + + return r +} + +// SearchType Indicates whether global term and document frequencies should be used when +// scoring returned documents. +// API name: search_type +func (r *Msearch) SearchType(searchtype searchtype.SearchType) *Msearch { + r.values.Set("search_type", searchtype.String()) + + return r +} + +// RestTotalHitsAsInt If true, hits.total are returned as an integer in the response. Defaults to +// false, which returns an object. 
+// API name: rest_total_hits_as_int +func (r *Msearch) RestTotalHitsAsInt(resttotalhitsasint bool) *Msearch { + r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) + + return r +} + +// TypedKeys Specifies whether aggregation and suggester names should be prefixed by their +// respective types in the response. +// API name: typed_keys +func (r *Msearch) TypedKeys(typedkeys bool) *Msearch { + r.values.Set("typed_keys", strconv.FormatBool(typedkeys)) + + return r +} + +// WaitForCheckpoints A comma separated list of checkpoints. When configured, the search API will +// only be executed on a shard +// after the relevant checkpoint has become visible for search. Defaults to an +// empty list which will cause +// Elasticsearch to immediately execute the search. +// API name: wait_for_checkpoints +func (r *Msearch) WaitForCheckpoints(waitforcheckpoints ...int64) *Msearch { + tmp := []string{} + for _, item := range waitforcheckpoints { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("wait_for_checkpoints", strings.Join(tmp, ",")) + + return r +} + +// AllowPartialSearchResults If true, returns partial results if there are shard request timeouts or +// [shard +// failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). +// If false, returns +// an error with no partial results. Defaults to the configured cluster setting +// `search.default_allow_partial_results` +// which is true by default. 
+// API name: allow_partial_search_results +func (r *Msearch) AllowPartialSearchResults(allowpartialsearchresults bool) *Msearch { + r.values.Set("allow_partial_search_results", strconv.FormatBool(allowpartialsearchresults)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/ping/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/msearch/request.go similarity index 65% rename from vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/ping/response.go rename to vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/msearch/request.go index 6dad16fc0..70e951df6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/ping/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/msearch/request.go @@ -16,19 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 -package ping +package msearch -// Response holds the response body struct for the package ping -// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/ping/PingResponse.ts#L22-L24 - -type Response struct { -} +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) -// NewResponse returns a Response -func NewResponse() *Response { - r := &Response{} - return r -} +// Request holds the request body struct for the package msearch +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/fleet/msearch/MultiSearchRequest.ts#L32-L115 +type Request = []types.MsearchRequestItem diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/existscomponenttemplate/response.go 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/msearch/response.go similarity index 67% rename from vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/existscomponenttemplate/response.go rename to vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/msearch/response.go index c7ea2f1ff..3d4c58258 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/existscomponenttemplate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/msearch/response.go @@ -16,15 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 -package existscomponenttemplate +package msearch -// Response holds the response body struct for the package existscomponenttemplate +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package msearch // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/exists_component_template/ClusterComponentTemplateExistsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/fleet/msearch/MultiSearchResponse.ts#L25-L29 type Response struct { + Docs []types.MsearchResponseItem `json:"docs"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/search/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/search/request.go index ddb3a0fe1..3f31faaa3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/search/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/search/request.go @@ -16,7 +16,7 @@ 
// under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package search @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/fleet/search/SearchRequest.ts#L55-L260 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/fleet/search/SearchRequest.ts#L55-L260 type Request struct { Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` Collapse *types.FieldCollapse `json:"collapse,omitempty"` @@ -65,7 +65,7 @@ type Request struct { Rescore []types.Rescore `json:"rescore,omitempty"` // RuntimeMappings Defines one or more runtime fields in the search request. These fields take // precedence over mapped fields with the same name. - RuntimeMappings map[string]types.RuntimeField `json:"runtime_mappings,omitempty"` + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` // ScriptFields Retrieve a script evaluation (based on different fields) for each hit. ScriptFields map[string]types.ScriptField `json:"script_fields,omitempty"` SearchAfter []types.FieldValue `json:"search_after,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/search/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/search/response.go index 276790cf5..e7490ecaf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/search/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/search/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package search @@ -25,6 +25,7 @@ import ( "encoding/json" "errors" "io" + "strconv" "strings" "github.com/elastic/go-elasticsearch/v8/typedapi/types" @@ -32,7 +33,7 @@ import ( // Response holds the response body struct for the package search // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/fleet/search/SearchResponse.ts#L33-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/fleet/search/SearchResponse.ts#L33-L50 type Response struct { Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` @@ -76,6 +77,10 @@ func (s *Response) UnmarshalJSON(data []byte) error { switch t { case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + for dec.More() { tt, err := dec.Token() if err != nil { @@ -88,415 +93,494 @@ func (s *Response) UnmarshalJSON(data []byte) error { if strings.Contains(value, "#") { elems := strings.Split(value, "#") if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } switch elems[0] { + case "cardinality": o := types.NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "hdr_percentiles": o := types.NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "hdr_percentile_ranks": o := types.NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "tdigest_percentiles": o := 
types.NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "tdigest_percentile_ranks": o := types.NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "percentiles_bucket": o := types.NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "median_absolute_deviation": o := types.NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "min": o := types.NewMinAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "max": o := types.NewMaxAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sum": o := types.NewSumAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "avg": o := types.NewAvgAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "weighted_avg": o := types.NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "value_count": o := types.NewValueCountAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "simple_value": o := types.NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case 
"derivative": o := types.NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "bucket_metric_value": o := types.NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "stats": o := types.NewStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "stats_bucket": o := types.NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "extended_stats": o := types.NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "extended_stats_bucket": o := types.NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_bounds": o := types.NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_centroid": o := types.NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "histogram": o := types.NewHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "date_histogram": o := types.NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "auto_date_histogram": o := types.NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return 
err } s.Aggregations[elems[1]] = o + case "variable_width_histogram": o := types.NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sterms": o := types.NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "lterms": o := types.NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "dterms": o := types.NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umterms": o := types.NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "lrareterms": o := types.NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "srareterms": o := types.NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umrareterms": o := types.NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "multi_terms": o := types.NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "missing": o := types.NewMissingAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "nested": o := types.NewNestedAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return 
err } s.Aggregations[elems[1]] = o + case "reverse_nested": o := types.NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "global": o := types.NewGlobalAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "filter": o := types.NewFilterAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "children": o := types.NewChildrenAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "parent": o := types.NewParentAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sampler": o := types.NewSamplerAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "unmapped_sampler": o := types.NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geohash_grid": o := types.NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geotile_grid": o := types.NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geohex_grid": o := types.NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "range": o := types.NewRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + 
case "date_range": o := types.NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_distance": o := types.NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "ip_range": o := types.NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "ip_prefix": o := types.NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "filters": o := types.NewFiltersAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "adjacency_matrix": o := types.NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "siglterms": o := types.NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sigsterms": o := types.NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umsigterms": o := types.NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "composite": o := types.NewCompositeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := types.NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + case 
"scripted_metric": o := types.NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "top_hits": o := types.NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "inference": o := types.NewInferenceAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "string_stats": o := types.NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "box_plot": o := types.NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "top_metrics": o := types.NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "t_test": o := types.NewTTestAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "rate": o := types.NewRateAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "simple_long_value": o := types.NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "matrix_stats": o := types.NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_line": o := types.NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + default: o := 
make(map[string]interface{}, 0) if err := dec.Decode(&o); err != nil { @@ -523,6 +607,9 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } if err := dec.Decode(&s.Fields); err != nil { return err } @@ -533,13 +620,34 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "max_score": - if err := dec.Decode(&s.MaxScore); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := types.Float64(value) + s.MaxScore = &f + case float64: + f := types.Float64(v) + s.MaxScore = &f } case "num_reduce_phases": - if err := dec.Decode(&s.NumReducePhases); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumReducePhases = &value + case float64: + f := int64(v) + s.NumReducePhases = &f } case "pit_id": @@ -563,23 +671,109 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "suggest": - if err := dec.Decode(&s.Suggest); err != nil { - return err + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + switch elems[0] { + + case "completion": + o := types.NewCompletionSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "phrase": + o := types.NewPhraseSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = 
append(s.Suggest[elems[1]], o) + + case "term": + o := types.NewTermSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + } + } else { + return errors.New("cannot decode JSON for field Suggest") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[value] = append(s.Suggest[value], o) + } + } } case "terminated_early": - if err := dec.Decode(&s.TerminatedEarly); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TerminatedEarly = &value + case bool: + s.TerminatedEarly = &v } case "timed_out": - if err := dec.Decode(&s.TimedOut); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimedOut = value + case bool: + s.TimedOut = v } case "took": - if err := dec.Decode(&s.Took); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Took = value + case float64: + f := int64(v) + s.Took = f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/search/search.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/search/search.go index fcdc02d97..9826cc29b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/search/search.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/fleet/search/search.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Search API where the search will only be executed after specified checkpoints // are available due to a refresh. This API is designed for internal use by the @@ -37,7 +37,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestmode" @@ -59,8 +59,9 @@ type Search struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -76,7 +77,7 @@ func NewSearchFunc(tp elastictransport.Interface) NewSearch { return func(index string) *Search { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -93,6 +94,8 @@ func New(tp elastictransport.Interface) *Search { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -122,9 +125,19 @@ func (r *Search) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -132,6 +145,7 @@ func (r *Search) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -216,7 +230,6 @@ func (r Search) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := 
types.NewElasticsearchError() @@ -225,6 +238,10 @@ func (r Search) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -238,331 +255,525 @@ func (r *Search) Header(key, value string) *Search { // Index A single target to search. If the target is an index alias, it must resolve // to a single index. // API Name: index -func (r *Search) Index(v string) *Search { +func (r *Search) _index(index string) *Search { r.paramSet |= indexMask - r.index = v + r.index = index return r } // API name: allow_no_indices -func (r *Search) AllowNoIndices(b bool) *Search { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *Search) AllowNoIndices(allownoindices bool) *Search { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } // API name: analyzer -func (r *Search) Analyzer(v string) *Search { - r.values.Set("analyzer", v) +func (r *Search) Analyzer(analyzer string) *Search { + r.values.Set("analyzer", analyzer) return r } // API name: analyze_wildcard -func (r *Search) AnalyzeWildcard(b bool) *Search { - r.values.Set("analyze_wildcard", strconv.FormatBool(b)) +func (r *Search) AnalyzeWildcard(analyzewildcard bool) *Search { + r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) return r } // API name: batched_reduce_size -func (r *Search) BatchedReduceSize(v string) *Search { - r.values.Set("batched_reduce_size", v) +func (r *Search) BatchedReduceSize(batchedreducesize string) *Search { + r.values.Set("batched_reduce_size", batchedreducesize) return r } // API name: ccs_minimize_roundtrips -func (r *Search) CcsMinimizeRoundtrips(b bool) *Search { - r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(b)) +func (r *Search) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Search { + r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips)) return r } // API name: 
default_operator -func (r *Search) DefaultOperator(enum operator.Operator) *Search { - r.values.Set("default_operator", enum.String()) +func (r *Search) DefaultOperator(defaultoperator operator.Operator) *Search { + r.values.Set("default_operator", defaultoperator.String()) return r } // API name: df -func (r *Search) Df(v string) *Search { - r.values.Set("df", v) - - return r -} - -// API name: docvalue_fields -func (r *Search) DocvalueFields(v string) *Search { - r.values.Set("docvalue_fields", v) +func (r *Search) Df(df string) *Search { + r.values.Set("df", df) return r } // API name: expand_wildcards -func (r *Search) ExpandWildcards(v string) *Search { - r.values.Set("expand_wildcards", v) - - return r -} - -// API name: explain -func (r *Search) Explain(b bool) *Search { - r.values.Set("explain", strconv.FormatBool(b)) +func (r *Search) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Search { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } // API name: ignore_throttled -func (r *Search) IgnoreThrottled(b bool) *Search { - r.values.Set("ignore_throttled", strconv.FormatBool(b)) +func (r *Search) IgnoreThrottled(ignorethrottled bool) *Search { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) return r } // API name: ignore_unavailable -func (r *Search) IgnoreUnavailable(b bool) *Search { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Search) IgnoreUnavailable(ignoreunavailable bool) *Search { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } // API name: lenient -func (r *Search) Lenient(b bool) *Search { - r.values.Set("lenient", strconv.FormatBool(b)) +func (r *Search) Lenient(lenient bool) *Search { + r.values.Set("lenient", strconv.FormatBool(lenient)) return r } // API name: max_concurrent_shard_requests -func (r *Search) 
MaxConcurrentShardRequests(v string) *Search { - r.values.Set("max_concurrent_shard_requests", v) +func (r *Search) MaxConcurrentShardRequests(maxconcurrentshardrequests string) *Search { + r.values.Set("max_concurrent_shard_requests", maxconcurrentshardrequests) return r } // API name: min_compatible_shard_node -func (r *Search) MinCompatibleShardNode(v string) *Search { - r.values.Set("min_compatible_shard_node", v) +func (r *Search) MinCompatibleShardNode(versionstring string) *Search { + r.values.Set("min_compatible_shard_node", versionstring) return r } // API name: preference -func (r *Search) Preference(v string) *Search { - r.values.Set("preference", v) +func (r *Search) Preference(preference string) *Search { + r.values.Set("preference", preference) return r } // API name: pre_filter_shard_size -func (r *Search) PreFilterShardSize(v string) *Search { - r.values.Set("pre_filter_shard_size", v) +func (r *Search) PreFilterShardSize(prefiltershardsize string) *Search { + r.values.Set("pre_filter_shard_size", prefiltershardsize) return r } // API name: request_cache -func (r *Search) RequestCache(b bool) *Search { - r.values.Set("request_cache", strconv.FormatBool(b)) +func (r *Search) RequestCache(requestcache bool) *Search { + r.values.Set("request_cache", strconv.FormatBool(requestcache)) return r } // API name: routing -func (r *Search) Routing(v string) *Search { - r.values.Set("routing", v) +func (r *Search) Routing(routing string) *Search { + r.values.Set("routing", routing) return r } // API name: scroll -func (r *Search) Scroll(v string) *Search { - r.values.Set("scroll", v) +func (r *Search) Scroll(duration string) *Search { + r.values.Set("scroll", duration) return r } // API name: search_type -func (r *Search) SearchType(enum searchtype.SearchType) *Search { - r.values.Set("search_type", enum.String()) +func (r *Search) SearchType(searchtype searchtype.SearchType) *Search { + r.values.Set("search_type", searchtype.String()) return r } -// API name: 
stats -func (r *Search) Stats(v string) *Search { - r.values.Set("stats", v) +// SuggestField Specifies which field to use for suggestions. +// API name: suggest_field +func (r *Search) SuggestField(field string) *Search { + r.values.Set("suggest_field", field) return r } -// API name: stored_fields -func (r *Search) StoredFields(v string) *Search { - r.values.Set("stored_fields", v) +// API name: suggest_mode +func (r *Search) SuggestMode(suggestmode suggestmode.SuggestMode) *Search { + r.values.Set("suggest_mode", suggestmode.String()) return r } -// SuggestField Specifies which field to use for suggestions. -// API name: suggest_field -func (r *Search) SuggestField(v string) *Search { - r.values.Set("suggest_field", v) +// API name: suggest_size +func (r *Search) SuggestSize(suggestsize string) *Search { + r.values.Set("suggest_size", suggestsize) return r } -// API name: suggest_mode -func (r *Search) SuggestMode(enum suggestmode.SuggestMode) *Search { - r.values.Set("suggest_mode", enum.String()) +// SuggestText The source text for which the suggestions should be returned. +// API name: suggest_text +func (r *Search) SuggestText(suggesttext string) *Search { + r.values.Set("suggest_text", suggesttext) return r } -// API name: suggest_size -func (r *Search) SuggestSize(v string) *Search { - r.values.Set("suggest_size", v) +// API name: typed_keys +func (r *Search) TypedKeys(typedkeys bool) *Search { + r.values.Set("typed_keys", strconv.FormatBool(typedkeys)) return r } -// SuggestText The source text for which the suggestions should be returned. 
-// API name: suggest_text -func (r *Search) SuggestText(v string) *Search { - r.values.Set("suggest_text", v) +// API name: rest_total_hits_as_int +func (r *Search) RestTotalHitsAsInt(resttotalhitsasint bool) *Search { + r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) return r } -// API name: terminate_after -func (r *Search) TerminateAfter(v string) *Search { - r.values.Set("terminate_after", v) +// API name: _source_excludes +func (r *Search) SourceExcludes_(fields ...string) *Search { + r.values.Set("_source_excludes", strings.Join(fields, ",")) return r } -// API name: timeout -func (r *Search) Timeout(v string) *Search { - r.values.Set("timeout", v) +// API name: _source_includes +func (r *Search) SourceIncludes_(fields ...string) *Search { + r.values.Set("_source_includes", strings.Join(fields, ",")) return r } -// API name: track_total_hits -func (r *Search) TrackTotalHits(v string) *Search { - r.values.Set("track_total_hits", v) +// API name: q +func (r *Search) Q(q string) *Search { + r.values.Set("q", q) return r } -// API name: track_scores -func (r *Search) TrackScores(b bool) *Search { - r.values.Set("track_scores", strconv.FormatBool(b)) +// WaitForCheckpoints A comma separated list of checkpoints. When configured, the search API will +// only be executed on a shard +// after the relevant checkpoint has become visible for search. Defaults to an +// empty list which will cause +// Elasticsearch to immediately execute the search. 
+// API name: wait_for_checkpoints +func (r *Search) WaitForCheckpoints(waitforcheckpoints ...int64) *Search { + tmp := []string{} + for _, item := range waitforcheckpoints { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("wait_for_checkpoints", strings.Join(tmp, ",")) return r } -// API name: typed_keys -func (r *Search) TypedKeys(b bool) *Search { - r.values.Set("typed_keys", strconv.FormatBool(b)) +// AllowPartialSearchResults If true, returns partial results if there are shard request timeouts or +// [shard +// failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). +// If false, returns +// an error with no partial results. Defaults to the configured cluster setting +// `search.default_allow_partial_results` +// which is true by default. +// API name: allow_partial_search_results +func (r *Search) AllowPartialSearchResults(allowpartialsearchresults bool) *Search { + r.values.Set("allow_partial_search_results", strconv.FormatBool(allowpartialsearchresults)) return r } -// API name: rest_total_hits_as_int -func (r *Search) RestTotalHitsAsInt(b bool) *Search { - r.values.Set("rest_total_hits_as_int", strconv.FormatBool(b)) +// API name: aggregations +func (r *Search) Aggregations(aggregations map[string]types.Aggregations) *Search { + + r.req.Aggregations = aggregations return r } -// API name: version -func (r *Search) Version(b bool) *Search { - r.values.Set("version", strconv.FormatBool(b)) +// API name: collapse +func (r *Search) Collapse(collapse *types.FieldCollapse) *Search { + + r.req.Collapse = collapse return r } -// API name: _source -func (r *Search) Source_(v string) *Search { - r.values.Set("_source", v) +// DocvalueFields Array of wildcard (*) patterns. The request returns doc values for field +// names matching these patterns in the hits.fields property of the response. 
+// API name: docvalue_fields +func (r *Search) DocvalueFields(docvaluefields ...types.FieldAndFormat) *Search { + r.req.DocvalueFields = docvaluefields return r } -// API name: _source_excludes -func (r *Search) SourceExcludes_(v string) *Search { - r.values.Set("_source_excludes", v) +// Explain If true, returns detailed information about score computation as part of a +// hit. +// API name: explain +func (r *Search) Explain(explain bool) *Search { + r.req.Explain = &explain return r } -// API name: _source_includes -func (r *Search) SourceIncludes_(v string) *Search { - r.values.Set("_source_includes", v) +// Ext Configuration of search extensions defined by Elasticsearch plugins. +// API name: ext +func (r *Search) Ext(ext map[string]json.RawMessage) *Search { + + r.req.Ext = ext return r } -// API name: seq_no_primary_term -func (r *Search) SeqNoPrimaryTerm(b bool) *Search { - r.values.Set("seq_no_primary_term", strconv.FormatBool(b)) +// Fields Array of wildcard (*) patterns. The request returns values for field names +// matching these patterns in the hits.fields property of the response. +// API name: fields +func (r *Search) Fields(fields ...types.FieldAndFormat) *Search { + r.req.Fields = fields return r } -// API name: q -func (r *Search) Q(v string) *Search { - r.values.Set("q", v) +// From Starting document offset. By default, you cannot page through more than +// 10,000 +// hits using the from and size parameters. To page through more hits, use the +// search_after parameter. +// API name: from +func (r *Search) From(from int) *Search { + r.req.From = &from + + return r +} + +// API name: highlight +func (r *Search) Highlight(highlight *types.Highlight) *Search { + + r.req.Highlight = highlight + + return r +} + +// IndicesBoost Boosts the _score of documents from specified indices. 
+// API name: indices_boost +func (r *Search) IndicesBoost(indicesboosts ...map[string]types.Float64) *Search { + r.req.IndicesBoost = indicesboosts + + return r +} + +// MinScore Minimum _score for matching documents. Documents with a lower _score are +// not included in the search results. +// API name: min_score +func (r *Search) MinScore(minscore types.Float64) *Search { + + r.req.MinScore = &minscore + + return r +} + +// Pit Limits the search to a point in time (PIT). If you provide a PIT, you +// cannot specify an in the request path. +// API name: pit +func (r *Search) Pit(pit *types.PointInTimeReference) *Search { + + r.req.Pit = pit return r } +// API name: post_filter +func (r *Search) PostFilter(postfilter *types.Query) *Search { + + r.req.PostFilter = postfilter + + return r +} + +// API name: profile +func (r *Search) Profile(profile bool) *Search { + r.req.Profile = &profile + + return r +} + +// Query Defines the search definition using the Query DSL. +// API name: query +func (r *Search) Query(query *types.Query) *Search { + + r.req.Query = query + + return r +} + +// API name: rescore +func (r *Search) Rescore(rescores ...types.Rescore) *Search { + r.req.Rescore = rescores + + return r +} + +// RuntimeMappings Defines one or more runtime fields in the search request. These fields take +// precedence over mapped fields with the same name. +// API name: runtime_mappings +func (r *Search) RuntimeMappings(runtimefields types.RuntimeFields) *Search { + r.req.RuntimeMappings = runtimefields + + return r +} + +// ScriptFields Retrieve a script evaluation (based on different fields) for each hit. 
+// API name: script_fields +func (r *Search) ScriptFields(scriptfields map[string]types.ScriptField) *Search { + + r.req.ScriptFields = scriptfields + + return r +} + +// API name: search_after +func (r *Search) SearchAfter(sortresults ...types.FieldValue) *Search { + r.req.SearchAfter = sortresults + + return r +} + +// SeqNoPrimaryTerm If true, returns sequence number and primary term of the last modification +// of each hit. See Optimistic concurrency control. +// API name: seq_no_primary_term +func (r *Search) SeqNoPrimaryTerm(seqnoprimaryterm bool) *Search { + r.req.SeqNoPrimaryTerm = &seqnoprimaryterm + + return r +} + +// Size The number of hits to return. By default, you cannot page through more +// than 10,000 hits using the from and size parameters. To page through more +// hits, use the search_after parameter. // API name: size -func (r *Search) Size(i int) *Search { - r.values.Set("size", strconv.Itoa(i)) +func (r *Search) Size(size int) *Search { + r.req.Size = &size return r } -// API name: from -func (r *Search) From(i int) *Search { - r.values.Set("from", strconv.Itoa(i)) +// API name: slice +func (r *Search) Slice(slice *types.SlicedScroll) *Search { + + r.req.Slice = slice return r } // API name: sort -func (r *Search) Sort(v string) *Search { - r.values.Set("sort", v) +func (r *Search) Sort(sorts ...types.SortCombinations) *Search { + r.req.Sort = sorts return r } -// WaitForCheckpoints A comma separated list of checkpoints. When configured, the search API will -// only be executed on a shard -// after the relevant checkpoint has become visible for search. Defaults to an -// empty list which will cause -// Elasticsearch to immediately execute the search. -// API name: wait_for_checkpoints -func (r *Search) WaitForCheckpoints(v string) *Search { - r.values.Set("wait_for_checkpoints", v) +// Source_ Indicates which source fields are returned for matching documents. These +// fields are returned in the hits._source property of the search response. 
+// API name: _source +func (r *Search) Source_(sourceconfig types.SourceConfig) *Search { + r.req.Source_ = sourceconfig return r } -// AllowPartialSearchResults If true, returns partial results if there are shard request timeouts or -// [shard -// failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). -// If false, returns -// an error with no partial results. Defaults to the configured cluster setting -// `search.default_allow_partial_results` -// which is true by default. -// API name: allow_partial_search_results -func (r *Search) AllowPartialSearchResults(b bool) *Search { - r.values.Set("allow_partial_search_results", strconv.FormatBool(b)) +// Stats Stats groups to associate with the search. Each group maintains a statistics +// aggregation for its associated searches. You can retrieve these stats using +// the indices stats API. +// API name: stats +func (r *Search) Stats(stats ...string) *Search { + r.req.Stats = stats + + return r +} + +// StoredFields List of stored fields to return as part of a hit. If no fields are specified, +// no stored fields are included in the response. If this field is specified, +// the _source +// parameter defaults to false. You can pass _source: true to return both source +// fields +// and stored fields in the search response. +// API name: stored_fields +func (r *Search) StoredFields(fields ...string) *Search { + r.req.StoredFields = fields + + return r +} + +// API name: suggest +func (r *Search) Suggest(suggest *types.Suggester) *Search { + + r.req.Suggest = suggest + + return r +} + +// TerminateAfter Maximum number of documents to collect for each shard. If a query reaches +// this +// limit, Elasticsearch terminates the query early. Elasticsearch collects +// documents +// before sorting. Defaults to 0, which does not terminate query execution +// early. 
+// API name: terminate_after +func (r *Search) TerminateAfter(terminateafter int64) *Search { + + r.req.TerminateAfter = &terminateafter + + return r +} + +// Timeout Specifies the period of time to wait for a response from each shard. If no +// response +// is received before the timeout expires, the request fails and returns an +// error. +// Defaults to no timeout. +// API name: timeout +func (r *Search) Timeout(timeout string) *Search { + + r.req.Timeout = &timeout + + return r +} + +// TrackScores If true, calculate and return document scores, even if the scores are not +// used for sorting. +// API name: track_scores +func (r *Search) TrackScores(trackscores bool) *Search { + r.req.TrackScores = &trackscores + + return r +} + +// TrackTotalHits Number of hits matching the query to count accurately. If true, the exact +// number of hits is returned at the cost of some performance. If false, the +// response does not include the total number of hits matching the query. +// Defaults to 10,000 hits. +// API name: track_total_hits +func (r *Search) TrackTotalHits(trackhits types.TrackHits) *Search { + r.req.TrackTotalHits = trackhits + + return r +} + +// Version If true, returns document version as part of a hit. +// API name: version +func (r *Search) Version(version bool) *Search { + r.req.Version = &version return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/graph/explore/explore.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/graph/explore/explore.go index f6cd849b6..722d46b46 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/graph/explore/explore.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/graph/explore/explore.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Explore extracted and summarized information about the documents and terms in // an index. @@ -53,8 +53,9 @@ type Explore struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +71,7 @@ func NewExploreFunc(tp elastictransport.Interface) NewExplore { return func(index string) *Explore { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -79,13 +80,15 @@ func NewExploreFunc(tp elastictransport.Interface) NewExplore { // Explore extracted and summarized information about the documents and terms in // an index. // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/graph-explore-api.html func New(tp elastictransport.Interface) *Explore { r := &Explore{ transport: tp, values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -115,9 +118,19 @@ func (r *Explore) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -125,6 +138,7 @@ func (r *Explore) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -207,7 +221,6 @@ func (r Explore) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -216,6 +229,10 @@ func (r Explore) Do(ctx context.Context) (*Response, error) { return nil, err } 
+ if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -226,28 +243,68 @@ func (r *Explore) Header(key, value string) *Explore { return r } -// Index A comma-separated list of index names to search; use `_all` or empty string -// to perform the operation on all indices +// Index Name of the index. // API Name: index -func (r *Explore) Index(v string) *Explore { +func (r *Explore) _index(index string) *Explore { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// Routing Specific routing value +// Routing Custom value used to route operations to a specific shard. // API name: routing -func (r *Explore) Routing(v string) *Explore { - r.values.Set("routing", v) +func (r *Explore) Routing(routing string) *Explore { + r.values.Set("routing", routing) return r } -// Timeout Explicit operation timeout +// Timeout Specifies the period of time to wait for a response from each shard. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// Defaults to no timeout. // API name: timeout -func (r *Explore) Timeout(v string) *Explore { - r.values.Set("timeout", v) +func (r *Explore) Timeout(duration string) *Explore { + r.values.Set("timeout", duration) + + return r +} + +// Connections Specifies or more fields from which you want to extract terms that are +// associated with the specified vertices. +// API name: connections +func (r *Explore) Connections(connections *types.Hop) *Explore { + + r.req.Connections = connections + + return r +} + +// Controls Direct the Graph API how to build the graph. +// API name: controls +func (r *Explore) Controls(controls *types.ExploreControls) *Explore { + + r.req.Controls = controls + + return r +} + +// Query A seed query that identifies the documents of interest. Can be any valid +// Elasticsearch query. 
+// API name: query +func (r *Explore) Query(query *types.Query) *Explore { + + r.req.Query = query + + return r +} + +// Vertices Specifies one or more fields that contain the terms you want to include in +// the graph as vertices. +// API name: vertices +func (r *Explore) Vertices(vertices ...types.VertexDefinition) *Explore { + r.req.Vertices = vertices return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/graph/explore/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/graph/explore/request.go index d477a25c7..2d368fea4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/graph/explore/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/graph/explore/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package explore @@ -29,12 +29,20 @@ import ( // Request holds the request body struct for the package explore // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/graph/explore/GraphExploreRequest.ts#L28-L47 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/graph/explore/GraphExploreRequest.ts#L28-L72 type Request struct { - Connections *types.Hop `json:"connections,omitempty"` - Controls *types.ExploreControls `json:"controls,omitempty"` - Query *types.Query `json:"query,omitempty"` - Vertices []types.VertexDefinition `json:"vertices,omitempty"` + + // Connections Specifies or more fields from which you want to extract terms that are + // associated with the specified vertices. 
+ Connections *types.Hop `json:"connections,omitempty"` + // Controls Direct the Graph API how to build the graph. + Controls *types.ExploreControls `json:"controls,omitempty"` + // Query A seed query that identifies the documents of interest. Can be any valid + // Elasticsearch query. + Query *types.Query `json:"query,omitempty"` + // Vertices Specifies one or more fields that contain the terms you want to include in + // the graph as vertices. + Vertices []types.VertexDefinition `json:"vertices,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/graph/explore/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/graph/explore/response.go index e35522138..40fb4ae38 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/graph/explore/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/graph/explore/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package explore @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package explore // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/graph/explore/GraphExploreResponse.ts#L25-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/graph/explore/GraphExploreResponse.ts#L25-L33 type Response struct { Connections []types.Connection `json:"connections"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/deletelifecycle/delete_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/deletelifecycle/delete_lifecycle.go index 523a3f2a9..f18d0e123 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/deletelifecycle/delete_lifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/deletelifecycle/delete_lifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes the specified lifecycle policy definition. A currently used policy // cannot be deleted. 
@@ -68,7 +68,7 @@ func NewDeleteLifecycleFunc(tp elastictransport.Interface) NewDeleteLifecycle { return func(policy string) *DeleteLifecycle { n := New(tp) - n.Policy(policy) + n._policy(policy) return n } @@ -172,7 +172,6 @@ func (r DeleteLifecycle) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r DeleteLifecycle) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +217,9 @@ func (r *DeleteLifecycle) Header(key, value string) *DeleteLifecycle { // Policy Identifier for the policy. // API Name: policy -func (r *DeleteLifecycle) Policy(v string) *DeleteLifecycle { +func (r *DeleteLifecycle) _policy(policy string) *DeleteLifecycle { r.paramSet |= policyMask - r.policy = v + r.policy = policy return r } @@ -224,8 +227,8 @@ func (r *DeleteLifecycle) Policy(v string) *DeleteLifecycle { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *DeleteLifecycle) MasterTimeout(v string) *DeleteLifecycle { - r.values.Set("master_timeout", v) +func (r *DeleteLifecycle) MasterTimeout(duration string) *DeleteLifecycle { + r.values.Set("master_timeout", duration) return r } @@ -233,8 +236,8 @@ func (r *DeleteLifecycle) MasterTimeout(v string) *DeleteLifecycle { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. 
// API name: timeout -func (r *DeleteLifecycle) Timeout(v string) *DeleteLifecycle { - r.values.Set("timeout", v) +func (r *DeleteLifecycle) Timeout(duration string) *DeleteLifecycle { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/deletelifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/deletelifecycle/response.go index 3b4876faa..e6ebb37c4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/deletelifecycle/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/deletelifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletelifecycle // Response holds the response body struct for the package deletelifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/delete_lifecycle/DeleteLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/delete_lifecycle/DeleteLifecycleResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/explainlifecycle/explain_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/explainlifecycle/explain_lifecycle.go index da162b3d2..0fa0a174d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/explainlifecycle/explain_lifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/explainlifecycle/explain_lifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves information about the index's current lifecycle state, such as the // currently executing phase, action, and step. @@ -69,7 +69,7 @@ func NewExplainLifecycleFunc(tp elastictransport.Interface) NewExplainLifecycle return func(index string) *ExplainLifecycle { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -173,7 +173,6 @@ func (r ExplainLifecycle) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -182,6 +181,10 @@ func (r ExplainLifecycle) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -217,9 +220,9 @@ func (r *ExplainLifecycle) Header(key, value string) *ExplainLifecycle { // Supports wildcards (`*`). // To target all data streams and indices, use `*` or `_all`. // API Name: index -func (r *ExplainLifecycle) Index(v string) *ExplainLifecycle { +func (r *ExplainLifecycle) _index(index string) *ExplainLifecycle { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -228,16 +231,16 @@ func (r *ExplainLifecycle) Index(v string) *ExplainLifecycle { // in an error state, either due to an encountering an error while executing the // policy, or attempting to use a policy that does not exist. // API name: only_errors -func (r *ExplainLifecycle) OnlyErrors(b bool) *ExplainLifecycle { - r.values.Set("only_errors", strconv.FormatBool(b)) +func (r *ExplainLifecycle) OnlyErrors(onlyerrors bool) *ExplainLifecycle { + r.values.Set("only_errors", strconv.FormatBool(onlyerrors)) return r } // OnlyManaged Filters the returned indices to only indices that are managed by ILM. 
// API name: only_managed -func (r *ExplainLifecycle) OnlyManaged(b bool) *ExplainLifecycle { - r.values.Set("only_managed", strconv.FormatBool(b)) +func (r *ExplainLifecycle) OnlyManaged(onlymanaged bool) *ExplainLifecycle { + r.values.Set("only_managed", strconv.FormatBool(onlymanaged)) return r } @@ -245,8 +248,8 @@ func (r *ExplainLifecycle) OnlyManaged(b bool) *ExplainLifecycle { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *ExplainLifecycle) MasterTimeout(v string) *ExplainLifecycle { - r.values.Set("master_timeout", v) +func (r *ExplainLifecycle) MasterTimeout(duration string) *ExplainLifecycle { + r.values.Set("master_timeout", duration) return r } @@ -254,8 +257,8 @@ func (r *ExplainLifecycle) MasterTimeout(v string) *ExplainLifecycle { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. // API name: timeout -func (r *ExplainLifecycle) Timeout(v string) *ExplainLifecycle { - r.values.Set("timeout", v) +func (r *ExplainLifecycle) Timeout(duration string) *ExplainLifecycle { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/explainlifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/explainlifecycle/response.go index 8537163a8..50c185b33 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/explainlifecycle/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/explainlifecycle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package explainlifecycle @@ -31,7 +31,7 @@ import ( // Response holds the response body struct for the package explainlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/explain_lifecycle/ExplainLifecycleResponse.ts#L24-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/explain_lifecycle/ExplainLifecycleResponse.ts#L24-L28 type Response struct { Indices map[string]types.LifecycleExplain `json:"indices"` @@ -60,6 +60,9 @@ func (s *Response) UnmarshalJSON(data []byte) error { switch t { case "indices": + if s.Indices == nil { + s.Indices = make(map[string]types.LifecycleExplain, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -83,9 +86,11 @@ func (s *Response) UnmarshalJSON(data []byte) error { } s.Indices[key] = oo default: - if err := dec.Decode(&s.Indices); err != nil { + oo := new(types.LifecycleExplain) + if err := localDec.Decode(&oo); err != nil { return err } + s.Indices[key] = oo } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/getlifecycle/get_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/getlifecycle/get_lifecycle.go index 6d30b68cc..79d8aa7cf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/getlifecycle/get_lifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/getlifecycle/get_lifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns the specified policy definition. Includes the policy version and last // modified date. @@ -177,7 +177,6 @@ func (r GetLifecycle) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -186,6 +185,10 @@ func (r GetLifecycle) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -219,9 +222,9 @@ func (r *GetLifecycle) Header(key, value string) *GetLifecycle { // Policy Identifier for the policy. // API Name: policy -func (r *GetLifecycle) Policy(v string) *GetLifecycle { +func (r *GetLifecycle) Policy(policy string) *GetLifecycle { r.paramSet |= policyMask - r.policy = v + r.policy = policy return r } @@ -229,8 +232,8 @@ func (r *GetLifecycle) Policy(v string) *GetLifecycle { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *GetLifecycle) MasterTimeout(v string) *GetLifecycle { - r.values.Set("master_timeout", v) +func (r *GetLifecycle) MasterTimeout(duration string) *GetLifecycle { + r.values.Set("master_timeout", duration) return r } @@ -238,8 +241,8 @@ func (r *GetLifecycle) MasterTimeout(v string) *GetLifecycle { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. 
// API name: timeout -func (r *GetLifecycle) Timeout(v string) *GetLifecycle { - r.values.Set("timeout", v) +func (r *GetLifecycle) Timeout(duration string) *GetLifecycle { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/getlifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/getlifecycle/response.go index fd025f91f..54675d76a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/getlifecycle/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/getlifecycle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getlifecycle @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/get_lifecycle/GetLifecycleResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/get_lifecycle/GetLifecycleResponse.ts#L23-L25 type Response map[string]types.Lifecycle diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/getstatus/get_status.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/getstatus/get_status.go index 33fe19082..1e385ee0b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/getstatus/get_status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/getstatus/get_status.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves the current index lifecycle management (ILM) status. package getstatus @@ -159,7 +159,6 @@ func (r GetStatus) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r GetStatus) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/getstatus/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/getstatus/response.go index 0076b1b25..98801722e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/getstatus/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/getstatus/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getstatus @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getstatus // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/get_status/GetIlmStatusResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/get_status/GetIlmStatusResponse.ts#L22-L24 type Response struct { OperationMode lifecycleoperationmode.LifecycleOperationMode `json:"operation_mode"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/migratetodatatiers/migrate_to_data_tiers.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/migratetodatatiers/migrate_to_data_tiers.go index 308fa3b3b..6f9172558 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/migratetodatatiers/migrate_to_data_tiers.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/migratetodatatiers/migrate_to_data_tiers.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Migrates the indices and ILM policies away from custom node attribute // allocation routing to data tiers routing @@ -50,8 +50,9 @@ type MigrateToDataTiers struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -79,6 +80,8 @@ func New(tp elastictransport.Interface) *MigrateToDataTiers { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -108,9 +111,19 @@ func (r *MigrateToDataTiers) HttpRequest(ctx context.Context) (*http.Request, er var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -118,6 +131,7 @@ func (r *MigrateToDataTiers) HttpRequest(ctx context.Context) (*http.Request, er } r.buf.Write(data) + } r.path.Scheme = "http" @@ -197,7 +211,6 @@ func (r MigrateToDataTiers) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -206,6 +219,10 @@ func (r MigrateToDataTiers) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -221,8 +238,24 @@ func (r *MigrateToDataTiers) Header(key, value string) *MigrateToDataTiers { // This provides a way to retrieve the indices and ILM policies that need to be // migrated. 
// API name: dry_run -func (r *MigrateToDataTiers) DryRun(b bool) *MigrateToDataTiers { - r.values.Set("dry_run", strconv.FormatBool(b)) +func (r *MigrateToDataTiers) DryRun(dryrun bool) *MigrateToDataTiers { + r.values.Set("dry_run", strconv.FormatBool(dryrun)) + + return r +} + +// API name: legacy_template_to_delete +func (r *MigrateToDataTiers) LegacyTemplateToDelete(legacytemplatetodelete string) *MigrateToDataTiers { + + r.req.LegacyTemplateToDelete = &legacytemplatetodelete + + return r +} + +// API name: node_attribute +func (r *MigrateToDataTiers) NodeAttribute(nodeattribute string) *MigrateToDataTiers { + + r.req.NodeAttribute = &nodeattribute return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/migratetodatatiers/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/migratetodatatiers/request.go index 8c7e932ba..edc3fa715 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/migratetodatatiers/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/migratetodatatiers/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package migratetodatatiers @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package migratetodatatiers // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/migrate_to_data_tiers/Request.ts#L22-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/migrate_to_data_tiers/Request.ts#L22-L43 type Request struct { LegacyTemplateToDelete *string `json:"legacy_template_to_delete,omitempty"` NodeAttribute *string `json:"node_attribute,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/migratetodatatiers/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/migratetodatatiers/response.go index 8170c5a46..ce94afbb3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/migratetodatatiers/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/migratetodatatiers/response.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package migratetodatatiers +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Response holds the response body struct for the package migratetodatatiers // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/migrate_to_data_tiers/Response.ts#L22-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/migrate_to_data_tiers/Response.ts#L22-L32 type Response struct { DryRun bool `json:"dry_run"` @@ -39,3 +47,84 @@ func NewResponse() *Response { r := &Response{} return r } + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dry_run": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DryRun = value + case bool: + s.DryRun = v + } + + case "migrated_component_templates": + if err := dec.Decode(&s.MigratedComponentTemplates); err != nil { + return err + } + + case "migrated_composable_templates": + if err := dec.Decode(&s.MigratedComposableTemplates); err != nil { + return err + } + + case "migrated_ilm_policies": + if err := dec.Decode(&s.MigratedIlmPolicies); err != nil { + return err + } + + case "migrated_indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.MigratedIndices = append(s.MigratedIndices, *o) + } else { + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.MigratedIndices); err != nil { + return err + } + } + + case "migrated_legacy_templates": + if err := dec.Decode(&s.MigratedLegacyTemplates); err != nil { + return err + } + + case "removed_legacy_template": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RemovedLegacyTemplate = o + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/movetostep/move_to_step.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/movetostep/move_to_step.go index bb8cd3e5c..55119dbe9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/movetostep/move_to_step.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/movetostep/move_to_step.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Manually moves an index into the specified step and executes that step. 
package movetostep @@ -52,8 +52,9 @@ type MoveToStep struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewMoveToStepFunc(tp elastictransport.Interface) NewMoveToStep { return func(index string) *MoveToStep { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *MoveToStep { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *MoveToStep) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *MoveToStep) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -205,7 +219,6 @@ func (r MoveToStep) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -214,6 +227,10 @@ func (r MoveToStep) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -226,9 +243,25 @@ func (r *MoveToStep) Header(key, value string) *MoveToStep { // Index The name of the index whose lifecycle step is to change // API Name: index -func (r *MoveToStep) Index(v string) *MoveToStep { +func (r *MoveToStep) _index(index string) *MoveToStep { r.paramSet |= indexMask - r.index = v + r.index = index + + return r +} + +// API name: current_step +func (r *MoveToStep) CurrentStep(currentstep *types.StepKey) *MoveToStep { + + r.req.CurrentStep = currentstep + + return r +} + +// API name: 
next_step +func (r *MoveToStep) NextStep(nextstep *types.StepKey) *MoveToStep { + + r.req.NextStep = nextstep return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/movetostep/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/movetostep/request.go index a364bb180..0e4dc0251 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/movetostep/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/movetostep/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package movetostep @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package movetostep // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/move_to_step/MoveToStepRequest.ts#L24-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/move_to_step/MoveToStepRequest.ts#L24-L36 type Request struct { CurrentStep *types.StepKey `json:"current_step,omitempty"` NextStep *types.StepKey `json:"next_step,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/movetostep/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/movetostep/response.go index 278654a7c..441a3fe8c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/movetostep/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/movetostep/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package movetostep // Response holds the response body struct for the package movetostep // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/move_to_step/MoveToStepResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/move_to_step/MoveToStepResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/putlifecycle/put_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/putlifecycle/put_lifecycle.go index 28cec2eab..e2fda035a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/putlifecycle/put_lifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/putlifecycle/put_lifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates a lifecycle policy package putlifecycle @@ -52,8 +52,9 @@ type PutLifecycle struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewPutLifecycleFunc(tp elastictransport.Interface) NewPutLifecycle { return func(policy string) *PutLifecycle { n := New(tp) - n.Policy(policy) + n._policy(policy) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *PutLifecycle { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *PutLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *PutLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -205,7 +219,6 @@ func (r PutLifecycle) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -214,6 +227,10 @@ func (r PutLifecycle) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -226,9 +243,9 @@ func (r *PutLifecycle) Header(key, value string) *PutLifecycle { // Policy Identifier for the policy. 
// API Name: policy -func (r *PutLifecycle) Policy(v string) *PutLifecycle { +func (r *PutLifecycle) _policy(policy string) *PutLifecycle { r.paramSet |= policyMask - r.policy = v + r.policy = policy return r } @@ -236,8 +253,8 @@ func (r *PutLifecycle) Policy(v string) *PutLifecycle { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *PutLifecycle) MasterTimeout(v string) *PutLifecycle { - r.values.Set("master_timeout", v) +func (r *PutLifecycle) MasterTimeout(duration string) *PutLifecycle { + r.values.Set("master_timeout", duration) return r } @@ -245,8 +262,8 @@ func (r *PutLifecycle) MasterTimeout(v string) *PutLifecycle { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. // API name: timeout -func (r *PutLifecycle) Timeout(v string) *PutLifecycle { - r.values.Set("timeout", v) +func (r *PutLifecycle) Timeout(duration string) *PutLifecycle { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/putlifecycle/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/putlifecycle/request.go index 113077908..6a65843e7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/putlifecycle/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/putlifecycle/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putlifecycle @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package putlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/put_lifecycle/PutLifecycleRequest.ts#L25-L56 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/put_lifecycle/PutLifecycleRequest.ts#L25-L55 type Request struct { Policy *types.IlmPolicy `json:"policy,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/putlifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/putlifecycle/response.go index aa52fba68..7f227ed04 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/putlifecycle/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/putlifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putlifecycle // Response holds the response body struct for the package putlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/put_lifecycle/PutLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/put_lifecycle/PutLifecycleResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/removepolicy/remove_policy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/removepolicy/remove_policy.go index 005d50e19..0b36a63b8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/removepolicy/remove_policy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/removepolicy/remove_policy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Removes the assigned lifecycle policy and stops managing the specified index package removepolicy @@ -67,7 +67,7 @@ func NewRemovePolicyFunc(tp elastictransport.Interface) NewRemovePolicy { return func(index string) *RemovePolicy { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -170,7 +170,6 @@ func (r RemovePolicy) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r RemovePolicy) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,9 +215,9 @@ func (r *RemovePolicy) Header(key, value string) *RemovePolicy { // Index The name of the index to remove policy on // API Name: index -func (r *RemovePolicy) Index(v string) *RemovePolicy { +func (r *RemovePolicy) _index(index string) *RemovePolicy { r.paramSet |= indexMask - r.index = v + r.index = index return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/removepolicy/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/removepolicy/response.go index 42052f74c..8339f6470 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/removepolicy/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/removepolicy/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package removepolicy // Response holds the response body struct for the package removepolicy // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/remove_policy/RemovePolicyResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/remove_policy/RemovePolicyResponse.ts#L22-L27 type Response struct { FailedIndexes []string `json:"failed_indexes"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/retry/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/retry/response.go index 41f05f228..441c28a79 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/retry/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/retry/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package retry // Response holds the response body struct for the package retry // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/retry/RetryIlmResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/retry/RetryIlmResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/retry/retry.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/retry/retry.go index a4250b4eb..0fb8aa5cd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/retry/retry.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/retry/retry.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retries executing the policy for an index that is in the ERROR step. 
package retry @@ -67,7 +67,7 @@ func NewRetryFunc(tp elastictransport.Interface) NewRetry { return func(index string) *Retry { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -170,7 +170,6 @@ func (r Retry) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r Retry) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -213,9 +216,9 @@ func (r *Retry) Header(key, value string) *Retry { // Index The name of the indices (comma-separated) whose failed lifecycle step is to // be retry // API Name: index -func (r *Retry) Index(v string) *Retry { +func (r *Retry) _index(index string) *Retry { r.paramSet |= indexMask - r.index = v + r.index = index return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/start/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/start/response.go index ee082127d..857c0ea2a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/start/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/start/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package start // Response holds the response body struct for the package start // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/start/StartIlmResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/start/StartIlmResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/start/start.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/start/start.go index 9f2fb25f9..db9138a2c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/start/start.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/start/start.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Start the index lifecycle management (ILM) plugin. 
package start @@ -159,7 +159,6 @@ func (r Start) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r Start) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -200,15 +203,15 @@ func (r *Start) Header(key, value string) *Start { } // API name: master_timeout -func (r *Start) MasterTimeout(v string) *Start { - r.values.Set("master_timeout", v) +func (r *Start) MasterTimeout(duration string) *Start { + r.values.Set("master_timeout", duration) return r } // API name: timeout -func (r *Start) Timeout(v string) *Start { - r.values.Set("timeout", v) +func (r *Start) Timeout(duration string) *Start { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/stop/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/stop/response.go index fbd0ebb99..bc192b88e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/stop/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/stop/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package stop // Response holds the response body struct for the package stop // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/stop/StopIlmResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/stop/StopIlmResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/stop/stop.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/stop/stop.go index 70544c39f..16ce10d62 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/stop/stop.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ilm/stop/stop.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Halts all lifecycle management operations and stops the index lifecycle // management (ILM) plugin @@ -161,7 +161,6 @@ func (r Stop) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -170,6 +169,10 @@ func (r Stop) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -202,15 +205,15 @@ func (r *Stop) Header(key, value string) *Stop { } // API name: master_timeout -func (r *Stop) MasterTimeout(v string) *Stop { - r.values.Set("master_timeout", v) +func (r *Stop) MasterTimeout(duration string) *Stop { + r.values.Set("master_timeout", duration) return r } // API name: timeout -func (r *Stop) Timeout(v string) *Stop { - r.values.Set("timeout", v) +func (r *Stop) Timeout(duration string) *Stop { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/addblock/add_block.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/addblock/add_block.go index 9a3340e03..78c1e959f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/addblock/add_block.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/addblock/add_block.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Adds a block to an index. 
package addblock @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -71,9 +72,9 @@ func NewAddBlockFunc(tp elastictransport.Interface) NewAddBlock { return func(index, block string) *AddBlock { n := New(tp) - n.Index(index) + n._index(index) - n.Block(block) + n._block(block) return n } @@ -177,7 +178,6 @@ func (r AddBlock) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -186,6 +186,10 @@ func (r AddBlock) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -219,18 +223,18 @@ func (r *AddBlock) Header(key, value string) *AddBlock { // Index A comma separated list of indices to add a block to // API Name: index -func (r *AddBlock) Index(v string) *AddBlock { +func (r *AddBlock) _index(index string) *AddBlock { r.paramSet |= indexMask - r.index = v + r.index = index return r } // Block The block to add (one of read, write, read_only or metadata) // API Name: block -func (r *AddBlock) Block(v string) *AddBlock { +func (r *AddBlock) _block(block string) *AddBlock { r.paramSet |= blockMask - r.block = v + r.block = block return r } @@ -238,8 +242,8 @@ func (r *AddBlock) Block(v string) *AddBlock { // AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete // indices. 
(This includes `_all` string or when no indices have been specified) // API name: allow_no_indices -func (r *AddBlock) AllowNoIndices(b bool) *AddBlock { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *AddBlock) AllowNoIndices(allownoindices bool) *AddBlock { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } @@ -247,8 +251,12 @@ func (r *AddBlock) AllowNoIndices(b bool) *AddBlock { // ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, // closed or both. // API name: expand_wildcards -func (r *AddBlock) ExpandWildcards(v string) *AddBlock { - r.values.Set("expand_wildcards", v) +func (r *AddBlock) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *AddBlock { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } @@ -256,24 +264,24 @@ func (r *AddBlock) ExpandWildcards(v string) *AddBlock { // IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable // (missing or closed) // API name: ignore_unavailable -func (r *AddBlock) IgnoreUnavailable(b bool) *AddBlock { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *AddBlock) IgnoreUnavailable(ignoreunavailable bool) *AddBlock { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } // MasterTimeout Specify timeout for connection to master // API name: master_timeout -func (r *AddBlock) MasterTimeout(v string) *AddBlock { - r.values.Set("master_timeout", v) +func (r *AddBlock) MasterTimeout(duration string) *AddBlock { + r.values.Set("master_timeout", duration) return r } // Timeout Explicit operation timeout // API name: timeout -func (r *AddBlock) Timeout(v string) *AddBlock { - r.values.Set("timeout", v) +func (r *AddBlock) Timeout(duration string) *AddBlock { + r.values.Set("timeout", duration) return r } diff --git 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/addblock/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/addblock/response.go index 5800f16a6..01cba81c8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/addblock/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/addblock/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package addblock @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package addblock // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/add_block/IndicesAddBlockResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/add_block/IndicesAddBlockResponse.ts#L22-L28 type Response struct { Acknowledged bool `json:"acknowledged"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/analyze/analyze.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/analyze/analyze.go index 349f05e56..de5f48635 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/analyze/analyze.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/analyze/analyze.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Performs the analysis process on a text and return the tokens breakdown of // the text. @@ -53,8 +53,9 @@ type Analyze struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -77,13 +78,15 @@ func NewAnalyzeFunc(tp elastictransport.Interface) NewAnalyze { // Performs the analysis process on a text and return the tokens breakdown of // the text. // -// https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-analyze.html +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-analyze.html func New(tp elastictransport.Interface) *Analyze { r := &Analyze{ transport: tp, values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *Analyze) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *Analyze) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -208,7 +222,6 @@ func (r Analyze) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -217,6 +230,10 @@ func (r Analyze) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -227,11 +244,95 @@ func (r *Analyze) Header(key, value string) *Analyze { 
return r } -// Index The name of the index to scope the operation +// Index Index used to derive the analyzer. +// If specified, the `analyzer` or field parameter overrides this value. +// If no index is specified or the index does not have a default analyzer, the +// analyze API uses the standard analyzer. // API Name: index -func (r *Analyze) Index(v string) *Analyze { +func (r *Analyze) Index(index string) *Analyze { r.paramSet |= indexMask - r.index = v + r.index = index + + return r +} + +// Analyzer The name of the analyzer that should be applied to the provided `text`. +// This could be a built-in analyzer, or an analyzer that’s been configured in +// the index. +// API name: analyzer +func (r *Analyze) Analyzer(analyzer string) *Analyze { + + r.req.Analyzer = &analyzer + + return r +} + +// Attributes Array of token attributes used to filter the output of the `explain` +// parameter. +// API name: attributes +func (r *Analyze) Attributes(attributes ...string) *Analyze { + r.req.Attributes = attributes + + return r +} + +// CharFilter Array of character filters used to preprocess characters before the +// tokenizer. +// API name: char_filter +func (r *Analyze) CharFilter(charfilters ...types.CharFilter) *Analyze { + r.req.CharFilter = charfilters + + return r +} + +// Explain If `true`, the response includes token attributes and additional details. +// API name: explain +func (r *Analyze) Explain(explain bool) *Analyze { + r.req.Explain = &explain + + return r +} + +// Field Field used to derive the analyzer. +// To use this parameter, you must specify an index. +// If specified, the `analyzer` parameter overrides this value. +// API name: field +func (r *Analyze) Field(field string) *Analyze { + r.req.Field = &field + + return r +} + +// Filter Array of token filters used to apply after the tokenizer. 
+// API name: filter +func (r *Analyze) Filter(filters ...types.TokenFilter) *Analyze { + r.req.Filter = filters + + return r +} + +// Normalizer Normalizer to use to convert text into a single token. +// API name: normalizer +func (r *Analyze) Normalizer(normalizer string) *Analyze { + + r.req.Normalizer = &normalizer + + return r +} + +// Text Text to analyze. +// If an array of strings is provided, it is analyzed as a multi-value field. +// API name: text +func (r *Analyze) Text(texttoanalyzes ...string) *Analyze { + r.req.Text = texttoanalyzes + + return r +} + +// Tokenizer Tokenizer to use to convert text into tokens. +// API name: tokenizer +func (r *Analyze) Tokenizer(tokenizer types.Tokenizer) *Analyze { + r.req.Tokenizer = tokenizer return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/analyze/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/analyze/request.go index efa2600ab..848023ccb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/analyze/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/analyze/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package analyze @@ -29,17 +29,34 @@ import ( // Request holds the request body struct for the package analyze // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/analyze/IndicesAnalyzeRequest.ts#L27-L47 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/analyze/IndicesAnalyzeRequest.ts#L27-L92 type Request struct { - Analyzer *string `json:"analyzer,omitempty"` - Attributes []string `json:"attributes,omitempty"` - CharFilter []types.CharFilter `json:"char_filter,omitempty"` - Explain *bool `json:"explain,omitempty"` - Field *string `json:"field,omitempty"` - Filter []types.TokenFilter `json:"filter,omitempty"` - Normalizer *string `json:"normalizer,omitempty"` - Text []string `json:"text,omitempty"` - Tokenizer types.Tokenizer `json:"tokenizer,omitempty"` + + // Analyzer The name of the analyzer that should be applied to the provided `text`. + // This could be a built-in analyzer, or an analyzer that’s been configured in + // the index. + Analyzer *string `json:"analyzer,omitempty"` + // Attributes Array of token attributes used to filter the output of the `explain` + // parameter. + Attributes []string `json:"attributes,omitempty"` + // CharFilter Array of character filters used to preprocess characters before the + // tokenizer. + CharFilter []types.CharFilter `json:"char_filter,omitempty"` + // Explain If `true`, the response includes token attributes and additional details. + Explain *bool `json:"explain,omitempty"` + // Field Field used to derive the analyzer. + // To use this parameter, you must specify an index. + // If specified, the `analyzer` parameter overrides this value. 
+ Field *string `json:"field,omitempty"` + // Filter Array of token filters used to apply after the tokenizer. + Filter []types.TokenFilter `json:"filter,omitempty"` + // Normalizer Normalizer to use to convert text into a single token. + Normalizer *string `json:"normalizer,omitempty"` + // Text Text to analyze. + // If an array of strings is provided, it is analyzed as a multi-value field. + Text []string `json:"text,omitempty"` + // Tokenizer Tokenizer to use to convert text into tokens. + Tokenizer types.Tokenizer `json:"tokenizer,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/analyze/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/analyze/response.go index d8ad2b218..549c3fc93 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/analyze/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/analyze/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package analyze @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package analyze // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/analyze/IndicesAnalyzeResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/analyze/IndicesAnalyzeResponse.ts#L22-L27 type Response struct { Detail *types.AnalyzeDetail `json:"detail,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clearcache/clear_cache.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clearcache/clear_cache.go index 277083462..ac78d6523 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clearcache/clear_cache.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clearcache/clear_cache.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Clears all or specific caches for one or more indices. 
package clearcache @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -176,7 +177,6 @@ func (r ClearCache) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -185,6 +185,10 @@ func (r ClearCache) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,71 +220,83 @@ func (r *ClearCache) Header(key, value string) *ClearCache { return r } -// Index A comma-separated list of index name to limit the operation +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. // API Name: index -func (r *ClearCache) Index(v string) *ClearCache { +func (r *ClearCache) Index(index string) *ClearCache { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. 
// API name: allow_no_indices -func (r *ClearCache) AllowNoIndices(b bool) *ClearCache { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *ClearCache) AllowNoIndices(allownoindices bool) *ClearCache { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *ClearCache) ExpandWildcards(v string) *ClearCache { - r.values.Set("expand_wildcards", v) +func (r *ClearCache) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ClearCache { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// Fielddata Clear field data +// Fielddata If `true`, clears the fields cache. +// Use the `fields` parameter to clear the cache of specific fields only. // API name: fielddata -func (r *ClearCache) Fielddata(b bool) *ClearCache { - r.values.Set("fielddata", strconv.FormatBool(b)) +func (r *ClearCache) Fielddata(fielddata bool) *ClearCache { + r.values.Set("fielddata", strconv.FormatBool(fielddata)) return r } -// Fields A comma-separated list of fields to clear when using the `fielddata` -// parameter (default: all) +// Fields Comma-separated list of field names used to limit the `fielddata` parameter. 
// API name: fields -func (r *ClearCache) Fields(v string) *ClearCache { - r.values.Set("fields", v) +func (r *ClearCache) Fields(fields ...string) *ClearCache { + r.values.Set("fields", strings.Join(fields, ",")) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *ClearCache) IgnoreUnavailable(b bool) *ClearCache { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *ClearCache) IgnoreUnavailable(ignoreunavailable bool) *ClearCache { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// Query Clear query caches +// Query If `true`, clears the query cache. // API name: query -func (r *ClearCache) Query(b bool) *ClearCache { - r.values.Set("query", strconv.FormatBool(b)) +func (r *ClearCache) Query(query bool) *ClearCache { + r.values.Set("query", strconv.FormatBool(query)) return r } -// Request Clear request cache +// Request If `true`, clears the request cache. // API name: request -func (r *ClearCache) Request(b bool) *ClearCache { - r.values.Set("request", strconv.FormatBool(b)) +func (r *ClearCache) Request(request bool) *ClearCache { + r.values.Set("request", strconv.FormatBool(request)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clearcache/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clearcache/response.go index 51faa203a..da1e7b289 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clearcache/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clearcache/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package clearcache @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearcache // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/clear_cache/IndicesClearCacheResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/clear_cache/IndicesClearCacheResponse.ts#L22-L24 type Response struct { Shards_ types.ShardStatistics `json:"_shards"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clone/clone.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clone/clone.go index 87ad8d742..a96d30de8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clone/clone.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clone/clone.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Clones an index package clone @@ -54,8 +54,9 @@ type Clone struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -72,9 +73,9 @@ func NewCloneFunc(tp elastictransport.Interface) NewClone { return func(index, target string) *Clone { n := New(tp) - n.Index(index) + n._index(index) - n.Target(target) + n._target(target) return n } @@ -89,6 +90,8 @@ func New(tp elastictransport.Interface) *Clone { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -118,9 +121,19 @@ func (r *Clone) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -128,6 +141,7 @@ func (r *Clone) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -211,7 +225,6 @@ func (r Clone) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -220,6 +233,10 @@ func (r Clone) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -230,45 +247,69 @@ func (r *Clone) Header(key, value string) *Clone { return r } -// Index The name of the source index to clone +// Index Name of the source index to clone. 
// API Name: index -func (r *Clone) Index(v string) *Clone { +func (r *Clone) _index(index string) *Clone { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// Target The name of the target index to clone into +// Target Name of the target index to create. // API Name: target -func (r *Clone) Target(v string) *Clone { +func (r *Clone) _target(target string) *Clone { r.paramSet |= targetMask - r.target = v + r.target = target return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *Clone) MasterTimeout(v string) *Clone { - r.values.Set("master_timeout", v) +func (r *Clone) MasterTimeout(duration string) *Clone { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *Clone) Timeout(v string) *Clone { - r.values.Set("timeout", v) +func (r *Clone) Timeout(duration string) *Clone { + r.values.Set("timeout", duration) return r } -// WaitForActiveShards Set the number of active shards to wait for on the cloned index before the -// operation returns. +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). // API name: wait_for_active_shards -func (r *Clone) WaitForActiveShards(v string) *Clone { - r.values.Set("wait_for_active_shards", v) +func (r *Clone) WaitForActiveShards(waitforactiveshards string) *Clone { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// Aliases Aliases for the resulting index. 
+// API name: aliases +func (r *Clone) Aliases(aliases map[string]types.Alias) *Clone { + + r.req.Aliases = aliases + + return r +} + +// Settings Configuration options for the target index. +// API name: settings +func (r *Clone) Settings(settings map[string]json.RawMessage) *Clone { + + r.req.Settings = settings return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clone/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clone/request.go index 76b0f4fbe..2bd0e9573 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clone/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clone/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package clone @@ -29,9 +29,12 @@ import ( // Request holds the request body struct for the package clone // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/clone/IndicesCloneRequest.ts#L27-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/clone/IndicesCloneRequest.ts#L27-L75 type Request struct { - Aliases map[string]types.Alias `json:"aliases,omitempty"` + + // Aliases Aliases for the resulting index. + Aliases map[string]types.Alias `json:"aliases,omitempty"` + // Settings Configuration options for the target index. 
Settings map[string]json.RawMessage `json:"settings,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clone/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clone/response.go index 8784d83d1..4e3efbde1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clone/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/clone/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package clone // Response holds the response body struct for the package clone // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/clone/IndicesCloneResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/clone/IndicesCloneResponse.ts#L22-L28 type Response struct { Acknowledged bool `json:"acknowledged"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/close/close.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/close/close.go index 4841734a8..623f1af04 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/close/close.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/close/close.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Closes an index. 
package close @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -68,7 +69,7 @@ func NewCloseFunc(tp elastictransport.Interface) NewClose { return func(index string) *Close { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -76,7 +77,7 @@ func NewCloseFunc(tp elastictransport.Interface) NewClose { // Closes an index. // -// https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-close.html func New(tp elastictransport.Interface) *Close { r := &Close{ transport: tp, @@ -169,7 +170,6 @@ func (r Close) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -178,6 +178,10 @@ func (r Close) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -209,62 +213,78 @@ func (r *Close) Header(key, value string) *Close { return r } -// Index A comma separated list of indices to close +// Index Comma-separated list or wildcard expression of index names used to limit the +// request. // API Name: index -func (r *Close) Index(v string) *Close { +func (r *Close) _index(index string) *Close { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. 
// API name: allow_no_indices -func (r *Close) AllowNoIndices(b bool) *Close { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *Close) AllowNoIndices(allownoindices bool) *Close { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *Close) ExpandWildcards(v string) *Close { - r.values.Set("expand_wildcards", v) +func (r *Close) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Close { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *Close) IgnoreUnavailable(b bool) *Close { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Close) IgnoreUnavailable(ignoreunavailable bool) *Close { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
// API name: master_timeout -func (r *Close) MasterTimeout(v string) *Close { - r.values.Set("master_timeout", v) +func (r *Close) MasterTimeout(duration string) *Close { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *Close) Timeout(v string) *Close { - r.values.Set("timeout", v) +func (r *Close) Timeout(duration string) *Close { + r.values.Set("timeout", duration) return r } -// WaitForActiveShards Sets the number of active shards to wait for before the operation returns. +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). // API name: wait_for_active_shards -func (r *Close) WaitForActiveShards(v string) *Close { - r.values.Set("wait_for_active_shards", v) +func (r *Close) WaitForActiveShards(waitforactiveshards string) *Close { + r.values.Set("wait_for_active_shards", waitforactiveshards) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/close/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/close/response.go index a5bea2c75..f020d83f3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/close/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/close/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package close @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package close // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/close/CloseIndexResponse.ts#L24-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/close/CloseIndexResponse.ts#L24-L30 type Response struct { Acknowledged bool `json:"acknowledged"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/create/create.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/create/create.go index ba33376bc..5d6b534e3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/create/create.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/create/create.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates an index with optional settings and mappings. package create @@ -52,8 +52,9 @@ type Create struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewCreateFunc(tp elastictransport.Interface) NewCreate { return func(index string) *Create { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -77,13 +78,15 @@ func NewCreateFunc(tp elastictransport.Interface) NewCreate { // Creates an index with optional settings and mappings. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index.html +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-create-index.html func New(tp elastictransport.Interface) *Create { r := &Create{ transport: tp, values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *Create) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *Create) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -201,7 +215,6 @@ func (r Create) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -210,6 +223,10 @@ func (r Create) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -220,35 +237,72 @@ func (r *Create) Header(key, value string) *Create { return r } -// Index The name of the index +// Index Name of the index you wish to create. // API Name: index -func (r *Create) Index(v string) *Create { +func (r *Create) _index(index string) *Create { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
// API name: master_timeout -func (r *Create) MasterTimeout(v string) *Create { - r.values.Set("master_timeout", v) +func (r *Create) MasterTimeout(duration string) *Create { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *Create) Timeout(v string) *Create { - r.values.Set("timeout", v) +func (r *Create) Timeout(duration string) *Create { + r.values.Set("timeout", duration) return r } -// WaitForActiveShards Set the number of active shards to wait for before the operation returns. +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). // API name: wait_for_active_shards -func (r *Create) WaitForActiveShards(v string) *Create { - r.values.Set("wait_for_active_shards", v) +func (r *Create) WaitForActiveShards(waitforactiveshards string) *Create { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// Aliases Aliases for the index. +// API name: aliases +func (r *Create) Aliases(aliases map[string]types.Alias) *Create { + + r.req.Aliases = aliases + + return r +} + +// Mappings Mapping for fields in the index. If specified, this mapping can include: +// - Field names +// - Field data types +// - Mapping parameters +// API name: mappings +func (r *Create) Mappings(mappings *types.TypeMapping) *Create { + + r.req.Mappings = mappings + + return r +} + +// Settings Configuration options for the index. 
+// API name: settings +func (r *Create) Settings(settings *types.IndexSettings) *Create { + + r.req.Settings = settings return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/create/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/create/request.go index 7f81d4708..ce45e12af 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/create/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/create/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package create @@ -29,14 +29,17 @@ import ( // Request holds the request body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/create/IndicesCreateRequest.ts#L28-L56 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/create/IndicesCreateRequest.ts#L28-L81 type Request struct { + + // Aliases Aliases for the index. Aliases map[string]types.Alias `json:"aliases,omitempty"` // Mappings Mapping for fields in the index. If specified, this mapping can include: // - Field names // - Field data types // - Mapping parameters - Mappings *types.TypeMapping `json:"mappings,omitempty"` + Mappings *types.TypeMapping `json:"mappings,omitempty"` + // Settings Configuration options for the index. 
Settings *types.IndexSettings `json:"settings,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/create/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/create/response.go index 1aa74761a..e99381227 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/create/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/create/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package create // Response holds the response body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/create/IndicesCreateResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/create/IndicesCreateResponse.ts#L22-L28 type Response struct { Acknowledged bool `json:"acknowledged"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/createdatastream/create_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/createdatastream/create_data_stream.go index ee1d80fc5..42f3b6af4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/createdatastream/create_data_stream.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/createdatastream/create_data_stream.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates a data stream package createdatastream @@ -67,7 +67,7 @@ func NewCreateDataStreamFunc(tp elastictransport.Interface) NewCreateDataStream return func(name string) *CreateDataStream { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -168,7 +168,6 @@ func (r CreateDataStream) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -177,6 +176,10 @@ func (r CreateDataStream) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -208,11 +211,18 @@ func (r *CreateDataStream) Header(key, value string) *CreateDataStream { return r } -// Name The name of the data stream +// Name Name of the data stream, which must meet the following criteria: +// Lowercase only; +// Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a +// space character; +// Cannot start with `-`, `_`, `+`, or `.ds-`; +// Cannot be `.` or `..`; +// Cannot be longer than 255 bytes. Multi-byte characters count towards this +// limit faster. 
// API Name: name -func (r *CreateDataStream) Name(v string) *CreateDataStream { +func (r *CreateDataStream) _name(name string) *CreateDataStream { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/createdatastream/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/createdatastream/response.go index bb3a63418..5e0bdb0cc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/createdatastream/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/createdatastream/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package createdatastream // Response holds the response body struct for the package createdatastream // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/create_data_stream/IndicesCreateDataStreamResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/create_data_stream/IndicesCreateDataStreamResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/datastreamsstats/data_streams_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/datastreamsstats/data_streams_stats.go index 22b25a203..da10f855d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/datastreamsstats/data_streams_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/datastreamsstats/data_streams_stats.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Provides statistics on operations happening in a data stream. package datastreamsstats @@ -35,6 +35,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -175,7 +176,6 @@ func (r DataStreamsStats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -184,6 +184,10 @@ func (r DataStreamsStats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -215,19 +219,26 @@ func (r *DataStreamsStats) Header(key, value string) *DataStreamsStats { return r } -// Name A comma-separated list of data stream names; use `_all` or empty string to -// perform the operation on all data streams +// Name Comma-separated list of data streams used to limit the request. +// Wildcard expressions (`*`) are supported. +// To target all data streams in a cluster, omit this parameter or use `*`. // API Name: name -func (r *DataStreamsStats) Name(v string) *DataStreamsStats { +func (r *DataStreamsStats) Name(name string) *DataStreamsStats { r.paramSet |= nameMask - r.name = v + r.name = name return r } +// ExpandWildcards Type of data stream that wildcard patterns can match. +// Supports comma-separated values, such as `open,hidden`. 
// API name: expand_wildcards -func (r *DataStreamsStats) ExpandWildcards(v string) *DataStreamsStats { - r.values.Set("expand_wildcards", v) +func (r *DataStreamsStats) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *DataStreamsStats { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/datastreamsstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/datastreamsstats/response.go index fbed95f15..361ac20ce 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/datastreamsstats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/datastreamsstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package datastreamsstats @@ -26,15 +26,23 @@ import ( // Response holds the response body struct for the package datastreamsstats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L25-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L25-L43 type Response struct { - BackingIndices int `json:"backing_indices"` - DataStreamCount int `json:"data_stream_count"` - DataStreams []types.DataStreamsStatsItem `json:"data_streams"` - Shards_ types.ShardStatistics `json:"_shards"` - TotalStoreSizeBytes int `json:"total_store_size_bytes"` - TotalStoreSizes 
types.ByteSize `json:"total_store_sizes,omitempty"` + + // BackingIndices Total number of backing indices for the selected data streams. + BackingIndices int `json:"backing_indices"` + // DataStreamCount Total number of selected data streams. + DataStreamCount int `json:"data_stream_count"` + // DataStreams Contains statistics for the selected data streams. + DataStreams []types.DataStreamsStatsItem `json:"data_streams"` + // Shards_ Contains information about shards that attempted to execute the request. + Shards_ types.ShardStatistics `json:"_shards"` + // TotalStoreSizeBytes Total size, in bytes, of all shards for the selected data streams. + TotalStoreSizeBytes int `json:"total_store_size_bytes"` + // TotalStoreSizes Total size of all shards for the selected data streams. + // This property is included only if the `human` query parameter is `true` + TotalStoreSizes types.ByteSize `json:"total_store_sizes,omitempty"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/delete/delete.go index 90297f855..0bbcc990b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/delete/delete.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/delete/delete.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an index. 
package delete @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -68,7 +69,7 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { return func(index string) *Delete { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -167,7 +168,6 @@ func (r Delete) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -176,6 +176,10 @@ func (r Delete) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -207,54 +211,70 @@ func (r *Delete) Header(key, value string) *Delete { return r } -// Index A comma-separated list of indices to delete; use `_all` or `*` string to -// delete all indices +// Index Comma-separated list of indices to delete. +// You cannot specify index aliases. +// By default, this parameter does not support wildcards (`*`) or `_all`. +// To use wildcards or `_all`, set the `action.destructive_requires_name` +// cluster setting to `false`. // API Name: index -func (r *Delete) Index(v string) *Delete { +func (r *Delete) _index(index string) *Delete { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Ignore if a wildcard expression resolves to no concrete indices (default: -// false) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. 
// API name: allow_no_indices -func (r *Delete) AllowNoIndices(b bool) *Delete { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *Delete) AllowNoIndices(allownoindices bool) *Delete { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether wildcard expressions should get expanded to open, closed, or hidden -// indices +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *Delete) ExpandWildcards(v string) *Delete { - r.values.Set("expand_wildcards", v) +func (r *Delete) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Delete { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// IgnoreUnavailable Ignore unavailable indexes (default: false) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *Delete) IgnoreUnavailable(b bool) *Delete { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Delete) IgnoreUnavailable(ignoreunavailable bool) *Delete { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
// API name: master_timeout -func (r *Delete) MasterTimeout(v string) *Delete { - r.values.Set("master_timeout", v) +func (r *Delete) MasterTimeout(duration string) *Delete { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *Delete) Timeout(v string) *Delete { - r.values.Set("timeout", v) +func (r *Delete) Timeout(duration string) *Delete { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/delete/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/delete/response.go index 37e220bfc..f74bd3881 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/delete/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/delete/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package delete @@ -26,10 +26,14 @@ import ( // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/delete/IndicesDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/delete/IndicesDeleteResponse.ts#L22-L24 type Response struct { - Shards_ *types.ShardStatistics `json:"_shards,omitempty"` + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` + Shards_ *types.ShardStatistics `json:"_shards,omitempty"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletealias/delete_alias.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletealias/delete_alias.go index 02761451b..ceba66e97 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletealias/delete_alias.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletealias/delete_alias.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an alias. package deletealias @@ -70,9 +70,9 @@ func NewDeleteAliasFunc(tp elastictransport.Interface) NewDeleteAlias { return func(index, name string) *DeleteAlias { n := New(tp) - n.Index(index) + n._index(index) - n.Name(name) + n._name(name) return n } @@ -187,7 +187,6 @@ func (r DeleteAlias) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -196,6 +195,10 @@ func (r DeleteAlias) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -227,38 +230,42 @@ func (r *DeleteAlias) Header(key, value string) *DeleteAlias { return r } -// Index A comma-separated list of index names (supports wildcards); use `_all` for -// all indices +// Index Comma-separated list of data streams or indices used to limit the request. +// Supports wildcards (`*`). 
// API Name: index -func (r *DeleteAlias) Index(v string) *DeleteAlias { +func (r *DeleteAlias) _index(index string) *DeleteAlias { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// Name A comma-separated list of aliases to delete (supports wildcards); use `_all` -// to delete all aliases for the specified indices. +// Name Comma-separated list of aliases to remove. +// Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. // API Name: name -func (r *DeleteAlias) Name(v string) *DeleteAlias { +func (r *DeleteAlias) _name(name string) *DeleteAlias { r.paramSet |= nameMask - r.name = v + r.name = name return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *DeleteAlias) MasterTimeout(v string) *DeleteAlias { - r.values.Set("master_timeout", v) +func (r *DeleteAlias) MasterTimeout(duration string) *DeleteAlias { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit timestamp for the document +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *DeleteAlias) Timeout(v string) *DeleteAlias { - r.values.Set("timeout", v) +func (r *DeleteAlias) Timeout(duration string) *DeleteAlias { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletealias/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletealias/response.go index 24bd7dbe1..42647deef 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletealias/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletealias/response.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletealias // Response holds the response body struct for the package deletealias // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/delete_alias/IndicesDeleteAliasResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/delete_alias/IndicesDeleteAliasResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatalifecycle/delete_data_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatalifecycle/delete_data_lifecycle.go new file mode 100644 index 000000000..2e9a1ced5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatalifecycle/delete_data_lifecycle.go @@ -0,0 +1,254 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Deletes the data stream lifecycle of the selected data streams. +package deletedatalifecycle + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteDataLifecycle struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + name string +} + +// NewDeleteDataLifecycle type alias for index. +type NewDeleteDataLifecycle func(name string) *DeleteDataLifecycle + +// NewDeleteDataLifecycleFunc returns a new instance of DeleteDataLifecycle with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteDataLifecycleFunc(tp elastictransport.Interface) NewDeleteDataLifecycle { + return func(name string) *DeleteDataLifecycle { + n := New(tp) + + n._name(name) + + return n + } +} + +// Deletes the data stream lifecycle of the selected data streams. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-delete-lifecycle.html +func New(tp elastictransport.Interface) *DeleteDataLifecycle { + r := &DeleteDataLifecycle{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteDataLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_lifecycle") + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteDataLifecycle) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the DeleteDataLifecycle query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletedatalifecycle.Response +func (r DeleteDataLifecycle) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteDataLifecycle) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the DeleteDataLifecycle headers map. 
+func (r *DeleteDataLifecycle) Header(key, value string) *DeleteDataLifecycle { + r.headers.Set(key, value) + + return r +} + +// Name A comma-separated list of data streams of which the data stream lifecycle +// will be deleted; use `*` to get all data streams +// API Name: name +func (r *DeleteDataLifecycle) _name(name string) *DeleteDataLifecycle { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ExpandWildcards Whether wildcard expressions should get expanded to open or closed indices +// (default: open) +// API name: expand_wildcards +func (r *DeleteDataLifecycle) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *DeleteDataLifecycle { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// MasterTimeout Specify timeout for connection to master +// API name: master_timeout +func (r *DeleteDataLifecycle) MasterTimeout(duration string) *DeleteDataLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Explicit timestamp for the document +// API name: timeout +func (r *DeleteDataLifecycle) Timeout(duration string) *DeleteDataLifecycle { + r.values.Set("timeout", duration) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatalifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatalifecycle/response.go new file mode 100644 index 000000000..be2089224 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatalifecycle/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package deletedatalifecycle + +// Response holds the response body struct for the package deletedatalifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/delete_data_lifecycle/IndicesDeleteDataLifecycleResponse.ts#L22-L24 + +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatastream/delete_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatastream/delete_data_stream.go index 36373c62c..e6349d3b6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatastream/delete_data_stream.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatastream/delete_data_stream.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes a data stream. package deletedatastream @@ -35,6 +35,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -67,7 +68,7 @@ func NewDeleteDataStreamFunc(tp elastictransport.Interface) NewDeleteDataStream return func(name string) *DeleteDataStream { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -168,7 +169,6 @@ func (r DeleteDataStream) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -177,6 +177,10 @@ func (r DeleteDataStream) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -208,21 +212,25 @@ func (r *DeleteDataStream) Header(key, value string) *DeleteDataStream { return r } -// Name A comma-separated list of data streams to delete; use `*` to delete all data -// streams +// Name Comma-separated list of data streams to delete. Wildcard (`*`) expressions +// are supported. // API Name: name -func (r *DeleteDataStream) Name(v string) *DeleteDataStream { +func (r *DeleteDataStream) _name(name string) *DeleteDataStream { r.paramSet |= nameMask - r.name = v + r.name = name return r } -// ExpandWildcards Whether wildcard expressions should get expanded to open or closed indices -// (default: open) +// ExpandWildcards Type of data stream that wildcard patterns can match. Supports +// comma-separated values,such as `open,hidden`. 
// API name: expand_wildcards -func (r *DeleteDataStream) ExpandWildcards(v string) *DeleteDataStream { - r.values.Set("expand_wildcards", v) +func (r *DeleteDataStream) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *DeleteDataStream { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatastream/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatastream/response.go index 443d2f636..a9118e446 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatastream/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatastream/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletedatastream // Response holds the response body struct for the package deletedatastream // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/delete_data_stream/IndicesDeleteDataStreamResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/delete_data_stream/IndicesDeleteDataStreamResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deleteindextemplate/delete_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deleteindextemplate/delete_index_template.go index 18a1598a8..a4d2914ea 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deleteindextemplate/delete_index_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deleteindextemplate/delete_index_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an index template. package deleteindextemplate @@ -67,7 +67,7 @@ func NewDeleteIndexTemplateFunc(tp elastictransport.Interface) NewDeleteIndexTem return func(name string) *DeleteIndexTemplate { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -168,7 +168,6 @@ func (r DeleteIndexTemplate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -177,6 +176,10 @@ func (r DeleteIndexTemplate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -211,9 +214,9 @@ func (r *DeleteIndexTemplate) Header(key, value string) *DeleteIndexTemplate { // Name Comma-separated list of index template names used to limit the request. // Wildcard (*) expressions are supported. // API Name: name -func (r *DeleteIndexTemplate) Name(v string) *DeleteIndexTemplate { +func (r *DeleteIndexTemplate) _name(name string) *DeleteIndexTemplate { r.paramSet |= nameMask - r.name = v + r.name = name return r } @@ -221,8 +224,8 @@ func (r *DeleteIndexTemplate) Name(v string) *DeleteIndexTemplate { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. 
// API name: master_timeout -func (r *DeleteIndexTemplate) MasterTimeout(v string) *DeleteIndexTemplate { - r.values.Set("master_timeout", v) +func (r *DeleteIndexTemplate) MasterTimeout(duration string) *DeleteIndexTemplate { + r.values.Set("master_timeout", duration) return r } @@ -230,8 +233,8 @@ func (r *DeleteIndexTemplate) MasterTimeout(v string) *DeleteIndexTemplate { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. // API name: timeout -func (r *DeleteIndexTemplate) Timeout(v string) *DeleteIndexTemplate { - r.values.Set("timeout", v) +func (r *DeleteIndexTemplate) Timeout(duration string) *DeleteIndexTemplate { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deleteindextemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deleteindextemplate/response.go index 09b39943f..c88b97381 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deleteindextemplate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deleteindextemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deleteindextemplate // Response holds the response body struct for the package deleteindextemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/delete_index_template/IndicesDeleteIndexTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/delete_index_template/IndicesDeleteIndexTemplateResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletetemplate/delete_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletetemplate/delete_template.go index 80cd02b0b..92e48972f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletetemplate/delete_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletetemplate/delete_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an index template. 
package deletetemplate @@ -67,7 +67,7 @@ func NewDeleteTemplateFunc(tp elastictransport.Interface) NewDeleteTemplate { return func(name string) *DeleteTemplate { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -168,7 +168,6 @@ func (r DeleteTemplate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -177,6 +176,10 @@ func (r DeleteTemplate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -208,27 +211,32 @@ func (r *DeleteTemplate) Header(key, value string) *DeleteTemplate { return r } -// Name The name of the template +// Name The name of the legacy index template to delete. +// Wildcard (`*`) expressions are supported. // API Name: name -func (r *DeleteTemplate) Name(v string) *DeleteTemplate { +func (r *DeleteTemplate) _name(name string) *DeleteTemplate { r.paramSet |= nameMask - r.name = v + r.name = name return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *DeleteTemplate) MasterTimeout(v string) *DeleteTemplate { - r.values.Set("master_timeout", v) +func (r *DeleteTemplate) MasterTimeout(duration string) *DeleteTemplate { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
// API name: timeout -func (r *DeleteTemplate) Timeout(v string) *DeleteTemplate { - r.values.Set("timeout", v) +func (r *DeleteTemplate) Timeout(duration string) *DeleteTemplate { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletetemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletetemplate/response.go index bb548380e..40968fef1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletetemplate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletetemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletetemplate // Response holds the response body struct for the package deletetemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/delete_template/IndicesDeleteTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/delete_template/IndicesDeleteTemplateResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/diskusage/disk_usage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/diskusage/disk_usage.go index 4b5872067..1353fe482 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/diskusage/disk_usage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/diskusage/disk_usage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Analyzes the disk usage of each field of an index or data stream package diskusage @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -68,7 +69,7 @@ func NewDiskUsageFunc(tp elastictransport.Interface) NewDiskUsage { return func(index string) *DiskUsage { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -76,7 +77,7 @@ func NewDiskUsageFunc(tp elastictransport.Interface) NewDiskUsage { // Analyzes the disk usage of each field of an index or data stream // -// https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-disk-usage.html +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-disk-usage.html func New(tp elastictransport.Interface) *DiskUsage { r := &DiskUsage{ transport: tp, @@ -169,7 +170,6 @@ func (r DiskUsage) Do(ctx context.Context) (Response, error) { } return *response, nil - } errorResponse := types.NewElasticsearchError() @@ -178,6 +178,10 @@ func (r DiskUsage) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -210,61 +214,66 @@ func (r *DiskUsage) Header(key, value string) *DiskUsage { } // Index Comma-separated list of data streams, indices, and aliases used to limit the -// request. It’s recommended to execute this API with a single index (or the -// latest backing index of a data stream) as the API consumes resources -// significantly. +// request. 
+// It’s recommended to execute this API with a single index (or the latest +// backing index of a data stream) as the API consumes resources significantly. // API Name: index -func (r *DiskUsage) Index(v string) *DiskUsage { +func (r *DiskUsage) _index(index string) *DiskUsage { r.paramSet |= indexMask - r.index = v + r.index = index return r } // AllowNoIndices If false, the request returns an error if any wildcard expression, index -// alias, or _all value targets only missing or closed indices. This behavior -// applies even if the request targets other open indices. For example, a -// request targeting foo*,bar* returns an error if an index starts with foo but -// no index starts with bar. +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. +// For example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. // API name: allow_no_indices -func (r *DiskUsage) AllowNoIndices(b bool) *DiskUsage { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *DiskUsage) AllowNoIndices(allownoindices bool) *DiskUsage { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Type of index that wildcard patterns can match. If the request can target -// data streams, this argument determines whether wildcard expressions match -// hidden data streams. Supports comma-separated values, such as open,hidden. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. 
// API name: expand_wildcards -func (r *DiskUsage) ExpandWildcards(v string) *DiskUsage { - r.values.Set("expand_wildcards", v) +func (r *DiskUsage) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *DiskUsage { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// Flush If true, the API performs a flush before analysis. If false, the response may -// not include uncommitted data. +// Flush If `true`, the API performs a flush before analysis. +// If `false`, the response may not include uncommitted data. // API name: flush -func (r *DiskUsage) Flush(b bool) *DiskUsage { - r.values.Set("flush", strconv.FormatBool(b)) +func (r *DiskUsage) Flush(flush bool) *DiskUsage { + r.values.Set("flush", strconv.FormatBool(flush)) return r } -// IgnoreUnavailable If true, missing or closed indices are not included in the response. +// IgnoreUnavailable If `true`, missing or closed indices are not included in the response. // API name: ignore_unavailable -func (r *DiskUsage) IgnoreUnavailable(b bool) *DiskUsage { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *DiskUsage) IgnoreUnavailable(ignoreunavailable bool) *DiskUsage { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// RunExpensiveTasks Analyzing field disk usage is resource-intensive. To use the API, this -// parameter must be set to true. +// RunExpensiveTasks Analyzing field disk usage is resource-intensive. +// To use the API, this parameter must be set to `true`. 
// API name: run_expensive_tasks -func (r *DiskUsage) RunExpensiveTasks(b bool) *DiskUsage { - r.values.Set("run_expensive_tasks", strconv.FormatBool(b)) +func (r *DiskUsage) RunExpensiveTasks(runexpensivetasks bool) *DiskUsage { + r.values.Set("run_expensive_tasks", strconv.FormatBool(runexpensivetasks)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/diskusage/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/diskusage/response.go index b3aab9843..553eeb620 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/diskusage/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/diskusage/response.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package diskusage -import "encoding/json" +import ( + "encoding/json" +) // Response holds the response body struct for the package diskusage // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/disk_usage/IndicesDiskUsageResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/disk_usage/IndicesDiskUsageResponse.ts#L22-L24 -type Response json.RawMessage +type Response = json.RawMessage + +func NewResponse() *Response { + return new(Response) +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/downsample/downsample.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/downsample/downsample.go index 9d314b76e..c4d8557b7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/downsample/downsample.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/downsample/downsample.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Downsample an index package downsample @@ -54,8 +54,9 @@ type Downsample struct { buf *gobytes.Buffer - req *types.DownsampleConfig - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -72,9 +73,9 @@ func NewDownsampleFunc(tp elastictransport.Interface) NewDownsample { return func(index, targetindex string) *Downsample { n := New(tp) - n.Index(index) + n._index(index) - n.TargetIndex(targetindex) + n._targetindex(targetindex) return n } @@ -82,7 +83,7 @@ func NewDownsampleFunc(tp elastictransport.Interface) NewDownsample { // Downsample an index // -// https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-rollup.html +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-downsample-data-stream.html func New(tp elastictransport.Interface) *Downsample { r := &Downsample{ transport: tp, @@ -103,7 +104,7 @@ func (r *Downsample) Raw(raw io.Reader) *Downsample { } // Request allows to set the request property with the appropriate payload. 
-func (r *Downsample) Request(req *types.DownsampleConfig) *Downsample { +func (r *Downsample) Request(req *Request) *Downsample { r.req = req return r @@ -118,9 +119,19 @@ func (r *Downsample) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -128,6 +139,7 @@ func (r *Downsample) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -211,7 +223,6 @@ func (r Downsample) Do(ctx context.Context) (Response, error) { } return *response, nil - } errorResponse := types.NewElasticsearchError() @@ -220,6 +231,10 @@ func (r Downsample) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -230,20 +245,28 @@ func (r *Downsample) Header(key, value string) *Downsample { return r } -// Index The index to downsample +// Index Name of the time series index to downsample. // API Name: index -func (r *Downsample) Index(v string) *Downsample { +func (r *Downsample) _index(index string) *Downsample { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// TargetIndex The name of the target index to store downsampled data +// TargetIndex Name of the index to create. // API Name: targetindex -func (r *Downsample) TargetIndex(v string) *Downsample { +func (r *Downsample) _targetindex(targetindex string) *Downsample { r.paramSet |= targetindexMask - r.targetindex = v + r.targetindex = targetindex + + return r +} + +// FixedInterval The interval at which to aggregate the original time series index. 
+// API name: fixed_interval +func (r *Downsample) FixedInterval(durationlarge string) *Downsample { + r.req.FixedInterval = durationlarge return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/downsample/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/downsample/request.go new file mode 100644 index 000000000..ff66ed3db --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/downsample/request.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package downsample + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package downsample +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/downsample/Request.ts#L24-L44 +type Request = types.DownsampleConfig diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/downsample/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/downsample/response.go index b72411307..019f0241f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/downsample/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/downsample/response.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package downsample -import "encoding/json" +import ( + "encoding/json" +) // Response holds the response body struct for the package downsample // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/downsample/Response.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/downsample/Response.ts#L22-L24 -type Response json.RawMessage +type Response = json.RawMessage + +func NewResponse() *Response { + return new(Response) +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/exists/exists.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/exists/exists.go index 3c1a1bc1e..b302e9317 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/exists/exists.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/exists/exists.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about whether a particular index exists. package exists @@ -24,7 +24,6 @@ package exists import ( gobytes "bytes" "context" - "encoding/json" "errors" "fmt" "io" @@ -35,7 +34,7 @@ import ( "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -68,7 +67,7 @@ func NewExistsFunc(tp elastictransport.Interface) NewExists { return func(index string) *Exists { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -150,33 +149,8 @@ func (r Exists) Perform(ctx context.Context) (*http.Response, error) { } // Do runs the request through the transport, handle the response and returns a exists.Response -func (r Exists) Do(ctx context.Context) (*Response, error) { - - response := NewResponse() - - res, err := r.Perform(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() - - if res.StatusCode < 299 { - err = json.NewDecoder(res.Body).Decode(response) - if err != nil { - return nil, err - } - - return response, nil - - } - - errorResponse := types.NewElasticsearchError() - err = json.NewDecoder(res.Body).Decode(errorResponse) - if err != nil { - return nil, err - } - - return nil, errorResponse +func (r Exists) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. 
@@ -207,62 +181,71 @@ func (r *Exists) Header(key, value string) *Exists { return r } -// Index A comma-separated list of index names +// Index Comma-separated list of data streams, indices, and aliases. Supports +// wildcards (`*`). // API Name: index -func (r *Exists) Index(v string) *Exists { +func (r *Exists) _index(index string) *Exists { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Ignore if a wildcard expression resolves to no concrete indices (default: -// false) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. // API name: allow_no_indices -func (r *Exists) AllowNoIndices(b bool) *Exists { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *Exists) AllowNoIndices(allownoindices bool) *Exists { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether wildcard expressions should get expanded to open or closed indices -// (default: open) +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *Exists) ExpandWildcards(v string) *Exists { - r.values.Set("expand_wildcards", v) +func (r *Exists) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Exists { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// FlatSettings Return settings in flat format (default: false) +// FlatSettings If `true`, returns settings in flat format. 
// API name: flat_settings -func (r *Exists) FlatSettings(b bool) *Exists { - r.values.Set("flat_settings", strconv.FormatBool(b)) +func (r *Exists) FlatSettings(flatsettings bool) *Exists { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) return r } -// IgnoreUnavailable Ignore unavailable indexes (default: false) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *Exists) IgnoreUnavailable(b bool) *Exists { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Exists) IgnoreUnavailable(ignoreunavailable bool) *Exists { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// IncludeDefaults Whether to return all default setting for each of the indices. +// IncludeDefaults If `true`, return all default settings in the response. // API name: include_defaults -func (r *Exists) IncludeDefaults(b bool) *Exists { - r.values.Set("include_defaults", strconv.FormatBool(b)) +func (r *Exists) IncludeDefaults(includedefaults bool) *Exists { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) return r } -// Local Return local information, do not retrieve the state from master node -// (default: false) +// Local If `true`, the request retrieves information from the local node only. 
// API name: local -func (r *Exists) Local(b bool) *Exists { - r.values.Set("local", strconv.FormatBool(b)) +func (r *Exists) Local(local bool) *Exists { + r.values.Set("local", strconv.FormatBool(local)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsalias/exists_alias.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsalias/exists_alias.go index 494bfd978..67b17b2b4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsalias/exists_alias.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsalias/exists_alias.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about whether a particular alias exists. 
package existsalias @@ -24,7 +24,6 @@ package existsalias import ( gobytes "bytes" "context" - "encoding/json" "errors" "fmt" "io" @@ -35,7 +34,7 @@ import ( "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -71,7 +70,7 @@ func NewExistsAliasFunc(tp elastictransport.Interface) NewExistsAlias { return func(name string) *ExistsAlias { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -166,33 +165,8 @@ func (r ExistsAlias) Perform(ctx context.Context) (*http.Response, error) { } // Do runs the request through the transport, handle the response and returns a existsalias.Response -func (r ExistsAlias) Do(ctx context.Context) (*Response, error) { - - response := NewResponse() - - res, err := r.Perform(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() - - if res.StatusCode < 299 { - err = json.NewDecoder(res.Body).Decode(response) - if err != nil { - return nil, err - } - - return response, nil - - } - - errorResponse := types.NewElasticsearchError() - err = json.NewDecoder(res.Body).Decode(errorResponse) - if err != nil { - return nil, err - } - - return nil, errorResponse +func (r ExistsAlias) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. @@ -223,56 +197,66 @@ func (r *ExistsAlias) Header(key, value string) *ExistsAlias { return r } -// Name A comma-separated list of alias names to return +// Name Comma-separated list of aliases to check. Supports wildcards (`*`). 
// API Name: name -func (r *ExistsAlias) Name(v string) *ExistsAlias { +func (r *ExistsAlias) _name(name string) *ExistsAlias { r.paramSet |= nameMask - r.name = v + r.name = name return r } -// Index A comma-separated list of index names to filter aliases +// Index Comma-separated list of data streams or indices used to limit the request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. // API Name: index -func (r *ExistsAlias) Index(v string) *ExistsAlias { +func (r *ExistsAlias) Index(index string) *ExistsAlias { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. // API name: allow_no_indices -func (r *ExistsAlias) AllowNoIndices(b bool) *ExistsAlias { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *ExistsAlias) AllowNoIndices(allownoindices bool) *ExistsAlias { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. 
// API name: expand_wildcards -func (r *ExistsAlias) ExpandWildcards(v string) *ExistsAlias { - r.values.Set("expand_wildcards", v) +func (r *ExistsAlias) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ExistsAlias { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, requests that include a missing data stream or index in the +// target indices or data streams return an error. // API name: ignore_unavailable -func (r *ExistsAlias) IgnoreUnavailable(b bool) *ExistsAlias { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *ExistsAlias) IgnoreUnavailable(ignoreunavailable bool) *ExistsAlias { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// Local Return local information, do not retrieve the state from master node -// (default: false) +// Local If `true`, the request retrieves information from the local node only. // API name: local -func (r *ExistsAlias) Local(b bool) *ExistsAlias { - r.values.Set("local", strconv.FormatBool(b)) +func (r *ExistsAlias) Local(local bool) *ExistsAlias { + r.values.Set("local", strconv.FormatBool(local)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsalias/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsalias/response.go deleted file mode 100644 index 4786f663f..000000000 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsalias/response.go +++ /dev/null @@ -1,34 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 - -package existsalias - -// Response holds the response body struct for the package existsalias -// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/exists_alias/IndicesExistsAliasResponse.ts#L22-L24 - -type Response struct { -} - -// NewResponse returns a Response -func NewResponse() *Response { - r := &Response{} - return r -} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsindextemplate/exists_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsindextemplate/exists_index_template.go index 98cbf1979..00d21636f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsindextemplate/exists_index_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsindextemplate/exists_index_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about whether a particular index template exists. package existsindextemplate @@ -24,7 +24,6 @@ package existsindextemplate import ( gobytes "bytes" "context" - "encoding/json" "errors" "fmt" "io" @@ -34,7 +33,6 @@ import ( "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) const ( @@ -67,7 +65,7 @@ func NewExistsIndexTemplateFunc(tp elastictransport.Interface) NewExistsIndexTem return func(name string) *ExistsIndexTemplate { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -151,33 +149,8 @@ func (r ExistsIndexTemplate) Perform(ctx context.Context) (*http.Response, error } // Do runs the request through the transport, handle the response and returns a existsindextemplate.Response -func (r ExistsIndexTemplate) Do(ctx context.Context) (*Response, error) { - - response := NewResponse() - - res, err := r.Perform(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() - - if res.StatusCode < 299 { - err = json.NewDecoder(res.Body).Decode(response) - if err != nil { - return nil, err - } - - return response, nil - - } - - errorResponse := types.NewElasticsearchError() - err = json.NewDecoder(res.Body).Decode(errorResponse) - if err != nil { - return nil, err - } - - return nil, errorResponse +func (r ExistsIndexTemplate) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. @@ -211,9 +184,9 @@ func (r *ExistsIndexTemplate) Header(key, value string) *ExistsIndexTemplate { // Name Comma-separated list of index template names used to limit the request. // Wildcard (*) expressions are supported. 
// API Name: name -func (r *ExistsIndexTemplate) Name(v string) *ExistsIndexTemplate { +func (r *ExistsIndexTemplate) _name(name string) *ExistsIndexTemplate { r.paramSet |= nameMask - r.name = v + r.name = name return r } @@ -221,8 +194,8 @@ func (r *ExistsIndexTemplate) Name(v string) *ExistsIndexTemplate { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *ExistsIndexTemplate) MasterTimeout(v string) *ExistsIndexTemplate { - r.values.Set("master_timeout", v) +func (r *ExistsIndexTemplate) MasterTimeout(duration string) *ExistsIndexTemplate { + r.values.Set("master_timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsindextemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsindextemplate/response.go deleted file mode 100644 index ab60ade5e..000000000 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsindextemplate/response.go +++ /dev/null @@ -1,34 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 - -package existsindextemplate - -// Response holds the response body struct for the package existsindextemplate -// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/exists_index_template/IndicesExistsIndexTemplateResponse.ts#L22-L29 - -type Response struct { -} - -// NewResponse returns a Response -func NewResponse() *Response { - r := &Response{} - return r -} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existstemplate/exists_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existstemplate/exists_template.go index 7daad8ec1..6287f4bf2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existstemplate/exists_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existstemplate/exists_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about whether a particular index template exists. 
package existstemplate @@ -24,7 +24,6 @@ package existstemplate import ( gobytes "bytes" "context" - "encoding/json" "errors" "fmt" "io" @@ -35,7 +34,6 @@ import ( "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) const ( @@ -68,7 +66,7 @@ func NewExistsTemplateFunc(tp elastictransport.Interface) NewExistsTemplate { return func(name string) *ExistsTemplate { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -152,33 +150,8 @@ func (r ExistsTemplate) Perform(ctx context.Context) (*http.Response, error) { } // Do runs the request through the transport, handle the response and returns a existstemplate.Response -func (r ExistsTemplate) Do(ctx context.Context) (*Response, error) { - - response := NewResponse() - - res, err := r.Perform(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() - - if res.StatusCode < 299 { - err = json.NewDecoder(res.Body).Decode(response) - if err != nil { - return nil, err - } - - return response, nil - - } - - errorResponse := types.NewElasticsearchError() - err = json.NewDecoder(res.Body).Decode(errorResponse) - if err != nil { - return nil, err - } - - return nil, errorResponse +func (r ExistsTemplate) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. 
@@ -211,17 +184,17 @@ func (r *ExistsTemplate) Header(key, value string) *ExistsTemplate { // Name The comma separated names of the index templates // API Name: name -func (r *ExistsTemplate) Name(v string) *ExistsTemplate { +func (r *ExistsTemplate) _name(name string) *ExistsTemplate { r.paramSet |= nameMask - r.name = v + r.name = name return r } // FlatSettings Return settings in flat format (default: false) // API name: flat_settings -func (r *ExistsTemplate) FlatSettings(b bool) *ExistsTemplate { - r.values.Set("flat_settings", strconv.FormatBool(b)) +func (r *ExistsTemplate) FlatSettings(flatsettings bool) *ExistsTemplate { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) return r } @@ -229,16 +202,16 @@ func (r *ExistsTemplate) FlatSettings(b bool) *ExistsTemplate { // Local Return local information, do not retrieve the state from master node // (default: false) // API name: local -func (r *ExistsTemplate) Local(b bool) *ExistsTemplate { - r.values.Set("local", strconv.FormatBool(b)) +func (r *ExistsTemplate) Local(local bool) *ExistsTemplate { + r.values.Set("local", strconv.FormatBool(local)) return r } // MasterTimeout Explicit operation timeout for connection to master node // API name: master_timeout -func (r *ExistsTemplate) MasterTimeout(v string) *ExistsTemplate { - r.values.Set("master_timeout", v) +func (r *ExistsTemplate) MasterTimeout(duration string) *ExistsTemplate { + r.values.Set("master_timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existstemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existstemplate/response.go deleted file mode 100644 index b98edc7ad..000000000 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/existstemplate/response.go +++ /dev/null @@ -1,34 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 - -package existstemplate - -// Response holds the response body struct for the package existstemplate -// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/exists_template/IndicesExistsTemplateResponse.ts#L22-L24 - -type Response struct { -} - -// NewResponse returns a Response -func NewResponse() *Response { - r := &Response{} - return r -} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/explaindatalifecycle/explain_data_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/explaindatalifecycle/explain_data_lifecycle.go new file mode 100644 index 000000000..cf3e00e2e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/explaindatalifecycle/explain_data_lifecycle.go @@ -0,0 +1,243 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Retrieves information about the index's current data stream lifecycle, such +// as any potential encountered error, time since creation etc. +package explaindatalifecycle + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + indexMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ExplainDataLifecycle struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + index string +} + +// NewExplainDataLifecycle type alias for index. +type NewExplainDataLifecycle func(index string) *ExplainDataLifecycle + +// NewExplainDataLifecycleFunc returns a new instance of ExplainDataLifecycle with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewExplainDataLifecycleFunc(tp elastictransport.Interface) NewExplainDataLifecycle { + return func(index string) *ExplainDataLifecycle { + n := New(tp) + + n._index(index) + + return n + } +} + +// Retrieves information about the index's current data stream lifecycle, such +// as any potential encountered error, time since creation etc. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-explain-lifecycle.html +func New(tp elastictransport.Interface) *ExplainDataLifecycle { + r := &ExplainDataLifecycle{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ExplainDataLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == indexMask: + path.WriteString("/") + + path.WriteString(r.index) + path.WriteString("/") + path.WriteString("_lifecycle") + path.WriteString("/") + path.WriteString("explain") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r ExplainDataLifecycle) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the ExplainDataLifecycle query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a explaindatalifecycle.Response +func (r ExplainDataLifecycle) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r ExplainDataLifecycle) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the ExplainDataLifecycle headers map. 
+func (r *ExplainDataLifecycle) Header(key, value string) *ExplainDataLifecycle { + r.headers.Set(key, value) + + return r +} + +// Index The name of the index to explain +// API Name: index +func (r *ExplainDataLifecycle) _index(index string) *ExplainDataLifecycle { + r.paramSet |= indexMask + r.index = index + + return r +} + +// IncludeDefaults indicates if the API should return the default values the system uses for the +// index's lifecycle +// API name: include_defaults +func (r *ExplainDataLifecycle) IncludeDefaults(includedefaults bool) *ExplainDataLifecycle { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) + + return r +} + +// MasterTimeout Specify timeout for connection to master +// API name: master_timeout +func (r *ExplainDataLifecycle) MasterTimeout(duration string) *ExplainDataLifecycle { + r.values.Set("master_timeout", duration) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/explaindatalifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/explaindatalifecycle/response.go new file mode 100644 index 000000000..4e8469263 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/explaindatalifecycle/response.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package explaindatalifecycle + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package explaindatalifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L25-L29 + +type Response struct { + Indices map[string]types.DataStreamLifecycleExplain `json:"indices"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Indices: make(map[string]types.DataStreamLifecycleExplain, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/fieldusagestats/field_usage_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/fieldusagestats/field_usage_stats.go index d08138a1b..913a660f2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/fieldusagestats/field_usage_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/fieldusagestats/field_usage_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns the field usage stats for each field of an index package fieldusagestats @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -68,7 +69,7 @@ func NewFieldUsageStatsFunc(tp elastictransport.Interface) NewFieldUsageStats { return func(index string) *FieldUsageStats { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -169,7 +170,6 @@ func (r FieldUsageStats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -178,6 +178,10 @@ func (r FieldUsageStats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,43 +216,44 @@ func (r *FieldUsageStats) Header(key, value string) *FieldUsageStats { // Index Comma-separated list or wildcard expression of index names used to limit the // request. // API Name: index -func (r *FieldUsageStats) Index(v string) *FieldUsageStats { +func (r *FieldUsageStats) _index(index string) *FieldUsageStats { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices If false, the request returns an error if any wildcard expression, index -// alias, or _all value targets -// only missing or closed indices. This behavior applies even if the request -// targets other open indices. +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. 
// For example, a request targeting `foo*,bar*` returns an error if an index -// starts with `foo` but no index -// starts with `bar`. +// starts with `foo` but no index starts with `bar`. // API name: allow_no_indices -func (r *FieldUsageStats) AllowNoIndices(b bool) *FieldUsageStats { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *FieldUsageStats) AllowNoIndices(allownoindices bool) *FieldUsageStats { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Type of index that wildcard patterns can match. If the request can target -// data streams, this argument -// determines whether wildcard expressions match hidden data streams. Supports -// comma-separated values, -// such as `open,hidden`. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. // API name: expand_wildcards -func (r *FieldUsageStats) ExpandWildcards(v string) *FieldUsageStats { - r.values.Set("expand_wildcards", v) +func (r *FieldUsageStats) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *FieldUsageStats { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// IgnoreUnavailable If true, missing or closed indices are not included in the response. +// IgnoreUnavailable If `true`, missing or closed indices are not included in the response. 
// API name: ignore_unavailable -func (r *FieldUsageStats) IgnoreUnavailable(b bool) *FieldUsageStats { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *FieldUsageStats) IgnoreUnavailable(ignoreunavailable bool) *FieldUsageStats { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } @@ -256,39 +261,39 @@ func (r *FieldUsageStats) IgnoreUnavailable(b bool) *FieldUsageStats { // Fields Comma-separated list or wildcard expressions of fields to include in the // statistics. // API name: fields -func (r *FieldUsageStats) Fields(v string) *FieldUsageStats { - r.values.Set("fields", v) +func (r *FieldUsageStats) Fields(fields ...string) *FieldUsageStats { + r.values.Set("fields", strings.Join(fields, ",")) return r } -// MasterTimeout Period to wait for a connection to the master node. If no response is -// received before the timeout expires, -// the request fails and returns an error. +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *FieldUsageStats) MasterTimeout(v string) *FieldUsageStats { - r.values.Set("master_timeout", v) +func (r *FieldUsageStats) MasterTimeout(duration string) *FieldUsageStats { + r.values.Set("master_timeout", duration) return r } -// Timeout Period to wait for a response. If no response is received before the timeout -// expires, the request fails -// and returns an error. +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
// API name: timeout -func (r *FieldUsageStats) Timeout(v string) *FieldUsageStats { - r.values.Set("timeout", v) +func (r *FieldUsageStats) Timeout(duration string) *FieldUsageStats { + r.values.Set("timeout", duration) return r } // WaitForActiveShards The number of shard copies that must be active before proceeding with the -// operation. Set to all or any -// positive integer up to the total number of shards in the index -// (`number_of_replicas+1`). +// operation. +// Set to all or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). // API name: wait_for_active_shards -func (r *FieldUsageStats) WaitForActiveShards(v string) *FieldUsageStats { - r.values.Set("wait_for_active_shards", v) +func (r *FieldUsageStats) WaitForActiveShards(waitforactiveshards string) *FieldUsageStats { + r.values.Set("wait_for_active_shards", waitforactiveshards) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/fieldusagestats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/fieldusagestats/response.go index cd125e65b..75433d11c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/fieldusagestats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/fieldusagestats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package fieldusagestats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package fieldusagestats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L28-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L28-L30 type Response struct { FieldsUsageBody map[string]types.UsageStatsIndex `json:"-"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/flush/flush.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/flush/flush.go index 265b4f028..2aa2328fc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/flush/flush.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/flush/flush.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Performs the flush operation on one or more indices. package flush @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -74,7 +75,7 @@ func NewFlushFunc(tp elastictransport.Interface) NewFlush { // Performs the flush operation on one or more indices. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.html +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-flush.html func New(tp elastictransport.Interface) *Flush { r := &Flush{ transport: tp, @@ -172,7 +173,6 @@ func (r Flush) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +181,10 @@ func (r Flush) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,61 +216,69 @@ func (r *Flush) Header(key, value string) *Flush { return r } -// Index A comma-separated list of index names; use `_all` or empty string for all -// indices +// Index Comma-separated list of data streams, indices, and aliases to flush. +// Supports wildcards (`*`). +// To flush all data streams and indices, omit this parameter or use `*` or +// `_all`. // API Name: index -func (r *Flush) Index(v string) *Flush { +func (r *Flush) Index(index string) *Flush { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. // API name: allow_no_indices -func (r *Flush) AllowNoIndices(b bool) *Flush { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *Flush) AllowNoIndices(allownoindices bool) *Flush { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. 
+// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *Flush) ExpandWildcards(v string) *Flush { - r.values.Set("expand_wildcards", v) +func (r *Flush) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Flush { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// Force Whether a flush should be forced even if it is not necessarily needed ie. if -// no changes will be committed to the index. This is useful if transaction log -// IDs should be incremented even if no uncommitted changes are present. (This -// setting can be considered as internal) +// Force If `true`, the request forces a flush even if there are no changes to commit +// to the index. // API name: force -func (r *Flush) Force(b bool) *Flush { - r.values.Set("force", strconv.FormatBool(b)) +func (r *Flush) Force(force bool) *Flush { + r.values.Set("force", strconv.FormatBool(force)) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *Flush) IgnoreUnavailable(b bool) *Flush { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Flush) IgnoreUnavailable(ignoreunavailable bool) *Flush { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// WaitIfOngoing If set to true the flush operation will block until the flush can be executed -// if another flush operation is already executing. 
The default is true. If set -// to false the flush will be skipped iff if another flush operation is already -// running. +// WaitIfOngoing If `true`, the flush operation blocks until execution when another flush +// operation is running. +// If `false`, Elasticsearch returns an error if you request a flush when +// another flush operation is running. // API name: wait_if_ongoing -func (r *Flush) WaitIfOngoing(b bool) *Flush { - r.values.Set("wait_if_ongoing", strconv.FormatBool(b)) +func (r *Flush) WaitIfOngoing(waitifongoing bool) *Flush { + r.values.Set("wait_if_ongoing", strconv.FormatBool(waitifongoing)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/flush/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/flush/response.go index 4a11b22e4..6e494dbde 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/flush/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/flush/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package flush @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package flush // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/flush/IndicesFlushResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/flush/IndicesFlushResponse.ts#L22-L24 type Response struct { Shards_ types.ShardStatistics `json:"_shards"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/forcemerge/forcemerge.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/forcemerge/forcemerge.go index d5f32fbf5..50f92c5d9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/forcemerge/forcemerge.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/forcemerge/forcemerge.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Performs the force merge operation on one or more indices. 
package forcemerge @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -172,7 +173,6 @@ func (r Forcemerge) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +181,10 @@ func (r Forcemerge) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -215,9 +219,9 @@ func (r *Forcemerge) Header(key, value string) *Forcemerge { // Index A comma-separated list of index names; use `_all` or empty string to perform // the operation on all indices // API Name: index -func (r *Forcemerge) Index(v string) *Forcemerge { +func (r *Forcemerge) Index(index string) *Forcemerge { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -225,8 +229,8 @@ func (r *Forcemerge) Index(v string) *Forcemerge { // AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete // indices. (This includes `_all` string or when no indices have been specified) // API name: allow_no_indices -func (r *Forcemerge) AllowNoIndices(b bool) *Forcemerge { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *Forcemerge) AllowNoIndices(allownoindices bool) *Forcemerge { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } @@ -234,8 +238,12 @@ func (r *Forcemerge) AllowNoIndices(b bool) *Forcemerge { // ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, // closed or both. 
// API name: expand_wildcards -func (r *Forcemerge) ExpandWildcards(v string) *Forcemerge { - r.values.Set("expand_wildcards", v) +func (r *Forcemerge) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Forcemerge { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } @@ -243,8 +251,8 @@ func (r *Forcemerge) ExpandWildcards(v string) *Forcemerge { // Flush Specify whether the index should be flushed after performing the operation // (default: true) // API name: flush -func (r *Forcemerge) Flush(b bool) *Forcemerge { - r.values.Set("flush", strconv.FormatBool(b)) +func (r *Forcemerge) Flush(flush bool) *Forcemerge { + r.values.Set("flush", strconv.FormatBool(flush)) return r } @@ -252,32 +260,32 @@ func (r *Forcemerge) Flush(b bool) *Forcemerge { // IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable // (missing or closed) // API name: ignore_unavailable -func (r *Forcemerge) IgnoreUnavailable(b bool) *Forcemerge { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Forcemerge) IgnoreUnavailable(ignoreunavailable bool) *Forcemerge { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } // MaxNumSegments The number of segments the index should be merged into (default: dynamic) // API name: max_num_segments -func (r *Forcemerge) MaxNumSegments(v string) *Forcemerge { - r.values.Set("max_num_segments", v) +func (r *Forcemerge) MaxNumSegments(maxnumsegments string) *Forcemerge { + r.values.Set("max_num_segments", maxnumsegments) return r } // OnlyExpungeDeletes Specify whether the operation should only expunge deleted documents // API name: only_expunge_deletes -func (r *Forcemerge) OnlyExpungeDeletes(b bool) *Forcemerge { - r.values.Set("only_expunge_deletes", strconv.FormatBool(b)) +func (r *Forcemerge) OnlyExpungeDeletes(onlyexpungedeletes bool) 
*Forcemerge { + r.values.Set("only_expunge_deletes", strconv.FormatBool(onlyexpungedeletes)) return r } // WaitForCompletion Should the request wait until the force merge is completed. // API name: wait_for_completion -func (r *Forcemerge) WaitForCompletion(b bool) *Forcemerge { - r.values.Set("wait_for_completion", strconv.FormatBool(b)) +func (r *Forcemerge) WaitForCompletion(waitforcompletion bool) *Forcemerge { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/forcemerge/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/forcemerge/response.go index 9a5d2b7aa..4cd63a59a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/forcemerge/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/forcemerge/response.go @@ -16,16 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package forcemerge +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + // Response holds the response body struct for the package forcemerge // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/forcemerge/IndicesForceMergeResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/forcemerge/IndicesForceMergeResponse.ts#L22-L24 type Response struct { - + Shards_ types.ShardStatistics `json:"_shards"` // Task task contains a task id returned when wait_for_completion=false, // you can use the task_id to get the status of the task at _tasks/ Task *string `json:"task,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/get/get.go index 21f5bb970..c723b58d1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/get/get.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/get/get.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about one or more indices. 
package get @@ -36,6 +36,8 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/feature" ) const ( @@ -68,7 +70,7 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { return func(index string) *Get { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -167,7 +169,6 @@ func (r Get) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -176,6 +177,10 @@ func (r Get) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -211,9 +216,9 @@ func (r *Get) Header(key, value string) *Get { // limit the request. // Wildcard expressions (*) are supported. // API Name: index -func (r *Get) Index(v string) *Get { +func (r *Get) _index(index string) *Get { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -225,8 +230,8 @@ func (r *Get) Index(v string) *Get { // a request targeting foo*,bar* returns an error if an index starts with foo // but no index starts with bar. // API name: allow_no_indices -func (r *Get) AllowNoIndices(b bool) *Get { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *Get) AllowNoIndices(allownoindices bool) *Get { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } @@ -237,32 +242,36 @@ func (r *Get) AllowNoIndices(b bool) *Get { // comma-separated values, // such as open,hidden. 
// API name: expand_wildcards -func (r *Get) ExpandWildcards(v string) *Get { - r.values.Set("expand_wildcards", v) +func (r *Get) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Get { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } // FlatSettings If true, returns settings in flat format. // API name: flat_settings -func (r *Get) FlatSettings(b bool) *Get { - r.values.Set("flat_settings", strconv.FormatBool(b)) +func (r *Get) FlatSettings(flatsettings bool) *Get { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) return r } // IgnoreUnavailable If false, requests that target a missing index return an error. // API name: ignore_unavailable -func (r *Get) IgnoreUnavailable(b bool) *Get { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Get) IgnoreUnavailable(ignoreunavailable bool) *Get { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } // IncludeDefaults If true, return all default settings in the response. // API name: include_defaults -func (r *Get) IncludeDefaults(b bool) *Get { - r.values.Set("include_defaults", strconv.FormatBool(b)) +func (r *Get) IncludeDefaults(includedefaults bool) *Get { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) return r } @@ -270,8 +279,8 @@ func (r *Get) IncludeDefaults(b bool) *Get { // Local If true, the request retrieves information from the local node only. Defaults // to false, which means information is retrieved from the master node. // API name: local -func (r *Get) Local(b bool) *Get { - r.values.Set("local", strconv.FormatBool(b)) +func (r *Get) Local(local bool) *Get { + r.values.Set("local", strconv.FormatBool(local)) return r } @@ -279,16 +288,20 @@ func (r *Get) Local(b bool) *Get { // MasterTimeout Period to wait for a connection to the master node. 
If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *Get) MasterTimeout(v string) *Get { - r.values.Set("master_timeout", v) +func (r *Get) MasterTimeout(duration string) *Get { + r.values.Set("master_timeout", duration) return r } // Features Return only information on specified index features // API name: features -func (r *Get) Features(v string) *Get { - r.values.Set("features", v) +func (r *Get) Features(features ...feature.Feature) *Get { + tmp := []string{} + for _, item := range features { + tmp = append(tmp, item.String()) + } + r.values.Set("features", strings.Join(tmp, ",")) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/get/response.go index 7a8f39cfb..d1405836e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/get/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT.
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package get @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/get/IndicesGetResponse.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/get/IndicesGetResponse.ts#L24-L26 type Response map[string]types.IndexState diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getalias/get_alias.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getalias/get_alias.go index eac298d69..8c87dde0f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getalias/get_alias.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getalias/get_alias.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns an alias. 
package getalias @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -194,7 +195,30 @@ func (r GetAlias) Do(ctx context.Context) (Response, error) { } return response, nil + } + + if res.StatusCode == 404 { + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(gobytes.NewReader(data)).Decode(&errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + err = json.NewDecoder(gobytes.NewReader(data)).Decode(&response) + if err != nil { + return nil, err + } + + return response, nil + } + + return nil, errorResponse } errorResponse := types.NewElasticsearchError() @@ -203,6 +227,10 @@ func (r GetAlias) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -234,56 +262,68 @@ func (r *GetAlias) Header(key, value string) *GetAlias { return r } -// Name A comma-separated list of alias names to return +// Name Comma-separated list of aliases to retrieve. +// Supports wildcards (`*`). +// To retrieve all aliases, omit this parameter or use `*` or `_all`. // API Name: name -func (r *GetAlias) Name(v string) *GetAlias { +func (r *GetAlias) Name(name string) *GetAlias { r.paramSet |= nameMask - r.name = v + r.name = name return r } -// Index A comma-separated list of index names to filter aliases +// Index Comma-separated list of data streams or indices used to limit the request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. 
// API Name: index -func (r *GetAlias) Index(v string) *GetAlias { +func (r *GetAlias) Index(index string) *GetAlias { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. // API name: allow_no_indices -func (r *GetAlias) AllowNoIndices(b bool) *GetAlias { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *GetAlias) AllowNoIndices(allownoindices bool) *GetAlias { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *GetAlias) ExpandWildcards(v string) *GetAlias { - r.values.Set("expand_wildcards", v) +func (r *GetAlias) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *GetAlias { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. 
// API name: ignore_unavailable -func (r *GetAlias) IgnoreUnavailable(b bool) *GetAlias { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *GetAlias) IgnoreUnavailable(ignoreunavailable bool) *GetAlias { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// Local Return local information, do not retrieve the state from master node -// (default: false) +// Local If `true`, the request retrieves information from the local node only. // API name: local -func (r *GetAlias) Local(b bool) *GetAlias { - r.values.Set("local", strconv.FormatBool(b)) +func (r *GetAlias) Local(local bool) *GetAlias { + r.values.Set("local", strconv.FormatBool(local)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getalias/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getalias/response.go index 86932c77e..7a2ebbc54 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getalias/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getalias/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getalias @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getalias // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/get_alias/IndicesGetAliasResponse.ts#L26-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/get_alias/IndicesGetAliasResponse.ts#L26-L34 type Response map[string]types.IndexAliases diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatalifecycle/get_data_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatalifecycle/get_data_lifecycle.go new file mode 100644 index 000000000..d68cebd7b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatalifecycle/get_data_lifecycle.go @@ -0,0 +1,249 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Returns the data stream lifecycle of the selected data streams. +package getdatalifecycle + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetDataLifecycle struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + name string +} + +// NewGetDataLifecycle type alias for index. +type NewGetDataLifecycle func(name string) *GetDataLifecycle + +// NewGetDataLifecycleFunc returns a new instance of GetDataLifecycle with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetDataLifecycleFunc(tp elastictransport.Interface) NewGetDataLifecycle { + return func(name string) *GetDataLifecycle { + n := New(tp) + + n._name(name) + + return n + } +} + +// Returns the data stream lifecycle of the selected data streams. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle.html +func New(tp elastictransport.Interface) *GetDataLifecycle { + r := &GetDataLifecycle{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *GetDataLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_lifecycle") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetDataLifecycle) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the GetDataLifecycle query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getdatalifecycle.Response +func (r GetDataLifecycle) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetDataLifecycle) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the GetDataLifecycle headers map. +func (r *GetDataLifecycle) Header(key, value string) *GetDataLifecycle { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of data streams to limit the request. +// Supports wildcards (`*`). +// To target all data streams, omit this parameter or use `*` or `_all`. 
+// API Name: name +func (r *GetDataLifecycle) _name(name string) *GetDataLifecycle { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ExpandWildcards Type of data stream that wildcard patterns can match. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +// API name: expand_wildcards +func (r *GetDataLifecycle) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *GetDataLifecycle { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IncludeDefaults If `true`, return all default settings in the response. +// API name: include_defaults +func (r *GetDataLifecycle) IncludeDefaults(includedefaults bool) *GetDataLifecycle { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatalifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatalifecycle/response.go new file mode 100644 index 000000000..7d57f4b47 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatalifecycle/response.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package getdatalifecycle + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package getdatalifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L23-L25 + +type Response struct { + DataStreams []types.DataStreamWithLifecycle `json:"data_streams"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatastream/get_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatastream/get_data_stream.go index d65ceb1a3..119eb5fff 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatastream/get_data_stream.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatastream/get_data_stream.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns data streams. 
package getdatastream @@ -31,10 +31,12 @@ import ( "io/ioutil" "net/http" "net/url" + "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -171,7 +173,6 @@ func (r GetDataStream) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -180,6 +181,10 @@ func (r GetDataStream) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -211,21 +216,34 @@ func (r *GetDataStream) Header(key, value string) *GetDataStream { return r } -// Name A comma-separated list of data streams to get; use `*` to get all data -// streams +// Name Comma-separated list of data stream names used to limit the request. +// Wildcard (`*`) expressions are supported. If omitted, all data streams are +// returned. // API Name: name -func (r *GetDataStream) Name(v string) *GetDataStream { +func (r *GetDataStream) Name(name string) *GetDataStream { r.paramSet |= nameMask - r.name = v + r.name = name return r } -// ExpandWildcards Whether wildcard expressions should get expanded to open or closed indices -// (default: open) +// ExpandWildcards Type of data stream that wildcard patterns can match. +// Supports comma-separated values, such as `open,hidden`. 
// API name: expand_wildcards -func (r *GetDataStream) ExpandWildcards(v string) *GetDataStream { - r.values.Set("expand_wildcards", v) +func (r *GetDataStream) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *GetDataStream { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// IncludeDefaults If true, returns all relevant default configurations for the index template. +// API name: include_defaults +func (r *GetDataStream) IncludeDefaults(includedefaults bool) *GetDataStream { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatastream/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatastream/response.go index 857ce1b89..089353431 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatastream/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatastream/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getdatastream @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdatastream // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/get_data_stream/IndicesGetDataStreamResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/get_data_stream/IndicesGetDataStreamResponse.ts#L22-L24 type Response struct { DataStreams []types.DataStream `json:"data_streams"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getfieldmapping/get_field_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getfieldmapping/get_field_mapping.go index caeebe252..07ebff8a2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getfieldmapping/get_field_mapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getfieldmapping/get_field_mapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns mapping for one or more fields. 
package getfieldmapping @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -71,7 +72,7 @@ func NewGetFieldMappingFunc(tp elastictransport.Interface) NewGetFieldMapping { return func(fields string) *GetFieldMapping { n := New(tp) - n.Fields(fields) + n._fields(fields) return n } @@ -187,7 +188,6 @@ func (r GetFieldMapping) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -196,6 +196,10 @@ func (r GetFieldMapping) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -227,64 +231,76 @@ func (r *GetFieldMapping) Header(key, value string) *GetFieldMapping { return r } -// Fields A comma-separated list of fields +// Fields Comma-separated list or wildcard expression of fields used to limit returned +// information. // API Name: fields -func (r *GetFieldMapping) Fields(v string) *GetFieldMapping { +func (r *GetFieldMapping) _fields(fields string) *GetFieldMapping { r.paramSet |= fieldsMask - r.fields = v + r.fields = fields return r } -// Index A comma-separated list of index names +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. // API Name: index -func (r *GetFieldMapping) Index(v string) *GetFieldMapping { +func (r *GetFieldMapping) Index(index string) *GetFieldMapping { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. 
(This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. // API name: allow_no_indices -func (r *GetFieldMapping) AllowNoIndices(b bool) *GetFieldMapping { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *GetFieldMapping) AllowNoIndices(allownoindices bool) *GetFieldMapping { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *GetFieldMapping) ExpandWildcards(v string) *GetFieldMapping { - r.values.Set("expand_wildcards", v) +func (r *GetFieldMapping) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *GetFieldMapping { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. 
// API name: ignore_unavailable -func (r *GetFieldMapping) IgnoreUnavailable(b bool) *GetFieldMapping { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *GetFieldMapping) IgnoreUnavailable(ignoreunavailable bool) *GetFieldMapping { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// IncludeDefaults Whether the default mapping values should be returned as well +// IncludeDefaults If `true`, return all default settings in the response. // API name: include_defaults -func (r *GetFieldMapping) IncludeDefaults(b bool) *GetFieldMapping { - r.values.Set("include_defaults", strconv.FormatBool(b)) +func (r *GetFieldMapping) IncludeDefaults(includedefaults bool) *GetFieldMapping { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) return r } -// Local Return local information, do not retrieve the state from master node -// (default: false) +// Local If `true`, the request retrieves information from the local node only. // API name: local -func (r *GetFieldMapping) Local(b bool) *GetFieldMapping { - r.values.Set("local", strconv.FormatBool(b)) +func (r *GetFieldMapping) Local(local bool) *GetFieldMapping { + r.values.Set("local", strconv.FormatBool(local)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getfieldmapping/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getfieldmapping/response.go index ce0536982..3c93739fb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getfieldmapping/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getfieldmapping/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getfieldmapping @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getfieldmapping // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/get_field_mapping/IndicesGetFieldMappingResponse.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/get_field_mapping/IndicesGetFieldMappingResponse.ts#L24-L26 type Response map[string]types.TypeFieldMappings diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getindextemplate/get_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getindextemplate/get_index_template.go index 8dfb2bd01..218772070 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getindextemplate/get_index_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getindextemplate/get_index_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns an index template. 
package getindextemplate @@ -172,7 +172,6 @@ func (r GetIndexTemplate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r GetIndexTemplate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -215,9 +218,9 @@ func (r *GetIndexTemplate) Header(key, value string) *GetIndexTemplate { // Name Comma-separated list of index template names used to limit the request. // Wildcard (*) expressions are supported. // API Name: name -func (r *GetIndexTemplate) Name(v string) *GetIndexTemplate { +func (r *GetIndexTemplate) Name(name string) *GetIndexTemplate { r.paramSet |= nameMask - r.name = v + r.name = name return r } @@ -225,16 +228,16 @@ func (r *GetIndexTemplate) Name(v string) *GetIndexTemplate { // Local If true, the request retrieves information from the local node only. Defaults // to false, which means information is retrieved from the master node. // API name: local -func (r *GetIndexTemplate) Local(b bool) *GetIndexTemplate { - r.values.Set("local", strconv.FormatBool(b)) +func (r *GetIndexTemplate) Local(local bool) *GetIndexTemplate { + r.values.Set("local", strconv.FormatBool(local)) return r } // FlatSettings If true, returns settings in flat format. // API name: flat_settings -func (r *GetIndexTemplate) FlatSettings(b bool) *GetIndexTemplate { - r.values.Set("flat_settings", strconv.FormatBool(b)) +func (r *GetIndexTemplate) FlatSettings(flatsettings bool) *GetIndexTemplate { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) return r } @@ -242,8 +245,16 @@ func (r *GetIndexTemplate) FlatSettings(b bool) *GetIndexTemplate { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. 
// API name: master_timeout -func (r *GetIndexTemplate) MasterTimeout(v string) *GetIndexTemplate { - r.values.Set("master_timeout", v) +func (r *GetIndexTemplate) MasterTimeout(duration string) *GetIndexTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// IncludeDefaults If true, returns all relevant default configurations for the index template. +// API name: include_defaults +func (r *GetIndexTemplate) IncludeDefaults(includedefaults bool) *GetIndexTemplate { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getindextemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getindextemplate/response.go index bcbbaa11d..9fe63a43b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getindextemplate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getindextemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getindextemplate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getindextemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L23-L27 type Response struct { IndexTemplates []types.IndexTemplateItem `json:"index_templates"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getmapping/get_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getmapping/get_mapping.go index 76940d5ce..3ece912d3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getmapping/get_mapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getmapping/get_mapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns mappings for one or more indices. 
package getmapping @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -172,7 +173,6 @@ func (r GetMapping) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +181,10 @@ func (r GetMapping) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,55 +216,68 @@ func (r *GetMapping) Header(key, value string) *GetMapping { return r } -// Index A comma-separated list of index names +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. // API Name: index -func (r *GetMapping) Index(v string) *GetMapping { +func (r *GetMapping) Index(index string) *GetMapping { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. 
// API name: allow_no_indices -func (r *GetMapping) AllowNoIndices(b bool) *GetMapping { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *GetMapping) AllowNoIndices(allownoindices bool) *GetMapping { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *GetMapping) ExpandWildcards(v string) *GetMapping { - r.values.Set("expand_wildcards", v) +func (r *GetMapping) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *GetMapping { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *GetMapping) IgnoreUnavailable(b bool) *GetMapping { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *GetMapping) IgnoreUnavailable(ignoreunavailable bool) *GetMapping { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// Local Return local information, do not retrieve the state from master node -// (default: false) +// Local If `true`, the request retrieves information from the local node only. 
// API name: local -func (r *GetMapping) Local(b bool) *GetMapping { - r.values.Set("local", strconv.FormatBool(b)) +func (r *GetMapping) Local(local bool) *GetMapping { + r.values.Set("local", strconv.FormatBool(local)) return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *GetMapping) MasterTimeout(v string) *GetMapping { - r.values.Set("master_timeout", v) +func (r *GetMapping) MasterTimeout(duration string) *GetMapping { + r.values.Set("master_timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getmapping/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getmapping/response.go index 431efc780..bfcabaf74 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getmapping/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getmapping/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getmapping @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getmapping // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/get_mapping/IndicesGetMappingResponse.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/get_mapping/IndicesGetMappingResponse.ts#L24-L26 type Response map[string]types.IndexMappingRecord diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getsettings/get_settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getsettings/get_settings.go index 86bb89286..7aca60342 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getsettings/get_settings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getsettings/get_settings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns settings for one or more indices. 
package getsettings @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -194,7 +195,6 @@ func (r GetSettings) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -203,6 +203,10 @@ func (r GetSettings) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -234,81 +238,93 @@ func (r *GetSettings) Header(key, value string) *GetSettings { return r } -// Index A comma-separated list of index names; use `_all` or empty string to perform -// the operation on all indices +// Index Comma-separated list of data streams, indices, and aliases used to limit +// the request. Supports wildcards (`*`). To target all data streams and +// indices, omit this parameter or use `*` or `_all`. // API Name: index -func (r *GetSettings) Index(v string) *GetSettings { +func (r *GetSettings) Index(index string) *GetSettings { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// Name The name of the settings that should be included +// Name Comma-separated list or wildcard expression of settings to retrieve. // API Name: name -func (r *GetSettings) Name(v string) *GetSettings { +func (r *GetSettings) Name(name string) *GetSettings { r.paramSet |= nameMask - r.name = v + r.name = name return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. This +// behavior applies even if the request targets other open indices. 
For +// example, a request targeting `foo*,bar*` returns an error if an index +// starts with foo but no index starts with `bar`. // API name: allow_no_indices -func (r *GetSettings) AllowNoIndices(b bool) *GetSettings { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *GetSettings) AllowNoIndices(allownoindices bool) *GetSettings { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. // API name: expand_wildcards -func (r *GetSettings) ExpandWildcards(v string) *GetSettings { - r.values.Set("expand_wildcards", v) +func (r *GetSettings) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *GetSettings { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// FlatSettings Return settings in flat format (default: false) +// FlatSettings If `true`, returns settings in flat format. // API name: flat_settings -func (r *GetSettings) FlatSettings(b bool) *GetSettings { - r.values.Set("flat_settings", strconv.FormatBool(b)) +func (r *GetSettings) FlatSettings(flatsettings bool) *GetSettings { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. 
// API name: ignore_unavailable -func (r *GetSettings) IgnoreUnavailable(b bool) *GetSettings { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *GetSettings) IgnoreUnavailable(ignoreunavailable bool) *GetSettings { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// IncludeDefaults Whether to return all default setting for each of the indices. +// IncludeDefaults If `true`, return all default settings in the response. // API name: include_defaults -func (r *GetSettings) IncludeDefaults(b bool) *GetSettings { - r.values.Set("include_defaults", strconv.FormatBool(b)) +func (r *GetSettings) IncludeDefaults(includedefaults bool) *GetSettings { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) return r } -// Local Return local information, do not retrieve the state from master node -// (default: false) +// Local If `true`, the request retrieves information from the local node only. If +// `false`, information is retrieved from the master node. // API name: local -func (r *GetSettings) Local(b bool) *GetSettings { - r.values.Set("local", strconv.FormatBool(b)) +func (r *GetSettings) Local(local bool) *GetSettings { + r.values.Set("local", strconv.FormatBool(local)) return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an +// error. 
// API name: master_timeout -func (r *GetSettings) MasterTimeout(v string) *GetSettings { - r.values.Set("master_timeout", v) +func (r *GetSettings) MasterTimeout(duration string) *GetSettings { + r.values.Set("master_timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getsettings/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getsettings/response.go index 9b04d933d..cf0c65c00 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getsettings/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/getsettings/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getsettings @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getsettings // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/get_settings/IndicesGetSettingsResponse.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/get_settings/IndicesGetSettingsResponse.ts#L24-L26 type Response map[string]types.IndexState diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/gettemplate/get_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/gettemplate/get_template.go index 800a0dba5..e99b8ae2f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/gettemplate/get_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/gettemplate/get_template.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns an index template. package gettemplate @@ -172,7 +172,6 @@ func (r GetTemplate) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r GetTemplate) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,36 +215,40 @@ func (r *GetTemplate) Header(key, value string) *GetTemplate { return r } -// Name The comma separated names of the index templates +// Name Comma-separated list of index template names used to limit the request. +// Wildcard (`*`) expressions are supported. +// To return all index templates, omit this parameter or use a value of `_all` +// or `*`. // API Name: name -func (r *GetTemplate) Name(v string) *GetTemplate { +func (r *GetTemplate) Name(name string) *GetTemplate { r.paramSet |= nameMask - r.name = v + r.name = name return r } -// FlatSettings Return settings in flat format (default: false) +// FlatSettings If `true`, returns settings in flat format. // API name: flat_settings -func (r *GetTemplate) FlatSettings(b bool) *GetTemplate { - r.values.Set("flat_settings", strconv.FormatBool(b)) +func (r *GetTemplate) FlatSettings(flatsettings bool) *GetTemplate { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) return r } -// Local Return local information, do not retrieve the state from master node -// (default: false) +// Local If `true`, the request retrieves information from the local node only. 
// API name: local -func (r *GetTemplate) Local(b bool) *GetTemplate { - r.values.Set("local", strconv.FormatBool(b)) +func (r *GetTemplate) Local(local bool) *GetTemplate { + r.values.Set("local", strconv.FormatBool(local)) return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *GetTemplate) MasterTimeout(v string) *GetTemplate { - r.values.Set("master_timeout", v) +func (r *GetTemplate) MasterTimeout(duration string) *GetTemplate { + r.values.Set("master_timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/gettemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/gettemplate/response.go index c2ee70b9a..2757a4822 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/gettemplate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/gettemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package gettemplate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/get_template/IndicesGetTemplateResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/get_template/IndicesGetTemplateResponse.ts#L23-L25 type Response map[string]types.TemplateMapping diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/migratetodatastream/migrate_to_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/migratetodatastream/migrate_to_data_stream.go index a00d6611e..99329958e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/migratetodatastream/migrate_to_data_stream.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/migratetodatastream/migrate_to_data_stream.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Migrates an alias to a data stream package migratetodatastream @@ -67,7 +67,7 @@ func NewMigrateToDataStreamFunc(tp elastictransport.Interface) NewMigrateToDataS return func(name string) *MigrateToDataStream { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -170,7 +170,6 @@ func (r MigrateToDataStream) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r MigrateToDataStream) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -210,11 +213,11 @@ func (r *MigrateToDataStream) Header(key, value string) *MigrateToDataStream { return r } -// Name The name of the alias to migrate +// Name Name of the index alias to convert to a data stream. // API Name: name -func (r *MigrateToDataStream) Name(v string) *MigrateToDataStream { +func (r *MigrateToDataStream) _name(name string) *MigrateToDataStream { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/migratetodatastream/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/migratetodatastream/response.go index 28fbe6a68..bcf78e88f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/migratetodatastream/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/migratetodatastream/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package migratetodatastream // Response holds the response body struct for the package migratetodatastream // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/migrate_to_data_stream/IndicesMigrateToDataStreamResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/migrate_to_data_stream/IndicesMigrateToDataStreamResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/modifydatastream/modify_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/modifydatastream/modify_data_stream.go index 1a1a6b14b..950320385 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/modifydatastream/modify_data_stream.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/modifydatastream/modify_data_stream.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Modifies a data stream package modifydatastream @@ -48,8 +48,9 @@ type ModifyDataStream struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +77,8 @@ func New(tp elastictransport.Interface) *ModifyDataStream { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +108,19 @@ func (r *ModifyDataStream) HttpRequest(ctx context.Context) (*http.Request, erro var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +128,7 @@ func (r *ModifyDataStream) HttpRequest(ctx context.Context) (*http.Request, erro } r.buf.Write(data) + } r.path.Scheme = "http" @@ -194,7 +208,6 @@ func (r ModifyDataStream) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -203,6 +216,10 @@ func (r ModifyDataStream) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,3 +229,11 @@ func (r *ModifyDataStream) Header(key, value string) *ModifyDataStream { return r } + +// Actions Actions to perform. 
+// API name: actions +func (r *ModifyDataStream) Actions(actions ...types.IndicesModifyAction) *ModifyDataStream { + r.req.Actions = actions + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/modifydatastream/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/modifydatastream/request.go index 0399549c9..fe7d7ac71 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/modifydatastream/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/modifydatastream/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package modifydatastream @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package modifydatastream // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/modify_data_stream/IndicesModifyDataStreamRequest.ts#L23-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/modify_data_stream/IndicesModifyDataStreamRequest.ts#L23-L36 type Request struct { // Actions Actions to perform. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/modifydatastream/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/modifydatastream/response.go index 7c1b76da9..769af570d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/modifydatastream/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/modifydatastream/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package modifydatastream // Response holds the response body struct for the package modifydatastream // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/modify_data_stream/IndicesModifyDataStreamResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/modify_data_stream/IndicesModifyDataStreamResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/open/open.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/open/open.go index 84096ac27..9cb4f0983 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/open/open.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/open/open.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Opens an index. 
package open @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -68,7 +69,7 @@ func NewOpenFunc(tp elastictransport.Interface) NewOpen { return func(index string) *Open { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -169,7 +170,6 @@ func (r Open) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -178,6 +178,10 @@ func (r Open) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -209,62 +213,85 @@ func (r *Open) Header(key, value string) *Open { return r } -// Index A comma separated list of indices to open +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). +// By default, you must explicitly name the indices you using to limit the +// request. +// To limit a request using `_all`, `*`, or other wildcard expressions, change +// the `action.destructive_requires_name` setting to false. +// You can update this setting in the `elasticsearch.yml` file or using the +// cluster update settings API. // API Name: index -func (r *Open) Index(v string) *Open { +func (r *Open) _index(index string) *Open { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. 
// API name: allow_no_indices -func (r *Open) AllowNoIndices(b bool) *Open { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *Open) AllowNoIndices(allownoindices bool) *Open { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *Open) ExpandWildcards(v string) *Open { - r.values.Set("expand_wildcards", v) +func (r *Open) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Open { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *Open) IgnoreUnavailable(b bool) *Open { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Open) IgnoreUnavailable(ignoreunavailable bool) *Open { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
// API name: master_timeout -func (r *Open) MasterTimeout(v string) *Open { - r.values.Set("master_timeout", v) +func (r *Open) MasterTimeout(duration string) *Open { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *Open) Timeout(v string) *Open { - r.values.Set("timeout", v) +func (r *Open) Timeout(duration string) *Open { + r.values.Set("timeout", duration) return r } -// WaitForActiveShards Sets the number of active shards to wait for before the operation returns. +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). // API name: wait_for_active_shards -func (r *Open) WaitForActiveShards(v string) *Open { - r.values.Set("wait_for_active_shards", v) +func (r *Open) WaitForActiveShards(waitforactiveshards string) *Open { + r.values.Set("wait_for_active_shards", waitforactiveshards) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/open/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/open/response.go index 7a42ac99b..c30ce6ff8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/open/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/open/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package open // Response holds the response body struct for the package open // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/open/IndicesOpenResponse.ts#L20-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/open/IndicesOpenResponse.ts#L20-L25 type Response struct { Acknowledged bool `json:"acknowledged"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/promotedatastream/promote_data_stream.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/promotedatastream/promote_data_stream.go index 0197dbcbe..fe7f60ccb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/promotedatastream/promote_data_stream.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/promotedatastream/promote_data_stream.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Promotes a data stream from a replicated data stream managed by CCR to a // regular data stream @@ -68,7 +68,7 @@ func NewPromoteDataStreamFunc(tp elastictransport.Interface) NewPromoteDataStrea return func(name string) *PromoteDataStream { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -172,7 +172,6 @@ func (r PromoteDataStream) Do(ctx context.Context) (Response, error) { } return *response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r PromoteDataStream) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +217,9 @@ func (r *PromoteDataStream) Header(key, value string) *PromoteDataStream { // Name The name of the data stream // API Name: name -func (r *PromoteDataStream) Name(v string) *PromoteDataStream { +func (r *PromoteDataStream) _name(name string) *PromoteDataStream { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/promotedatastream/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/promotedatastream/response.go index c12b51b5e..c0b0bd881 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/promotedatastream/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/promotedatastream/response.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package promotedatastream -import "encoding/json" +import ( + "encoding/json" +) // Response holds the response body struct for the package promotedatastream // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/promote_data_stream/IndicesPromoteDataStreamResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/promote_data_stream/IndicesPromoteDataStreamResponse.ts#L22-L24 -type Response json.RawMessage +type Response = json.RawMessage + +func NewResponse() *Response { + return new(Response) +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putalias/put_alias.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putalias/put_alias.go index 7dde1af79..c09ea3fd4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putalias/put_alias.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putalias/put_alias.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates or updates an alias. 
package putalias @@ -54,8 +54,9 @@ type PutAlias struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -72,9 +73,9 @@ func NewPutAliasFunc(tp elastictransport.Interface) NewPutAlias { return func(index, name string) *PutAlias { n := New(tp) - n.Index(index) + n._index(index) - n.Name(name) + n._name(name) return n } @@ -89,6 +90,8 @@ func New(tp elastictransport.Interface) *PutAlias { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -118,9 +121,19 @@ func (r *PutAlias) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -128,6 +141,7 @@ func (r *PutAlias) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -222,7 +236,6 @@ func (r PutAlias) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -231,6 +244,10 @@ func (r PutAlias) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -241,37 +258,96 @@ func (r *PutAlias) Header(key, value string) *PutAlias { return r } -// Index A comma-separated list of index names the alias should point to (supports -// wildcards); use `_all` to perform the operation on all indices. +// Index Comma-separated list of data streams or indices to add. +// Supports wildcards (`*`). +// Wildcard patterns that match both data streams and indices return an error. 
// API Name: index -func (r *PutAlias) Index(v string) *PutAlias { +func (r *PutAlias) _index(index string) *PutAlias { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// Name The name of the alias to be created or updated +// Name Alias to update. +// If the alias doesn’t exist, the request creates it. +// Index alias names support date math. // API Name: name -func (r *PutAlias) Name(v string) *PutAlias { +func (r *PutAlias) _name(name string) *PutAlias { r.paramSet |= nameMask - r.name = v + r.name = name return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *PutAlias) MasterTimeout(v string) *PutAlias { - r.values.Set("master_timeout", v) +func (r *PutAlias) MasterTimeout(duration string) *PutAlias { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit timestamp for the document +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *PutAlias) Timeout(v string) *PutAlias { - r.values.Set("timeout", v) +func (r *PutAlias) Timeout(duration string) *PutAlias { + r.values.Set("timeout", duration) + + return r +} + +// Filter Query used to limit documents the alias can access. +// API name: filter +func (r *PutAlias) Filter(filter *types.Query) *PutAlias { + + r.req.Filter = filter + + return r +} + +// IndexRouting Value used to route indexing operations to a specific shard. +// If specified, this overwrites the `routing` value for indexing operations. +// Data stream aliases don’t support this parameter. 
+// API name: index_routing +func (r *PutAlias) IndexRouting(routing string) *PutAlias { + r.req.IndexRouting = &routing + + return r +} + +// IsWriteIndex If `true`, sets the write index or data stream for the alias. +// If an alias points to multiple indices or data streams and `is_write_index` +// isn’t set, the alias rejects write requests. +// If an index alias points to one index and `is_write_index` isn’t set, the +// index automatically acts as the write index. +// Data stream aliases don’t automatically set a write data stream, even if the +// alias points to one data stream. +// API name: is_write_index +func (r *PutAlias) IsWriteIndex(iswriteindex bool) *PutAlias { + r.req.IsWriteIndex = &iswriteindex + + return r +} + +// Routing Value used to route indexing and search operations to a specific shard. +// Data stream aliases don’t support this parameter. +// API name: routing +func (r *PutAlias) Routing(routing string) *PutAlias { + r.req.Routing = &routing + + return r +} + +// SearchRouting Value used to route search operations to a specific shard. +// If specified, this overwrites the `routing` value for search operations. +// Data stream aliases don’t support this parameter. +// API name: search_routing +func (r *PutAlias) SearchRouting(routing string) *PutAlias { + r.req.SearchRouting = &routing return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putalias/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putalias/request.go index 56db50e27..ea3e0c2fe 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putalias/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putalias/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putalias @@ -29,13 +29,30 @@ import ( // Request holds the request body struct for the package putalias // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/put_alias/IndicesPutAliasRequest.ts#L25-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/put_alias/IndicesPutAliasRequest.ts#L25-L91 type Request struct { - Filter *types.Query `json:"filter,omitempty"` - IndexRouting *string `json:"index_routing,omitempty"` - IsWriteIndex *bool `json:"is_write_index,omitempty"` - Routing *string `json:"routing,omitempty"` - SearchRouting *string `json:"search_routing,omitempty"` + + // Filter Query used to limit documents the alias can access. + Filter *types.Query `json:"filter,omitempty"` + // IndexRouting Value used to route indexing operations to a specific shard. + // If specified, this overwrites the `routing` value for indexing operations. + // Data stream aliases don’t support this parameter. + IndexRouting *string `json:"index_routing,omitempty"` + // IsWriteIndex If `true`, sets the write index or data stream for the alias. + // If an alias points to multiple indices or data streams and `is_write_index` + // isn’t set, the alias rejects write requests. + // If an index alias points to one index and `is_write_index` isn’t set, the + // index automatically acts as the write index. + // Data stream aliases don’t automatically set a write data stream, even if the + // alias points to one data stream. + IsWriteIndex *bool `json:"is_write_index,omitempty"` + // Routing Value used to route indexing and search operations to a specific shard. + // Data stream aliases don’t support this parameter. 
+ Routing *string `json:"routing,omitempty"` + // SearchRouting Value used to route search operations to a specific shard. + // If specified, this overwrites the `routing` value for search operations. + // Data stream aliases don’t support this parameter. + SearchRouting *string `json:"search_routing,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putalias/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putalias/response.go index 9714d32d0..b4b0abef0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putalias/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putalias/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putalias // Response holds the response body struct for the package putalias // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/put_alias/IndicesPutAliasResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/put_alias/IndicesPutAliasResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putdatalifecycle/put_data_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putdatalifecycle/put_data_lifecycle.go new file mode 100644 index 000000000..e04ecf188 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putdatalifecycle/put_data_lifecycle.go @@ -0,0 +1,304 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Updates the data stream lifecycle of the selected data streams. +package putdatalifecycle + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutDataLifecycle struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + req *Request + deferred []func(request *Request) error + raw io.Reader + + paramSet int + + name string +} + +// NewPutDataLifecycle type alias for index. 
+type NewPutDataLifecycle func(name string) *PutDataLifecycle + +// NewPutDataLifecycleFunc returns a new instance of PutDataLifecycle with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutDataLifecycleFunc(tp elastictransport.Interface) NewPutDataLifecycle { + return func(name string) *PutDataLifecycle { + n := New(tp) + + n._name(name) + + return n + } +} + +// Updates the data stream lifecycle of the selected data streams. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-put-lifecycle.html +func New(tp elastictransport.Interface) *PutDataLifecycle { + r := &PutDataLifecycle{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + + req: NewRequest(), + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutDataLifecycle) Raw(raw io.Reader) *PutDataLifecycle { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutDataLifecycle) Request(req *Request) *PutDataLifecycle { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutDataLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw != nil { + r.buf.ReadFrom(r.raw) + } else if r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutDataLifecycle: %w", err) + } + + r.buf.Write(data) + + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_data_stream") + path.WriteString("/") + + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_lifecycle") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutDataLifecycle) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the PutDataLifecycle query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putdatalifecycle.Response +func (r PutDataLifecycle) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// Header set a key, value pair in the PutDataLifecycle headers map. +func (r *PutDataLifecycle) Header(key, value string) *PutDataLifecycle { + r.headers.Set(key, value) + + return r +} + +// Name Comma-separated list of data streams used to limit the request. +// Supports wildcards (`*`). +// To target all data streams use `*` or `_all`. +// API Name: name +func (r *PutDataLifecycle) _name(name string) *PutDataLifecycle { + r.paramSet |= nameMask + r.name = name + + return r +} + +// ExpandWildcards Type of data stream that wildcard patterns can match. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `hidden`, `open`, `closed`, `none`. 
+// API name: expand_wildcards +func (r *PutDataLifecycle) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *PutDataLifecycle { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) + + return r +} + +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an +// error. +// API name: master_timeout +func (r *PutDataLifecycle) MasterTimeout(duration string) *PutDataLifecycle { + r.values.Set("master_timeout", duration) + + return r +} + +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. +// API name: timeout +func (r *PutDataLifecycle) Timeout(duration string) *PutDataLifecycle { + r.values.Set("timeout", duration) + + return r +} + +// DataRetention If defined, every document added to this data stream will be stored at least +// for this time frame. +// Any time after this duration the document could be deleted. +// When empty, every document in this data stream will be stored indefinitely. +// API name: data_retention +func (r *PutDataLifecycle) DataRetention(duration types.Duration) *PutDataLifecycle { + r.req.DataRetention = duration + + return r +} + +// Downsampling If defined, every backing index will execute the configured downsampling +// configuration after the backing +// index is not the data stream write index anymore. 
+// API name: downsampling +func (r *PutDataLifecycle) Downsampling(downsampling *types.DataStreamLifecycleDownsampling) *PutDataLifecycle { + + r.req.Downsampling = downsampling + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putdatalifecycle/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putdatalifecycle/request.go new file mode 100644 index 000000000..4176e1238 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putdatalifecycle/request.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package putdatalifecycle + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package putdatalifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/put_data_lifecycle/IndicesPutDataLifecycleRequest.ts#L25-L75 +type Request struct { + + // DataRetention If defined, every document added to this data stream will be stored at least + // for this time frame. + // Any time after this duration the document could be deleted. + // When empty, every document in this data stream will be stored indefinitely. + DataRetention types.Duration `json:"data_retention,omitempty"` + // Downsampling If defined, every backing index will execute the configured downsampling + // configuration after the backing + // index is not the data stream write index anymore. + Downsampling *types.DataStreamLifecycleDownsampling `json:"downsampling,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putdatalifecycle request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putdatalifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putdatalifecycle/response.go new file mode 100644 index 000000000..f77c43051 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putdatalifecycle/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package putdatalifecycle + +// Response holds the response body struct for the package putdatalifecycle +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/put_data_lifecycle/IndicesPutDataLifecycleResponse.ts#L22-L24 + +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putindextemplate/put_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putindextemplate/put_index_template.go index 21109149b..69cdba7e3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putindextemplate/put_index_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putindextemplate/put_index_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates or updates an index template. package putindextemplate @@ -53,8 +53,9 @@ type PutIndexTemplate struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +71,7 @@ func NewPutIndexTemplateFunc(tp elastictransport.Interface) NewPutIndexTemplate return func(name string) *PutIndexTemplate { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -85,6 +86,8 @@ func New(tp elastictransport.Interface) *PutIndexTemplate { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +117,19 @@ func (r *PutIndexTemplate) HttpRequest(ctx context.Context) (*http.Request, erro var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +137,7 @@ func (r 
*PutIndexTemplate) HttpRequest(ctx context.Context) (*http.Request, erro } r.buf.Write(data) + } r.path.Scheme = "http" @@ -204,7 +218,6 @@ func (r PutIndexTemplate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -213,6 +226,10 @@ func (r PutIndexTemplate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -225,18 +242,90 @@ func (r *PutIndexTemplate) Header(key, value string) *PutIndexTemplate { // Name Index or template name // API Name: name -func (r *PutIndexTemplate) Name(v string) *PutIndexTemplate { +func (r *PutIndexTemplate) _name(name string) *PutIndexTemplate { r.paramSet |= nameMask - r.name = v + r.name = name return r } -// Create Whether the index template should only be added if new or can also replace an -// existing one +// Create If `true`, this request cannot replace or update existing index templates. // API name: create -func (r *PutIndexTemplate) Create(b bool) *PutIndexTemplate { - r.values.Set("create", strconv.FormatBool(b)) +func (r *PutIndexTemplate) Create(create bool) *PutIndexTemplate { + r.values.Set("create", strconv.FormatBool(create)) + + return r +} + +// ComposedOf An ordered list of component template names. +// Component templates are merged in the order specified, meaning that the last +// component template specified has the highest precedence. +// API name: composed_of +func (r *PutIndexTemplate) ComposedOf(composedofs ...string) *PutIndexTemplate { + r.req.ComposedOf = composedofs + + return r +} + +// DataStream If this object is included, the template is used to create data streams and +// their backing indices. +// Supports an empty object. +// Data streams require a matching index template with a `data_stream` object. 
+// API name: data_stream +func (r *PutIndexTemplate) DataStream(datastream *types.DataStreamVisibility) *PutIndexTemplate { + + r.req.DataStream = datastream + + return r +} + +// IndexPatterns Name of the index template to create. +// API name: index_patterns +func (r *PutIndexTemplate) IndexPatterns(indices ...string) *PutIndexTemplate { + r.req.IndexPatterns = indices + + return r +} + +// Meta_ Optional user metadata about the index template. +// May have any contents. +// This map is not automatically generated by Elasticsearch. +// API name: _meta +func (r *PutIndexTemplate) Meta_(metadata types.Metadata) *PutIndexTemplate { + r.req.Meta_ = metadata + + return r +} + +// Priority Priority to determine index template precedence when a new data stream or +// index is created. +// The index template with the highest priority is chosen. +// If no priority is specified the template is treated as though it is of +// priority 0 (lowest priority). +// This number is not automatically generated by Elasticsearch. +// API name: priority +func (r *PutIndexTemplate) Priority(priority int) *PutIndexTemplate { + r.req.Priority = &priority + + return r +} + +// Template Template to be applied. +// It may optionally include an `aliases`, `mappings`, or `settings` +// configuration. +// API name: template +func (r *PutIndexTemplate) Template(template *types.IndexTemplateMapping) *PutIndexTemplate { + + r.req.Template = template + + return r +} + +// Version Version number used to manage index templates externally. +// This number is not automatically generated by Elasticsearch. 
+// API name: version +func (r *PutIndexTemplate) Version(versionnumber int64) *PutIndexTemplate { + r.req.Version = &versionnumber return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putindextemplate/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putindextemplate/request.go index 2ea8ab380..97066476d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putindextemplate/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putindextemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putindextemplate @@ -29,15 +29,38 @@ import ( // Request holds the request body struct for the package putindextemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L35-L58 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L36-L95 type Request struct { - ComposedOf []string `json:"composed_of,omitempty"` - DataStream *types.DataStreamVisibility `json:"data_stream,omitempty"` - IndexPatterns []string `json:"index_patterns,omitempty"` - Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` - Priority *int `json:"priority,omitempty"` - Template *types.IndexTemplateMapping `json:"template,omitempty"` - Version *int64 `json:"version,omitempty"` + + // ComposedOf An ordered list of component template names. 
+ // Component templates are merged in the order specified, meaning that the last + // component template specified has the highest precedence. + ComposedOf []string `json:"composed_of,omitempty"` + // DataStream If this object is included, the template is used to create data streams and + // their backing indices. + // Supports an empty object. + // Data streams require a matching index template with a `data_stream` object. + DataStream *types.DataStreamVisibility `json:"data_stream,omitempty"` + // IndexPatterns Name of the index template to create. + IndexPatterns []string `json:"index_patterns,omitempty"` + // Meta_ Optional user metadata about the index template. + // May have any contents. + // This map is not automatically generated by Elasticsearch. + Meta_ types.Metadata `json:"_meta,omitempty"` + // Priority Priority to determine index template precedence when a new data stream or + // index is created. + // The index template with the highest priority is chosen. + // If no priority is specified the template is treated as though it is of + // priority 0 (lowest priority). + // This number is not automatically generated by Elasticsearch. + Priority *int `json:"priority,omitempty"` + // Template Template to be applied. + // It may optionally include an `aliases`, `mappings`, or `settings` + // configuration. + Template *types.IndexTemplateMapping `json:"template,omitempty"` + // Version Version number used to manage index templates externally. + // This number is not automatically generated by Elasticsearch. 
+ Version *int64 `json:"version,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putindextemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putindextemplate/response.go index d9e147b8b..9c1bdd5eb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putindextemplate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putindextemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putindextemplate // Response holds the response body struct for the package putindextemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/put_index_template/IndicesPutIndexTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/put_index_template/IndicesPutIndexTemplateResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putmapping/put_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putmapping/put_mapping.go index 5add93be0..bf8499d8d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putmapping/put_mapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putmapping/put_mapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Updates the index mappings. package putmapping @@ -35,6 +35,8 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -53,8 +55,9 @@ type PutMapping struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +73,7 @@ func NewPutMappingFunc(tp elastictransport.Interface) NewPutMapping { return func(index string) *PutMapping { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -85,6 +88,8 @@ func New(tp elastictransport.Interface) *PutMapping { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +119,19 @@ func (r *PutMapping) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +139,7 @@ func (r *PutMapping) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -204,7 +220,6 @@ func (r PutMapping) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -213,6 +228,10 @@ func (r PutMapping) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + 
return nil, errorResponse } @@ -226,61 +245,173 @@ func (r *PutMapping) Header(key, value string) *PutMapping { // Index A comma-separated list of index names the mapping should be added to // (supports wildcards); use `_all` or omit to add the mapping on all indices. // API Name: index -func (r *PutMapping) Index(v string) *PutMapping { +func (r *PutMapping) _index(index string) *PutMapping { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. // API name: allow_no_indices -func (r *PutMapping) AllowNoIndices(b bool) *PutMapping { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *PutMapping) AllowNoIndices(allownoindices bool) *PutMapping { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. 
// API name: expand_wildcards -func (r *PutMapping) ExpandWildcards(v string) *PutMapping { - r.values.Set("expand_wildcards", v) +func (r *PutMapping) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *PutMapping { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *PutMapping) IgnoreUnavailable(b bool) *PutMapping { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *PutMapping) IgnoreUnavailable(ignoreunavailable bool) *PutMapping { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *PutMapping) MasterTimeout(v string) *PutMapping { - r.values.Set("master_timeout", v) +func (r *PutMapping) MasterTimeout(duration string) *PutMapping { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
// API name: timeout -func (r *PutMapping) Timeout(v string) *PutMapping { - r.values.Set("timeout", v) +func (r *PutMapping) Timeout(duration string) *PutMapping { + r.values.Set("timeout", duration) return r } -// WriteIndexOnly When true, applies mappings only to the write index of an alias or data -// stream +// WriteIndexOnly If `true`, the mappings are applied only to the current write index for the +// target. // API name: write_index_only -func (r *PutMapping) WriteIndexOnly(b bool) *PutMapping { - r.values.Set("write_index_only", strconv.FormatBool(b)) +func (r *PutMapping) WriteIndexOnly(writeindexonly bool) *PutMapping { + r.values.Set("write_index_only", strconv.FormatBool(writeindexonly)) + + return r +} + +// DateDetection Controls whether dynamic date detection is enabled. +// API name: date_detection +func (r *PutMapping) DateDetection(datedetection bool) *PutMapping { + r.req.DateDetection = &datedetection + + return r +} + +// Dynamic Controls whether new fields are added dynamically. +// API name: dynamic +func (r *PutMapping) Dynamic(dynamic dynamicmapping.DynamicMapping) *PutMapping { + r.req.Dynamic = &dynamic + + return r +} + +// DynamicDateFormats If date detection is enabled then new string fields are checked +// against 'dynamic_date_formats' and if the value matches then +// a new date field is added instead of string. +// API name: dynamic_date_formats +func (r *PutMapping) DynamicDateFormats(dynamicdateformats ...string) *PutMapping { + r.req.DynamicDateFormats = dynamicdateformats + + return r +} + +// DynamicTemplates Specify dynamic templates for the mapping. +// API name: dynamic_templates +func (r *PutMapping) DynamicTemplates(dynamictemplates []map[string]types.DynamicTemplate) *PutMapping { + r.req.DynamicTemplates = dynamictemplates + + return r +} + +// FieldNames_ Control whether field names are enabled for the index. 
+// API name: _field_names +func (r *PutMapping) FieldNames_(fieldnames_ *types.FieldNamesField) *PutMapping { + + r.req.FieldNames_ = fieldnames_ + + return r +} + +// Meta_ A mapping type can have custom meta data associated with it. These are +// not used at all by Elasticsearch, but can be used to store +// application-specific metadata. +// API name: _meta +func (r *PutMapping) Meta_(metadata types.Metadata) *PutMapping { + r.req.Meta_ = metadata + + return r +} + +// NumericDetection Automatically map strings into numeric data types for all fields. +// API name: numeric_detection +func (r *PutMapping) NumericDetection(numericdetection bool) *PutMapping { + r.req.NumericDetection = &numericdetection + + return r +} + +// Properties Mapping for a field. For new fields, this mapping can include: +// +// - Field name +// - Field data type +// - Mapping parameters +// API name: properties +func (r *PutMapping) Properties(properties map[string]types.Property) *PutMapping { + + r.req.Properties = properties + + return r +} + +// Routing_ Enable making a routing value required on indexed documents. +// API name: _routing +func (r *PutMapping) Routing_(routing_ *types.RoutingField) *PutMapping { + + r.req.Routing_ = routing_ + + return r +} + +// Runtime Mapping of runtime fields for the index. +// API name: runtime +func (r *PutMapping) Runtime(runtimefields types.RuntimeFields) *PutMapping { + r.req.Runtime = runtimefields + + return r +} + +// Source_ Control whether the _source field is enabled on the index. 
+// API name: _source +func (r *PutMapping) Source_(source_ *types.SourceField) *PutMapping { + + r.req.Source_ = source_ return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putmapping/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putmapping/request.go index a46d12c1a..51b6cc9a7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putmapping/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putmapping/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putmapping @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package putmapping // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/put_mapping/IndicesPutMappingRequest.ts#L42-L116 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/put_mapping/IndicesPutMappingRequest.ts#L42-L149 type Request struct { // DateDetection Controls whether dynamic date detection is enabled. @@ -48,7 +48,7 @@ type Request struct { // Meta_ A mapping type can have custom meta data associated with it. These are // not used at all by Elasticsearch, but can be used to store // application-specific metadata. - Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` + Meta_ types.Metadata `json:"_meta,omitempty"` // NumericDetection Automatically map strings into numeric data types for all fields. NumericDetection *bool `json:"numeric_detection,omitempty"` // Properties Mapping for a field. 
For new fields, this mapping can include: @@ -60,7 +60,7 @@ type Request struct { // Routing_ Enable making a routing value required on indexed documents. Routing_ *types.RoutingField `json:"_routing,omitempty"` // Runtime Mapping of runtime fields for the index. - Runtime map[string]types.RuntimeField `json:"runtime,omitempty"` + Runtime types.RuntimeFields `json:"runtime,omitempty"` // Source_ Control whether the _source field is enabled on the index. Source_ *types.SourceField `json:"_source,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putmapping/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putmapping/response.go index c51119d28..ae9e785ee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putmapping/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putmapping/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putmapping @@ -26,10 +26,14 @@ import ( // Response holds the response body struct for the package putmapping // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/put_mapping/IndicesPutMappingResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/put_mapping/IndicesPutMappingResponse.ts#L22-L24 type Response struct { - Shards_ *types.ShardStatistics `json:"_shards,omitempty"` + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` + Shards_ *types.ShardStatistics `json:"_shards,omitempty"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putsettings/put_settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putsettings/put_settings.go index 0bf304399..48176d178 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putsettings/put_settings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putsettings/put_settings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Updates the index settings. package putsettings @@ -35,6 +35,8 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexcheckonstartup" ) const ( @@ -53,8 +55,9 @@ type PutSettings struct { buf *gobytes.Buffer - req *types.IndexSettings - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -97,7 +100,7 @@ func (r *PutSettings) Raw(raw io.Reader) *PutSettings { } // Request allows to set the request property with the appropriate payload. 
-func (r *PutSettings) Request(req *types.IndexSettings) *PutSettings { +func (r *PutSettings) Request(req *Request) *PutSettings { r.req = req return r @@ -112,9 +115,19 @@ func (r *PutSettings) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -122,6 +135,7 @@ func (r *PutSettings) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -207,7 +221,6 @@ func (r PutSettings) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -216,6 +229,10 @@ func (r PutSettings) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -226,72 +243,512 @@ func (r *PutSettings) Header(key, value string) *PutSettings { return r } -// Index A comma-separated list of index names; use `_all` or empty string to perform -// the operation on all indices -// API Name: index -func (r *PutSettings) Index(v string) *PutSettings { - r.paramSet |= indexMask - r.index = v - - return r -} - -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. This +// behavior applies even if the request targets other open indices. For +// example, a request targeting `foo*,bar*` returns an error if an index +// starts with `foo` but no index starts with `bar`. 
// API name: allow_no_indices -func (r *PutSettings) AllowNoIndices(b bool) *PutSettings { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *PutSettings) AllowNoIndices(allownoindices bool) *PutSettings { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. If the request can target +// data streams, this argument determines whether wildcard expressions match +// hidden data streams. Supports comma-separated values, such as +// `open,hidden`. // API name: expand_wildcards -func (r *PutSettings) ExpandWildcards(v string) *PutSettings { - r.values.Set("expand_wildcards", v) +func (r *PutSettings) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *PutSettings { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// FlatSettings Return settings in flat format (default: false) +// FlatSettings If `true`, returns settings in flat format. // API name: flat_settings -func (r *PutSettings) FlatSettings(b bool) *PutSettings { - r.values.Set("flat_settings", strconv.FormatBool(b)) +func (r *PutSettings) FlatSettings(flatsettings bool) *PutSettings { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `true`, returns settings in flat format. 
// API name: ignore_unavailable -func (r *PutSettings) IgnoreUnavailable(b bool) *PutSettings { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *PutSettings) IgnoreUnavailable(ignoreunavailable bool) *PutSettings { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. If no response is +// received before the timeout expires, the request fails and returns an +// error. // API name: master_timeout -func (r *PutSettings) MasterTimeout(v string) *PutSettings { - r.values.Set("master_timeout", v) +func (r *PutSettings) MasterTimeout(duration string) *PutSettings { + r.values.Set("master_timeout", duration) return r } -// PreserveExisting Whether to update existing settings. If set to `true` existing settings on an -// index remain unchanged, the default is `false` +// PreserveExisting If `true`, existing index settings remain unchanged. // API name: preserve_existing -func (r *PutSettings) PreserveExisting(b bool) *PutSettings { - r.values.Set("preserve_existing", strconv.FormatBool(b)) +func (r *PutSettings) PreserveExisting(preserveexisting bool) *PutSettings { + r.values.Set("preserve_existing", strconv.FormatBool(preserveexisting)) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. If no response is received before the +// timeout expires, the request fails and returns an error. // API name: timeout -func (r *PutSettings) Timeout(v string) *PutSettings { - r.values.Set("timeout", v) +func (r *PutSettings) Timeout(duration string) *PutSettings { + r.values.Set("timeout", duration) + + return r +} + +// API name: analysis +func (r *PutSettings) Analysis(analysis *types.IndexSettingsAnalysis) *PutSettings { + + r.req.Analysis = analysis + + return r +} + +// Analyze Settings to define analyzers, tokenizers, token filters and character +// filters. 
+// API name: analyze +func (r *PutSettings) Analyze(analyze *types.SettingsAnalyze) *PutSettings { + + r.req.Analyze = analyze + + return r +} + +// API name: auto_expand_replicas +func (r *PutSettings) AutoExpandReplicas(autoexpandreplicas string) *PutSettings { + + r.req.AutoExpandReplicas = &autoexpandreplicas + + return r +} + +// API name: blocks +func (r *PutSettings) Blocks(blocks *types.IndexSettingBlocks) *PutSettings { + + r.req.Blocks = blocks + + return r +} + +// API name: check_on_startup +func (r *PutSettings) CheckOnStartup(checkonstartup indexcheckonstartup.IndexCheckOnStartup) *PutSettings { + r.req.CheckOnStartup = &checkonstartup + + return r +} + +// API name: codec +func (r *PutSettings) Codec(codec string) *PutSettings { + + r.req.Codec = &codec + + return r +} + +// API name: creation_date +func (r *PutSettings) CreationDate(stringifiedepochtimeunitmillis types.StringifiedEpochTimeUnitMillis) *PutSettings { + r.req.CreationDate = stringifiedepochtimeunitmillis + + return r +} + +// API name: creation_date_string +func (r *PutSettings) CreationDateString(datetime types.DateTime) *PutSettings { + r.req.CreationDateString = datetime + + return r +} + +// API name: default_pipeline +func (r *PutSettings) DefaultPipeline(pipelinename string) *PutSettings { + r.req.DefaultPipeline = &pipelinename + + return r +} + +// API name: final_pipeline +func (r *PutSettings) FinalPipeline(pipelinename string) *PutSettings { + r.req.FinalPipeline = &pipelinename + + return r +} + +// API name: format +func (r *PutSettings) Format(format string) *PutSettings { + r.req.Format = format + + return r +} + +// API name: gc_deletes +func (r *PutSettings) GcDeletes(duration types.Duration) *PutSettings { + r.req.GcDeletes = duration + + return r +} + +// API name: hidden +func (r *PutSettings) Hidden(hidden string) *PutSettings { + r.req.Hidden = hidden + + return r +} + +// API name: highlight +func (r *PutSettings) Highlight(highlight *types.SettingsHighlight) 
*PutSettings { + + r.req.Highlight = highlight + + return r +} + +// API name: index +func (r *PutSettings) Index(index *types.IndexSettings) *PutSettings { + + r.req.Index = index + + return r +} + +// API name: IndexSettings +func (r *PutSettings) IndexSettings(indexsettings map[string]json.RawMessage) *PutSettings { + + r.req.IndexSettings = indexsettings + + return r +} + +// IndexingPressure Configure indexing back pressure limits. +// API name: indexing_pressure +func (r *PutSettings) IndexingPressure(indexingpressure *types.IndicesIndexingPressure) *PutSettings { + + r.req.IndexingPressure = indexingpressure + + return r +} + +// API name: indexing.slowlog +func (r *PutSettings) IndexingSlowlog(indexingslowlog *types.IndexingSlowlogSettings) *PutSettings { + + r.req.IndexingSlowlog = indexingslowlog + + return r +} + +// API name: lifecycle +func (r *PutSettings) Lifecycle(lifecycle *types.IndexSettingsLifecycle) *PutSettings { + + r.req.Lifecycle = lifecycle + + return r +} + +// API name: load_fixed_bitset_filters_eagerly +func (r *PutSettings) LoadFixedBitsetFiltersEagerly(loadfixedbitsetfilterseagerly bool) *PutSettings { + r.req.LoadFixedBitsetFiltersEagerly = &loadfixedbitsetfilterseagerly + + return r +} + +// Mapping Enable or disable dynamic mapping for an index. 
+// API name: mapping +func (r *PutSettings) Mapping(mapping *types.MappingLimitSettings) *PutSettings { + + r.req.Mapping = mapping + + return r +} + +// API name: max_docvalue_fields_search +func (r *PutSettings) MaxDocvalueFieldsSearch(maxdocvaluefieldssearch int) *PutSettings { + r.req.MaxDocvalueFieldsSearch = &maxdocvaluefieldssearch + + return r +} + +// API name: max_inner_result_window +func (r *PutSettings) MaxInnerResultWindow(maxinnerresultwindow int) *PutSettings { + r.req.MaxInnerResultWindow = &maxinnerresultwindow + + return r +} + +// API name: max_ngram_diff +func (r *PutSettings) MaxNgramDiff(maxngramdiff int) *PutSettings { + r.req.MaxNgramDiff = &maxngramdiff + + return r +} + +// API name: max_refresh_listeners +func (r *PutSettings) MaxRefreshListeners(maxrefreshlisteners int) *PutSettings { + r.req.MaxRefreshListeners = &maxrefreshlisteners + + return r +} + +// API name: max_regex_length +func (r *PutSettings) MaxRegexLength(maxregexlength int) *PutSettings { + r.req.MaxRegexLength = &maxregexlength + + return r +} + +// API name: max_rescore_window +func (r *PutSettings) MaxRescoreWindow(maxrescorewindow int) *PutSettings { + r.req.MaxRescoreWindow = &maxrescorewindow + + return r +} + +// API name: max_result_window +func (r *PutSettings) MaxResultWindow(maxresultwindow int) *PutSettings { + r.req.MaxResultWindow = &maxresultwindow + + return r +} + +// API name: max_script_fields +func (r *PutSettings) MaxScriptFields(maxscriptfields int) *PutSettings { + r.req.MaxScriptFields = &maxscriptfields + + return r +} + +// API name: max_shingle_diff +func (r *PutSettings) MaxShingleDiff(maxshinglediff int) *PutSettings { + r.req.MaxShingleDiff = &maxshinglediff + + return r +} + +// API name: max_slices_per_scroll +func (r *PutSettings) MaxSlicesPerScroll(maxslicesperscroll int) *PutSettings { + r.req.MaxSlicesPerScroll = &maxslicesperscroll + + return r +} + +// API name: max_terms_count +func (r *PutSettings) MaxTermsCount(maxtermscount int) 
*PutSettings { + r.req.MaxTermsCount = &maxtermscount + + return r +} + +// API name: merge +func (r *PutSettings) Merge(merge *types.Merge) *PutSettings { + + r.req.Merge = merge + + return r +} + +// API name: mode +func (r *PutSettings) Mode(mode string) *PutSettings { + + r.req.Mode = &mode + + return r +} + +// API name: number_of_replicas +func (r *PutSettings) NumberOfReplicas(numberofreplicas string) *PutSettings { + r.req.NumberOfReplicas = numberofreplicas + + return r +} + +// API name: number_of_routing_shards +func (r *PutSettings) NumberOfRoutingShards(numberofroutingshards int) *PutSettings { + r.req.NumberOfRoutingShards = &numberofroutingshards + + return r +} + +// API name: number_of_shards +func (r *PutSettings) NumberOfShards(numberofshards string) *PutSettings { + r.req.NumberOfShards = numberofshards + + return r +} + +// API name: priority +func (r *PutSettings) Priority(priority string) *PutSettings { + r.req.Priority = priority + + return r +} + +// API name: provided_name +func (r *PutSettings) ProvidedName(name string) *PutSettings { + r.req.ProvidedName = &name + + return r +} + +// API name: queries +func (r *PutSettings) Queries(queries *types.Queries) *PutSettings { + + r.req.Queries = queries + + return r +} + +// API name: query_string +func (r *PutSettings) QueryString(querystring *types.SettingsQueryString) *PutSettings { + + r.req.QueryString = querystring + + return r +} + +// API name: refresh_interval +func (r *PutSettings) RefreshInterval(duration types.Duration) *PutSettings { + r.req.RefreshInterval = duration + + return r +} + +// API name: routing +func (r *PutSettings) Routing(routing *types.IndexRouting) *PutSettings { + + r.req.Routing = routing + + return r +} + +// API name: routing_partition_size +func (r *PutSettings) RoutingPartitionSize(stringifiedinteger types.Stringifiedinteger) *PutSettings { + r.req.RoutingPartitionSize = stringifiedinteger + + return r +} + +// API name: routing_path +func (r *PutSettings) 
RoutingPath(routingpaths ...string) *PutSettings { + r.req.RoutingPath = routingpaths + + return r +} + +// API name: search +func (r *PutSettings) Search(search *types.SettingsSearch) *PutSettings { + + r.req.Search = search + + return r +} + +// API name: settings +func (r *PutSettings) Settings(settings *types.IndexSettings) *PutSettings { + + r.req.Settings = settings + + return r +} + +// API name: shards +func (r *PutSettings) Shards(shards int) *PutSettings { + r.req.Shards = &shards + + return r +} + +// Similarity Configure custom similarity settings to customize how search results are +// scored. +// API name: similarity +func (r *PutSettings) Similarity(similarity *types.SettingsSimilarity) *PutSettings { + + r.req.Similarity = similarity + + return r +} + +// API name: soft_deletes +func (r *PutSettings) SoftDeletes(softdeletes *types.SoftDeletes) *PutSettings { + + r.req.SoftDeletes = softdeletes + + return r +} + +// API name: sort +func (r *PutSettings) Sort(sort *types.IndexSegmentSort) *PutSettings { + + r.req.Sort = sort + + return r +} + +// Store The store module allows you to control how index data is stored and accessed +// on disk. 
+// API name: store +func (r *PutSettings) Store(store *types.Storage) *PutSettings { + + r.req.Store = store + + return r +} + +// API name: time_series +func (r *PutSettings) TimeSeries(timeseries *types.IndexSettingsTimeSeries) *PutSettings { + + r.req.TimeSeries = timeseries + + return r +} + +// API name: top_metrics_max_size +func (r *PutSettings) TopMetricsMaxSize(topmetricsmaxsize int) *PutSettings { + r.req.TopMetricsMaxSize = &topmetricsmaxsize + + return r +} + +// API name: translog +func (r *PutSettings) Translog(translog *types.Translog) *PutSettings { + + r.req.Translog = translog + + return r +} + +// API name: uuid +func (r *PutSettings) Uuid(uuid string) *PutSettings { + r.req.Uuid = &uuid + + return r +} + +// API name: verified_before_close +func (r *PutSettings) VerifiedBeforeClose(verifiedbeforeclose string) *PutSettings { + r.req.VerifiedBeforeClose = verifiedbeforeclose + + return r +} + +// API name: version +func (r *PutSettings) Version(version *types.IndexVersioning) *PutSettings { + + r.req.Version = version return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putsettings/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putsettings/request.go new file mode 100644 index 000000000..0eccdf621 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putsettings/request.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package putsettings + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package putsettings +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/put_settings/IndicesPutSettingsRequest.ts#L25-L92 +type Request = types.IndexSettings diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putsettings/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putsettings/response.go index fcdf7d3cb..6e8fbd556 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putsettings/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/putsettings/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putsettings // Response holds the response body struct for the package putsettings // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/put_settings/IndicesPutSettingsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/put_settings/IndicesPutSettingsResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/puttemplate/put_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/puttemplate/put_template.go index 5b4a8de55..beac21983 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/puttemplate/put_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/puttemplate/put_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates or updates an index template. 
package puttemplate @@ -53,8 +53,9 @@ type PutTemplate struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +71,7 @@ func NewPutTemplateFunc(tp elastictransport.Interface) NewPutTemplate { return func(name string) *PutTemplate { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -85,6 +86,8 @@ func New(tp elastictransport.Interface) *PutTemplate { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +117,19 @@ func (r *PutTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +137,7 @@ func (r *PutTemplate) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -204,7 +218,6 @@ func (r PutTemplate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -213,6 +226,10 @@ func (r PutTemplate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -225,24 +242,25 @@ func (r *PutTemplate) Header(key, value string) *PutTemplate { // Name The name of the template // API Name: name -func (r *PutTemplate) Name(v string) *PutTemplate { +func (r *PutTemplate) _name(name string) *PutTemplate { r.paramSet |= nameMask - r.name = v + r.name = name return r } // Create If true, this request cannot replace or update existing index templates. 
// API name: create -func (r *PutTemplate) Create(b bool) *PutTemplate { - r.values.Set("create", strconv.FormatBool(b)) +func (r *PutTemplate) Create(create bool) *PutTemplate { + r.values.Set("create", strconv.FormatBool(create)) return r } +// FlatSettings If `true`, returns settings in flat format. // API name: flat_settings -func (r *PutTemplate) FlatSettings(b bool) *PutTemplate { - r.values.Set("flat_settings", strconv.FormatBool(b)) +func (r *PutTemplate) FlatSettings(flatsettings bool) *PutTemplate { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) return r } @@ -250,15 +268,45 @@ func (r *PutTemplate) FlatSettings(b bool) *PutTemplate { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *PutTemplate) MasterTimeout(v string) *PutTemplate { - r.values.Set("master_timeout", v) +func (r *PutTemplate) MasterTimeout(duration string) *PutTemplate { + r.values.Set("master_timeout", duration) return r } +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *PutTemplate) Timeout(v string) *PutTemplate { - r.values.Set("timeout", v) +func (r *PutTemplate) Timeout(duration string) *PutTemplate { + r.values.Set("timeout", duration) + + return r +} + +// Aliases Aliases for the index. +// API name: aliases +func (r *PutTemplate) Aliases(aliases map[string]types.Alias) *PutTemplate { + + r.req.Aliases = aliases + + return r +} + +// IndexPatterns Array of wildcard expressions used to match the names +// of indices during creation. +// API name: index_patterns +func (r *PutTemplate) IndexPatterns(indexpatterns ...string) *PutTemplate { + r.req.IndexPatterns = indexpatterns + + return r +} + +// Mappings Mapping for fields in the index. 
+// API name: mappings +func (r *PutTemplate) Mappings(mappings *types.TypeMapping) *PutTemplate { + + r.req.Mappings = mappings return r } @@ -269,8 +317,26 @@ func (r *PutTemplate) Timeout(v string) *PutTemplate { // Templates with lower 'order' values are merged first. Templates with higher // 'order' values are merged later, overriding templates with lower values. // API name: order -func (r *PutTemplate) Order(i int) *PutTemplate { - r.values.Set("order", strconv.Itoa(i)) +func (r *PutTemplate) Order(order int) *PutTemplate { + r.req.Order = &order + + return r +} + +// Settings Configuration options for the index. +// API name: settings +func (r *PutTemplate) Settings(settings map[string]json.RawMessage) *PutTemplate { + + r.req.Settings = settings + + return r +} + +// Version Version number used to manage index templates externally. This number +// is not automatically generated by Elasticsearch. +// API name: version +func (r *PutTemplate) Version(versionnumber int64) *PutTemplate { + r.req.Version = &versionnumber return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/puttemplate/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/puttemplate/request.go index 84190e7c3..0df70fa54 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/puttemplate/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/puttemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package puttemplate @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package puttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/put_template/IndicesPutTemplateRequest.ts#L29-L93 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/put_template/IndicesPutTemplateRequest.ts#L29-L105 type Request struct { // Aliases Aliases for the index. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/puttemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/puttemplate/response.go index 87559f01f..53af3682d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/puttemplate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/puttemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package puttemplate // Response holds the response body struct for the package puttemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/put_template/IndicesPutTemplateResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/put_template/IndicesPutTemplateResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/recovery/recovery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/recovery/recovery.go index 84c2b8b12..40375f11f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/recovery/recovery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/recovery/recovery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about ongoing index shard recoveries. 
package recovery @@ -172,7 +172,6 @@ func (r Recovery) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r Recovery) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,28 +215,31 @@ func (r *Recovery) Header(key, value string) *Recovery { return r } -// Index A comma-separated list of index names; use `_all` or empty string to perform -// the operation on all indices +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. // API Name: index -func (r *Recovery) Index(v string) *Recovery { +func (r *Recovery) Index(index string) *Recovery { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// ActiveOnly Display only those recoveries that are currently on-going +// ActiveOnly If `true`, the response only includes ongoing shard recoveries. // API name: active_only -func (r *Recovery) ActiveOnly(b bool) *Recovery { - r.values.Set("active_only", strconv.FormatBool(b)) +func (r *Recovery) ActiveOnly(activeonly bool) *Recovery { + r.values.Set("active_only", strconv.FormatBool(activeonly)) return r } -// Detailed Whether to display detailed information about shard recovery +// Detailed If `true`, the response includes detailed information about shard recoveries. 
// API name: detailed -func (r *Recovery) Detailed(b bool) *Recovery { - r.values.Set("detailed", strconv.FormatBool(b)) +func (r *Recovery) Detailed(detailed bool) *Recovery { + r.values.Set("detailed", strconv.FormatBool(detailed)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/recovery/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/recovery/response.go index 4e81795ce..c043dc1f1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/recovery/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/recovery/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package recovery @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package recovery // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/recovery/IndicesRecoveryResponse.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/recovery/IndicesRecoveryResponse.ts#L24-L26 type Response map[string]types.RecoveryStatus diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/refresh/refresh.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/refresh/refresh.go index 3f3977b04..ec3e2c301 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/refresh/refresh.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/refresh/refresh.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Performs the refresh operation in one or more indices. package refresh @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -172,7 +173,6 @@ func (r Refresh) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +181,10 @@ func (r Refresh) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,39 +216,50 @@ func (r *Refresh) Header(key, value string) *Refresh { return r } -// Index A comma-separated list of index names; use `_all` or empty string to perform -// the operation on all indices +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. // API Name: index -func (r *Refresh) Index(v string) *Refresh { +func (r *Refresh) Index(index string) *Refresh { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. 
// API name: allow_no_indices -func (r *Refresh) AllowNoIndices(b bool) *Refresh { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *Refresh) AllowNoIndices(allownoindices bool) *Refresh { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *Refresh) ExpandWildcards(v string) *Refresh { - r.values.Set("expand_wildcards", v) +func (r *Refresh) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Refresh { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. 
// API name: ignore_unavailable -func (r *Refresh) IgnoreUnavailable(b bool) *Refresh { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Refresh) IgnoreUnavailable(ignoreunavailable bool) *Refresh { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/refresh/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/refresh/response.go index 1c4ed0740..3a6136679 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/refresh/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/refresh/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package refresh @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package refresh // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/refresh/IndicesRefreshResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/refresh/IndicesRefreshResponse.ts#L22-L24 type Response struct { Shards_ types.ShardStatistics `json:"_shards"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/reloadsearchanalyzers/reload_search_analyzers.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/reloadsearchanalyzers/reload_search_analyzers.go index 8568a85c1..cfafc1dcd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/reloadsearchanalyzers/reload_search_analyzers.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/reloadsearchanalyzers/reload_search_analyzers.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Reloads an index's search analyzers and their resources. package reloadsearchanalyzers @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -68,7 +69,7 @@ func NewReloadSearchAnalyzersFunc(tp elastictransport.Interface) NewReloadSearch return func(index string) *ReloadSearchAnalyzers { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -169,7 +170,6 @@ func (r ReloadSearchAnalyzers) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -178,6 +178,10 @@ func (r ReloadSearchAnalyzers) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -211,9 +215,9 @@ func (r *ReloadSearchAnalyzers) Header(key, value string) *ReloadSearchAnalyzers // Index A comma-separated list of index names to reload analyzers for // API Name: index -func (r *ReloadSearchAnalyzers) Index(v string) *ReloadSearchAnalyzers { +func (r *ReloadSearchAnalyzers) _index(index string) *ReloadSearchAnalyzers { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -221,8 +225,8 @@ func (r *ReloadSearchAnalyzers) Index(v string) *ReloadSearchAnalyzers { // AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete // indices. 
(This includes `_all` string or when no indices have been specified) // API name: allow_no_indices -func (r *ReloadSearchAnalyzers) AllowNoIndices(b bool) *ReloadSearchAnalyzers { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *ReloadSearchAnalyzers) AllowNoIndices(allownoindices bool) *ReloadSearchAnalyzers { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } @@ -230,8 +234,12 @@ func (r *ReloadSearchAnalyzers) AllowNoIndices(b bool) *ReloadSearchAnalyzers { // ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, // closed or both. // API name: expand_wildcards -func (r *ReloadSearchAnalyzers) ExpandWildcards(v string) *ReloadSearchAnalyzers { - r.values.Set("expand_wildcards", v) +func (r *ReloadSearchAnalyzers) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ReloadSearchAnalyzers { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } @@ -239,8 +247,8 @@ func (r *ReloadSearchAnalyzers) ExpandWildcards(v string) *ReloadSearchAnalyzers // IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable // (missing or closed) // API name: ignore_unavailable -func (r *ReloadSearchAnalyzers) IgnoreUnavailable(b bool) *ReloadSearchAnalyzers { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *ReloadSearchAnalyzers) IgnoreUnavailable(ignoreunavailable bool) *ReloadSearchAnalyzers { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/reloadsearchanalyzers/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/reloadsearchanalyzers/response.go index c2396fb18..5b2e2a3ae 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/reloadsearchanalyzers/response.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/reloadsearchanalyzers/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package reloadsearchanalyzers @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package reloadsearchanalyzers // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/reload_search_analyzers/ReloadSearchAnalyzersResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/reload_search_analyzers/ReloadSearchAnalyzersResponse.ts#L22-L24 type Response struct { ReloadDetails []types.ReloadDetails `json:"reload_details"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/resolveindex/resolve_index.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/resolveindex/resolve_index.go index 54d9df6e8..3c53c3e3e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/resolveindex/resolve_index.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/resolveindex/resolve_index.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about any matching indices, aliases, and data streams package resolveindex @@ -35,6 +35,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -67,7 +68,7 @@ func NewResolveIndexFunc(tp elastictransport.Interface) NewResolveIndex { return func(name string) *ResolveIndex { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -170,7 +171,6 @@ func (r ResolveIndex) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +179,10 @@ func (r ResolveIndex) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -210,20 +214,30 @@ func (r *ResolveIndex) Header(key, value string) *ResolveIndex { return r } -// Name A comma-separated list of names or wildcard expressions +// Name Comma-separated name(s) or index pattern(s) of the indices, aliases, and data +// streams to resolve. +// Resources on remote clusters can be specified using the ``:`` +// syntax. // API Name: name -func (r *ResolveIndex) Name(v string) *ResolveIndex { +func (r *ResolveIndex) _name(name string) *ResolveIndex { r.paramSet |= nameMask - r.name = v + r.name = name return r } -// ExpandWildcards Whether wildcard expressions should get expanded to open or closed indices -// (default: open) +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. 
+// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *ResolveIndex) ExpandWildcards(v string) *ResolveIndex { - r.values.Set("expand_wildcards", v) +func (r *ResolveIndex) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ResolveIndex { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/resolveindex/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/resolveindex/response.go index 4aea529b4..691a71499 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/resolveindex/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/resolveindex/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package resolveindex @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package resolveindex // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/resolve_index/ResolveIndexResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/resolve_index/ResolveIndexResponse.ts#L22-L28 type Response struct { Aliases []types.ResolveIndexAliasItem `json:"aliases"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/rollover/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/rollover/request.go index c6265368e..7ce1fa339 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/rollover/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/rollover/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package rollover @@ -29,12 +29,29 @@ import ( // Request holds the request body struct for the package rollover // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/rollover/IndicesRolloverRequest.ts#L29-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/rollover/IndicesRolloverRequest.ts#L29-L99 type Request struct { - Aliases map[string]types.Alias `json:"aliases,omitempty"` - Conditions *types.RolloverConditions `json:"conditions,omitempty"` - Mappings *types.TypeMapping `json:"mappings,omitempty"` - Settings map[string]json.RawMessage `json:"settings,omitempty"` + + // Aliases Aliases for the target index. + // Data streams do not support this parameter. + Aliases map[string]types.Alias `json:"aliases,omitempty"` + // Conditions Conditions for the rollover. + // If specified, Elasticsearch only performs the rollover if the current index + // satisfies these conditions. + // If this parameter is not specified, Elasticsearch performs the rollover + // unconditionally. + // If conditions are specified, at least one of them must be a `max_*` + // condition. + // The index will rollover if any `max_*` condition is satisfied and all `min_*` + // conditions are satisfied. + Conditions *types.RolloverConditions `json:"conditions,omitempty"` + // Mappings Mapping for fields in the index. + // If specified, this mapping can include field names, field data types, and + // mapping paramaters. + Mappings *types.TypeMapping `json:"mappings,omitempty"` + // Settings Configuration options for the index. + // Data streams do not support this parameter. 
+ Settings map[string]json.RawMessage `json:"settings,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/rollover/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/rollover/response.go index dccc7223e..79e4b6cb6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/rollover/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/rollover/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package rollover // Response holds the response body struct for the package rollover // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/rollover/IndicesRolloverResponse.ts#L22-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/rollover/IndicesRolloverResponse.ts#L22-L32 type Response struct { Acknowledged bool `json:"acknowledged"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/rollover/rollover.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/rollover/rollover.go index 32aac75e3..6838c6538 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/rollover/rollover.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/rollover/rollover.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Updates an alias to point to a new index when the existing index // is considered to be too large or too old. @@ -56,8 +56,9 @@ type Rollover struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -74,7 +75,7 @@ func NewRolloverFunc(tp elastictransport.Interface) NewRollover { return func(alias string) *Rollover { n := New(tp) - n.Alias(alias) + n._alias(alias) return n } @@ -83,13 +84,15 @@ func NewRolloverFunc(tp elastictransport.Interface) NewRollover { // Updates an alias to point to a new index when the existing index // is considered to be too large or too old. // -// https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-rollover-index.html +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-rollover-index.html func New(tp elastictransport.Interface) *Rollover { r := &Rollover{ transport: tp, values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -119,9 +122,19 @@ func (r *Rollover) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -129,6 +142,7 @@ func (r *Rollover) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -220,7 +234,6 @@ func (r Rollover) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -229,6 +242,10 @@ func (r Rollover) 
Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -239,54 +256,110 @@ func (r *Rollover) Header(key, value string) *Rollover { return r } -// Alias The name of the alias to rollover +// Alias Name of the data stream or index alias to roll over. // API Name: alias -func (r *Rollover) Alias(v string) *Rollover { +func (r *Rollover) _alias(alias string) *Rollover { r.paramSet |= aliasMask - r.alias = v + r.alias = alias return r } -// NewIndex The name of the rollover index +// NewIndex Name of the index to create. +// Supports date math. +// Data streams do not support this parameter. // API Name: newindex -func (r *Rollover) NewIndex(v string) *Rollover { +func (r *Rollover) NewIndex(newindex string) *Rollover { r.paramSet |= newindexMask - r.newindex = v + r.newindex = newindex return r } -// DryRun If set to true the rollover action will only be validated but not actually -// performed even if a condition matches. The default is false +// DryRun If `true`, checks whether the current index satisfies the specified +// conditions but does not perform a rollover. // API name: dry_run -func (r *Rollover) DryRun(b bool) *Rollover { - r.values.Set("dry_run", strconv.FormatBool(b)) +func (r *Rollover) DryRun(dryrun bool) *Rollover { + r.values.Set("dry_run", strconv.FormatBool(dryrun)) return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *Rollover) MasterTimeout(v string) *Rollover { - r.values.Set("master_timeout", v) +func (r *Rollover) MasterTimeout(duration string) *Rollover { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. 
+// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *Rollover) Timeout(v string) *Rollover { - r.values.Set("timeout", v) +func (r *Rollover) Timeout(duration string) *Rollover { + r.values.Set("timeout", duration) return r } -// WaitForActiveShards Set the number of active shards to wait for on the newly created rollover -// index before the operation returns. +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to all or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). // API name: wait_for_active_shards -func (r *Rollover) WaitForActiveShards(v string) *Rollover { - r.values.Set("wait_for_active_shards", v) +func (r *Rollover) WaitForActiveShards(waitforactiveshards string) *Rollover { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// Aliases Aliases for the target index. +// Data streams do not support this parameter. +// API name: aliases +func (r *Rollover) Aliases(aliases map[string]types.Alias) *Rollover { + + r.req.Aliases = aliases + + return r +} + +// Conditions Conditions for the rollover. +// If specified, Elasticsearch only performs the rollover if the current index +// satisfies these conditions. +// If this parameter is not specified, Elasticsearch performs the rollover +// unconditionally. +// If conditions are specified, at least one of them must be a `max_*` +// condition. +// The index will rollover if any `max_*` condition is satisfied and all `min_*` +// conditions are satisfied. +// API name: conditions +func (r *Rollover) Conditions(conditions *types.RolloverConditions) *Rollover { + + r.req.Conditions = conditions + + return r +} + +// Mappings Mapping for fields in the index. +// If specified, this mapping can include field names, field data types, and +// mapping paramaters. 
+// API name: mappings +func (r *Rollover) Mappings(mappings *types.TypeMapping) *Rollover { + + r.req.Mappings = mappings + + return r +} + +// Settings Configuration options for the index. +// Data streams do not support this parameter. +// API name: settings +func (r *Rollover) Settings(settings map[string]json.RawMessage) *Rollover { + + r.req.Settings = settings return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/segments/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/segments/response.go index 5db29e560..0c19ff9c6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/segments/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/segments/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package segments @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package segments // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/segments/IndicesSegmentsResponse.ts#L24-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/segments/IndicesSegmentsResponse.ts#L24-L29 type Response struct { Indices map[string]types.IndexSegment `json:"indices"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/segments/segments.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/segments/segments.go index 077ffdb5e..ac59fdf38 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/segments/segments.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/segments/segments.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Provides low-level information about segments in a Lucene index. package segments @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -172,7 +173,6 @@ func (r Segments) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +181,10 @@ func (r Segments) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,47 +216,58 @@ func (r *Segments) Header(key, value string) *Segments { return r } -// Index A comma-separated list of index names; use `_all` or empty string to perform -// the operation on all indices +// Index Comma-separated list of data streams, indices, and aliases used to limit the +// request. +// Supports wildcards (`*`). +// To target all data streams and indices, omit this parameter or use `*` or +// `_all`. // API Name: index -func (r *Segments) Index(v string) *Segments { +func (r *Segments) Index(index string) *Segments { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. 
(This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. // API name: allow_no_indices -func (r *Segments) AllowNoIndices(b bool) *Segments { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *Segments) AllowNoIndices(allownoindices bool) *Segments { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *Segments) ExpandWildcards(v string) *Segments { - r.values.Set("expand_wildcards", v) +func (r *Segments) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Segments { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. 
// API name: ignore_unavailable -func (r *Segments) IgnoreUnavailable(b bool) *Segments { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Segments) IgnoreUnavailable(ignoreunavailable bool) *Segments { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// Verbose Includes detailed memory usage by Lucene. +// Verbose If `true`, the request returns a verbose response. // API name: verbose -func (r *Segments) Verbose(b bool) *Segments { - r.values.Set("verbose", strconv.FormatBool(b)) +func (r *Segments) Verbose(verbose bool) *Segments { + r.values.Set("verbose", strconv.FormatBool(verbose)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shardstores/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shardstores/response.go index 465232dd9..3d953d65a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shardstores/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shardstores/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package shardstores @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package shardstores // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/shard_stores/IndicesShardStoresResponse.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/shard_stores/IndicesShardStoresResponse.ts#L24-L26 type Response struct { Indices map[string]types.IndicesShardStores `json:"indices"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shardstores/shard_stores.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shardstores/shard_stores.go index 8c8985898..f99e28b18 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shardstores/shard_stores.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shardstores/shard_stores.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Provides store information for shard copies of indices. 
package shardstores @@ -36,6 +36,8 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardstorestatus" ) const ( @@ -172,7 +174,6 @@ func (r ShardStores) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +182,10 @@ func (r ShardStores) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +219,9 @@ func (r *ShardStores) Header(key, value string) *ShardStores { // Index List of data streams, indices, and aliases used to limit the request. // API Name: index -func (r *ShardStores) Index(v string) *ShardStores { +func (r *ShardStores) Index(index string) *ShardStores { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -227,8 +232,8 @@ func (r *ShardStores) Index(v string) *ShardStores { // the request // targets other open indices. // API name: allow_no_indices -func (r *ShardStores) AllowNoIndices(b bool) *ShardStores { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *ShardStores) AllowNoIndices(allownoindices bool) *ShardStores { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } @@ -238,24 +243,32 @@ func (r *ShardStores) AllowNoIndices(b bool) *ShardStores { // this argument determines whether wildcard expressions match hidden data // streams. 
// API name: expand_wildcards -func (r *ShardStores) ExpandWildcards(v string) *ShardStores { - r.values.Set("expand_wildcards", v) +func (r *ShardStores) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ShardStores { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } // IgnoreUnavailable If true, missing or closed indices are not included in the response. // API name: ignore_unavailable -func (r *ShardStores) IgnoreUnavailable(b bool) *ShardStores { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *ShardStores) IgnoreUnavailable(ignoreunavailable bool) *ShardStores { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } // Status List of shard health statuses used to limit the request. // API name: status -func (r *ShardStores) Status(v string) *ShardStores { - r.values.Set("status", v) +func (r *ShardStores) Status(statuses ...shardstorestatus.ShardStoreStatus) *ShardStores { + tmp := []string{} + for _, item := range statuses { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("status", strings.Join(tmp, ",")) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shrink/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shrink/request.go index 3bba692e9..cd172a625 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shrink/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shrink/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package shrink @@ -29,9 +29,13 @@ import ( // Request holds the request body struct for the package shrink // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/shrink/IndicesShrinkRequest.ts#L27-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/shrink/IndicesShrinkRequest.ts#L27-L75 type Request struct { - Aliases map[string]types.Alias `json:"aliases,omitempty"` + + // Aliases The key is the alias name. + // Index alias names support date math. + Aliases map[string]types.Alias `json:"aliases,omitempty"` + // Settings Configuration options for the target index. Settings map[string]json.RawMessage `json:"settings,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shrink/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shrink/response.go index 5c152879f..dc55ff4f1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shrink/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shrink/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package shrink // Response holds the response body struct for the package shrink // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/shrink/IndicesShrinkResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/shrink/IndicesShrinkResponse.ts#L22-L28 type Response struct { Acknowledged bool `json:"acknowledged"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shrink/shrink.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shrink/shrink.go index e56e6cd54..06a50a7a5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shrink/shrink.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/shrink/shrink.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Allow to shrink an existing index into a new index with fewer primary shards. 
package shrink @@ -54,8 +54,9 @@ type Shrink struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -72,9 +73,9 @@ func NewShrinkFunc(tp elastictransport.Interface) NewShrink { return func(index, target string) *Shrink { n := New(tp) - n.Index(index) + n._index(index) - n.Target(target) + n._target(target) return n } @@ -82,13 +83,15 @@ func NewShrinkFunc(tp elastictransport.Interface) NewShrink { // Allow to shrink an existing index into a new index with fewer primary shards. // -// https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-index.html +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-shrink-index.html func New(tp elastictransport.Interface) *Shrink { r := &Shrink{ transport: tp, values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -118,9 +121,19 @@ func (r *Shrink) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -128,6 +141,7 @@ func (r *Shrink) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -211,7 +225,6 @@ func (r Shrink) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -220,6 +233,10 @@ func (r Shrink) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -230,45 +247,70 @@ func (r *Shrink) Header(key, value string) *Shrink { return r } -// Index The name of the source index to shrink +// Index Name of 
the source index to shrink. // API Name: index -func (r *Shrink) Index(v string) *Shrink { +func (r *Shrink) _index(index string) *Shrink { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// Target The name of the target index to shrink into +// Target Name of the target index to create. // API Name: target -func (r *Shrink) Target(v string) *Shrink { +func (r *Shrink) _target(target string) *Shrink { r.paramSet |= targetMask - r.target = v + r.target = target return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *Shrink) MasterTimeout(v string) *Shrink { - r.values.Set("master_timeout", v) +func (r *Shrink) MasterTimeout(duration string) *Shrink { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *Shrink) Timeout(v string) *Shrink { - r.values.Set("timeout", v) +func (r *Shrink) Timeout(duration string) *Shrink { + r.values.Set("timeout", duration) return r } -// WaitForActiveShards Set the number of active shards to wait for on the shrunken index before the -// operation returns. +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). 
// API name: wait_for_active_shards -func (r *Shrink) WaitForActiveShards(v string) *Shrink { - r.values.Set("wait_for_active_shards", v) +func (r *Shrink) WaitForActiveShards(waitforactiveshards string) *Shrink { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// Aliases The key is the alias name. +// Index alias names support date math. +// API name: aliases +func (r *Shrink) Aliases(aliases map[string]types.Alias) *Shrink { + + r.req.Aliases = aliases + + return r +} + +// Settings Configuration options for the target index. +// API name: settings +func (r *Shrink) Settings(settings map[string]json.RawMessage) *Shrink { + + r.req.Settings = settings return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulateindextemplate/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulateindextemplate/request.go index dfa634652..a55c0ea17 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulateindextemplate/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulateindextemplate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package simulateindextemplate @@ -29,16 +29,47 @@ import ( // Request holds the request body struct for the package simulateindextemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateRequest.ts#L33-L71 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateRequest.ts#L33-L115 type Request struct { - AllowAutoCreate *bool `json:"allow_auto_create,omitempty"` - ComposedOf []string `json:"composed_of,omitempty"` - DataStream *types.DataStreamVisibility `json:"data_stream,omitempty"` - IndexPatterns []string `json:"index_patterns,omitempty"` - Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` - Priority *int `json:"priority,omitempty"` - Template *types.IndexTemplateMapping `json:"template,omitempty"` - Version *int64 `json:"version,omitempty"` + + // AllowAutoCreate This setting overrides the value of the `action.auto_create_index` cluster + // setting. + // If set to `true` in a template, then indices can be automatically created + // using that template even if auto-creation of indices is disabled via + // `actions.auto_create_index`. + // If set to `false`, then indices or data streams matching the template must + // always be explicitly created, and may never be automatically created. + AllowAutoCreate *bool `json:"allow_auto_create,omitempty"` + // ComposedOf An ordered list of component template names. + // Component templates are merged in the order specified, meaning that the last + // component template specified has the highest precedence. 
+ ComposedOf []string `json:"composed_of,omitempty"` + // DataStream If this object is included, the template is used to create data streams and + // their backing indices. + // Supports an empty object. + // Data streams require a matching index template with a `data_stream` object. + DataStream *types.DataStreamVisibility `json:"data_stream,omitempty"` + // IndexPatterns Array of wildcard (`*`) expressions used to match the names of data streams + // and indices during creation. + IndexPatterns []string `json:"index_patterns,omitempty"` + // Meta_ Optional user metadata about the index template. + // May have any contents. + // This map is not automatically generated by Elasticsearch. + Meta_ types.Metadata `json:"_meta,omitempty"` + // Priority Priority to determine index template precedence when a new data stream or + // index is created. + // The index template with the highest priority is chosen. + // If no priority is specified the template is treated as though it is of + // priority 0 (lowest priority). + // This number is not automatically generated by Elasticsearch. + Priority *int `json:"priority,omitempty"` + // Template Template to be applied. + // It may optionally include an `aliases`, `mappings`, or `settings` + // configuration. + Template *types.IndexTemplateMapping `json:"template,omitempty"` + // Version Version number used to manage index templates externally. + // This number is not automatically generated by Elasticsearch. 
+ Version *int64 `json:"version,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulateindextemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulateindextemplate/response.go index c7c03924a..5aaf5960b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulateindextemplate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulateindextemplate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package simulateindextemplate // Response holds the response body struct for the package simulateindextemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateResponse.ts#L20-L22 type Response struct { } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulateindextemplate/simulate_index_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulateindextemplate/simulate_index_template.go index b7706ed68..708e37a25 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulateindextemplate/simulate_index_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulateindextemplate/simulate_index_template.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Simulate matching the given index name against the index templates in the // system @@ -54,8 +54,9 @@ type SimulateIndexTemplate struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -71,7 +72,7 @@ func NewSimulateIndexTemplateFunc(tp elastictransport.Interface) NewSimulateInde return func(name string) *SimulateIndexTemplate { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -87,6 +88,8 @@ func New(tp elastictransport.Interface) *SimulateIndexTemplate { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -116,9 +119,19 @@ func (r *SimulateIndexTemplate) HttpRequest(ctx context.Context) (*http.Request, var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -126,6 +139,7 @@ func (r *SimulateIndexTemplate) HttpRequest(ctx context.Context) (*http.Request, } r.buf.Write(data) + } r.path.Scheme = "http" @@ -208,7 +222,6 @@ func (r SimulateIndexTemplate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -217,6 +230,10 @@ func (r SimulateIndexTemplate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -229,9 +246,9 @@ func (r *SimulateIndexTemplate) Header(key, value string) *SimulateIndexTemplate // Name Index 
or template name to simulate // API Name: name -func (r *SimulateIndexTemplate) Name(v string) *SimulateIndexTemplate { +func (r *SimulateIndexTemplate) _name(name string) *SimulateIndexTemplate { r.paramSet |= nameMask - r.name = v + r.name = name return r } @@ -242,8 +259,8 @@ func (r *SimulateIndexTemplate) Name(v string) *SimulateIndexTemplate { // permanently added or updated in either case; it is only used for the // simulation. // API name: create -func (r *SimulateIndexTemplate) Create(b bool) *SimulateIndexTemplate { - r.values.Set("create", strconv.FormatBool(b)) +func (r *SimulateIndexTemplate) Create(create bool) *SimulateIndexTemplate { + r.values.Set("create", strconv.FormatBool(create)) return r } @@ -252,8 +269,104 @@ func (r *SimulateIndexTemplate) Create(b bool) *SimulateIndexTemplate { // received // before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *SimulateIndexTemplate) MasterTimeout(v string) *SimulateIndexTemplate { - r.values.Set("master_timeout", v) +func (r *SimulateIndexTemplate) MasterTimeout(duration string) *SimulateIndexTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// IncludeDefaults If true, returns all relevant default configurations for the index template. +// API name: include_defaults +func (r *SimulateIndexTemplate) IncludeDefaults(includedefaults bool) *SimulateIndexTemplate { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) + + return r +} + +// AllowAutoCreate This setting overrides the value of the `action.auto_create_index` cluster +// setting. +// If set to `true` in a template, then indices can be automatically created +// using that template even if auto-creation of indices is disabled via +// `actions.auto_create_index`. +// If set to `false`, then indices or data streams matching the template must +// always be explicitly created, and may never be automatically created. 
+// API name: allow_auto_create +func (r *SimulateIndexTemplate) AllowAutoCreate(allowautocreate bool) *SimulateIndexTemplate { + r.req.AllowAutoCreate = &allowautocreate + + return r +} + +// ComposedOf An ordered list of component template names. +// Component templates are merged in the order specified, meaning that the last +// component template specified has the highest precedence. +// API name: composed_of +func (r *SimulateIndexTemplate) ComposedOf(composedofs ...string) *SimulateIndexTemplate { + r.req.ComposedOf = composedofs + + return r +} + +// DataStream If this object is included, the template is used to create data streams and +// their backing indices. +// Supports an empty object. +// Data streams require a matching index template with a `data_stream` object. +// API name: data_stream +func (r *SimulateIndexTemplate) DataStream(datastream *types.DataStreamVisibility) *SimulateIndexTemplate { + + r.req.DataStream = datastream + + return r +} + +// IndexPatterns Array of wildcard (`*`) expressions used to match the names of data streams +// and indices during creation. +// API name: index_patterns +func (r *SimulateIndexTemplate) IndexPatterns(indices ...string) *SimulateIndexTemplate { + r.req.IndexPatterns = indices + + return r +} + +// Meta_ Optional user metadata about the index template. +// May have any contents. +// This map is not automatically generated by Elasticsearch. +// API name: _meta +func (r *SimulateIndexTemplate) Meta_(metadata types.Metadata) *SimulateIndexTemplate { + r.req.Meta_ = metadata + + return r +} + +// Priority Priority to determine index template precedence when a new data stream or +// index is created. +// The index template with the highest priority is chosen. +// If no priority is specified the template is treated as though it is of +// priority 0 (lowest priority). +// This number is not automatically generated by Elasticsearch. 
+// API name: priority +func (r *SimulateIndexTemplate) Priority(priority int) *SimulateIndexTemplate { + r.req.Priority = &priority + + return r +} + +// Template Template to be applied. +// It may optionally include an `aliases`, `mappings`, or `settings` +// configuration. +// API name: template +func (r *SimulateIndexTemplate) Template(template *types.IndexTemplateMapping) *SimulateIndexTemplate { + + r.req.Template = template + + return r +} + +// Version Version number used to manage index templates externally. +// This number is not automatically generated by Elasticsearch. +// API name: version +func (r *SimulateIndexTemplate) Version(versionnumber int64) *SimulateIndexTemplate { + r.req.Version = &versionnumber return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/existssource/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulatetemplate/request.go similarity index 63% rename from vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/existssource/response.go rename to vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulatetemplate/request.go index 195db616f..f0b95729a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/existssource/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulatetemplate/request.go @@ -16,19 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 -package existssource +package simulatetemplate -// Response holds the response body struct for the package existssource -// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/exists_source/SourceExistsResponse.ts#L22-L24 - -type Response struct { -} +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) -// NewResponse returns a Response -func NewResponse() *Response { - r := &Response{} - return r -} +// Request holds the request body struct for the package simulatetemplate +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/simulate_template/IndicesSimulateTemplateRequest.ts#L25-L61 +type Request = types.IndexTemplate diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulatetemplate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulatetemplate/response.go index 9f1fc620e..b8e6f43b5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulatetemplate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulatetemplate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package simulatetemplate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package simulatetemplate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L26-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L26-L31 type Response struct { Overlapping []types.Overlapping `json:"overlapping,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulatetemplate/simulate_template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulatetemplate/simulate_template.go index a256bc8a9..d57ff705f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulatetemplate/simulate_template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/simulatetemplate/simulate_template.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Simulate resolving the given template name or body package simulatetemplate @@ -53,8 +53,9 @@ type SimulateTemplate struct { buf *gobytes.Buffer - req *types.IndexTemplate - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -97,7 +98,7 @@ func (r *SimulateTemplate) Raw(raw io.Reader) *SimulateTemplate { } // Request allows to set the request property with the appropriate payload. -func (r *SimulateTemplate) Request(req *types.IndexTemplate) *SimulateTemplate { +func (r *SimulateTemplate) Request(req *Request) *SimulateTemplate { r.req = req return r @@ -112,9 +113,19 @@ func (r *SimulateTemplate) HttpRequest(ctx context.Context) (*http.Request, erro var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -122,6 +133,7 @@ func (r *SimulateTemplate) HttpRequest(ctx context.Context) (*http.Request, erro } r.buf.Write(data) + } r.path.Scheme = "http" @@ -211,7 +223,6 @@ func (r SimulateTemplate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -220,6 +231,10 @@ func (r SimulateTemplate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -234,9 +249,9 @@ func (r *SimulateTemplate) Header(key, value string) *SimulateTemplate { // before you add it to the cluster, omit // this parameter and specify the template configuration in the request body. 
// API Name: name -func (r *SimulateTemplate) Name(v string) *SimulateTemplate { +func (r *SimulateTemplate) Name(name string) *SimulateTemplate { r.paramSet |= nameMask - r.name = v + r.name = name return r } @@ -246,8 +261,8 @@ func (r *SimulateTemplate) Name(v string) *SimulateTemplate { // template with the highest priority. Note that the template is not permanently // added or updated in either case; it is only used for the simulation. // API name: create -func (r *SimulateTemplate) Create(b bool) *SimulateTemplate { - r.values.Set("create", strconv.FormatBool(b)) +func (r *SimulateTemplate) Create(create bool) *SimulateTemplate { + r.values.Set("create", strconv.FormatBool(create)) return r } @@ -255,8 +270,96 @@ func (r *SimulateTemplate) Create(b bool) *SimulateTemplate { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *SimulateTemplate) MasterTimeout(v string) *SimulateTemplate { - r.values.Set("master_timeout", v) +func (r *SimulateTemplate) MasterTimeout(duration string) *SimulateTemplate { + r.values.Set("master_timeout", duration) + + return r +} + +// IncludeDefaults If true, returns all relevant default configurations for the index template. +// API name: include_defaults +func (r *SimulateTemplate) IncludeDefaults(includedefaults bool) *SimulateTemplate { + r.values.Set("include_defaults", strconv.FormatBool(includedefaults)) + + return r +} + +// API name: allow_auto_create +func (r *SimulateTemplate) AllowAutoCreate(allowautocreate bool) *SimulateTemplate { + r.req.AllowAutoCreate = &allowautocreate + + return r +} + +// ComposedOf An ordered list of component template names. +// Component templates are merged in the order specified, meaning that the last +// component template specified has the highest precedence. 
+// API name: composed_of +func (r *SimulateTemplate) ComposedOf(composedofs ...string) *SimulateTemplate { + r.req.ComposedOf = composedofs + + return r +} + +// DataStream If this object is included, the template is used to create data streams and +// their backing indices. +// Supports an empty object. +// Data streams require a matching index template with a `data_stream` object. +// API name: data_stream +func (r *SimulateTemplate) DataStream(datastream *types.IndexTemplateDataStreamConfiguration) *SimulateTemplate { + + r.req.DataStream = datastream + + return r +} + +// IndexPatterns Name of the index template. +// API name: index_patterns +func (r *SimulateTemplate) IndexPatterns(names ...string) *SimulateTemplate { + r.req.IndexPatterns = names + + return r +} + +// Meta_ Optional user metadata about the index template. May have any contents. +// This map is not automatically generated by Elasticsearch. +// API name: _meta +func (r *SimulateTemplate) Meta_(metadata types.Metadata) *SimulateTemplate { + r.req.Meta_ = metadata + + return r +} + +// Priority Priority to determine index template precedence when a new data stream or +// index is created. +// The index template with the highest priority is chosen. +// If no priority is specified the template is treated as though it is of +// priority 0 (lowest priority). +// This number is not automatically generated by Elasticsearch. +// API name: priority +func (r *SimulateTemplate) Priority(priority int64) *SimulateTemplate { + + r.req.Priority = &priority + + return r +} + +// Template Template to be applied. +// It may optionally include an `aliases`, `mappings`, or `settings` +// configuration. +// API name: template +func (r *SimulateTemplate) Template(template *types.IndexTemplateSummary) *SimulateTemplate { + + r.req.Template = template + + return r +} + +// Version Version number used to manage index templates externally. +// This number is not automatically generated by Elasticsearch. 
+// API name: version +func (r *SimulateTemplate) Version(versionnumber int64) *SimulateTemplate { + r.req.Version = &versionnumber return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/split/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/split/request.go index 0df50a211..44994a31c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/split/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/split/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package split @@ -29,9 +29,12 @@ import ( // Request holds the request body struct for the package split // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/split/IndicesSplitRequest.ts#L27-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/split/IndicesSplitRequest.ts#L27-L74 type Request struct { - Aliases map[string]types.Alias `json:"aliases,omitempty"` + + // Aliases Aliases for the resulting index. + Aliases map[string]types.Alias `json:"aliases,omitempty"` + // Settings Configuration options for the target index. 
Settings map[string]json.RawMessage `json:"settings,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/split/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/split/response.go index 2343cd33f..51510ebe5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/split/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/split/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package split // Response holds the response body struct for the package split // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/split/IndicesSplitResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/split/IndicesSplitResponse.ts#L22-L28 type Response struct { Acknowledged bool `json:"acknowledged"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/split/split.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/split/split.go index 3468d0223..7ee0dacea 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/split/split.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/split/split.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Allows you to split an existing index into a new index with more primary // shards. @@ -55,8 +55,9 @@ type Split struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -73,9 +74,9 @@ func NewSplitFunc(tp elastictransport.Interface) NewSplit { return func(index, target string) *Split { n := New(tp) - n.Index(index) + n._index(index) - n.Target(target) + n._target(target) return n } @@ -84,13 +85,15 @@ func NewSplitFunc(tp elastictransport.Interface) NewSplit { // Allows you to split an existing index into a new index with more primary // shards. // -// https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-split-index.html func New(tp elastictransport.Interface) *Split { r := &Split{ transport: tp, values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -120,9 +123,19 @@ func (r *Split) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -130,6 +143,7 @@ func (r *Split) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -213,7 +227,6 @@ func (r Split) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -222,6 +235,10 @@ func (r Split) Do(ctx context.Context) (*Response, error) { 
return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -232,45 +249,69 @@ func (r *Split) Header(key, value string) *Split { return r } -// Index The name of the source index to split +// Index Name of the source index to split. // API Name: index -func (r *Split) Index(v string) *Split { +func (r *Split) _index(index string) *Split { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// Target The name of the target index to split into +// Target Name of the target index to create. // API Name: target -func (r *Split) Target(v string) *Split { +func (r *Split) _target(target string) *Split { r.paramSet |= targetMask - r.target = v + r.target = target return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *Split) MasterTimeout(v string) *Split { - r.values.Set("master_timeout", v) +func (r *Split) MasterTimeout(duration string) *Split { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *Split) Timeout(v string) *Split { - r.values.Set("timeout", v) +func (r *Split) Timeout(duration string) *Split { + r.values.Set("timeout", duration) return r } -// WaitForActiveShards Set the number of active shards to wait for on the shrunken index before the -// operation returns. +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). 
// API name: wait_for_active_shards -func (r *Split) WaitForActiveShards(v string) *Split { - r.values.Set("wait_for_active_shards", v) +func (r *Split) WaitForActiveShards(waitforactiveshards string) *Split { + r.values.Set("wait_for_active_shards", waitforactiveshards) + + return r +} + +// Aliases Aliases for the resulting index. +// API name: aliases +func (r *Split) Aliases(aliases map[string]types.Alias) *Split { + + r.req.Aliases = aliases + + return r +} + +// Settings Configuration options for the target index. +// API name: settings +func (r *Split) Settings(settings map[string]json.RawMessage) *Split { + + r.req.Settings = settings return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/stats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/stats/response.go index 13889fbfd..bc8783663 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/stats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package stats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/stats/IndicesStatsResponse.ts#L24-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/stats/IndicesStatsResponse.ts#L24-L30 type Response struct { All_ types.IndicesStats `json:"_all"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/stats/stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/stats/stats.go index 386ad0212..ce841896f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/stats/stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/stats/stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Provides statistics on operations happening in an index. 
package stats @@ -36,7 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/level" ) @@ -196,7 +196,6 @@ func (r Stats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -205,6 +204,10 @@ func (r Stats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -238,9 +241,9 @@ func (r *Stats) Header(key, value string) *Stats { // Metric Limit the information returned the specific metrics. // API Name: metric -func (r *Stats) Metric(v string) *Stats { +func (r *Stats) Metric(metric string) *Stats { r.paramSet |= metricMask - r.metric = v + r.metric = metric return r } @@ -248,9 +251,9 @@ func (r *Stats) Metric(v string) *Stats { // Index A comma-separated list of index names; use `_all` or empty string to perform // the operation on all indices // API Name: index -func (r *Stats) Index(v string) *Stats { +func (r *Stats) Index(index string) *Stats { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -258,8 +261,8 @@ func (r *Stats) Index(v string) *Stats { // CompletionFields Comma-separated list or wildcard expressions of fields to include in // fielddata and suggest statistics. // API name: completion_fields -func (r *Stats) CompletionFields(v string) *Stats { - r.values.Set("completion_fields", v) +func (r *Stats) CompletionFields(fields ...string) *Stats { + r.values.Set("completion_fields", strings.Join(fields, ",")) return r } @@ -270,8 +273,12 @@ func (r *Stats) CompletionFields(v string) *Stats { // comma-separated values, // such as `open,hidden`. 
// API name: expand_wildcards -func (r *Stats) ExpandWildcards(v string) *Stats { - r.values.Set("expand_wildcards", v) +func (r *Stats) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Stats { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } @@ -279,8 +286,8 @@ func (r *Stats) ExpandWildcards(v string) *Stats { // FielddataFields Comma-separated list or wildcard expressions of fields to include in // fielddata statistics. // API name: fielddata_fields -func (r *Stats) FielddataFields(v string) *Stats { - r.values.Set("fielddata_fields", v) +func (r *Stats) FielddataFields(fields ...string) *Stats { + r.values.Set("fielddata_fields", strings.Join(fields, ",")) return r } @@ -288,24 +295,28 @@ func (r *Stats) FielddataFields(v string) *Stats { // Fields Comma-separated list or wildcard expressions of fields to include in the // statistics. // API name: fields -func (r *Stats) Fields(v string) *Stats { - r.values.Set("fields", v) +func (r *Stats) Fields(fields ...string) *Stats { + r.values.Set("fields", strings.Join(fields, ",")) return r } // ForbidClosedIndices If true, statistics are not collected from closed indices. // API name: forbid_closed_indices -func (r *Stats) ForbidClosedIndices(b bool) *Stats { - r.values.Set("forbid_closed_indices", strconv.FormatBool(b)) +func (r *Stats) ForbidClosedIndices(forbidclosedindices bool) *Stats { + r.values.Set("forbid_closed_indices", strconv.FormatBool(forbidclosedindices)) return r } // Groups Comma-separated list of search groups to include in the search statistics. 
// API name: groups -func (r *Stats) Groups(v string) *Stats { - r.values.Set("groups", v) +func (r *Stats) Groups(groups ...string) *Stats { + tmp := []string{} + for _, item := range groups { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("groups", strings.Join(tmp, ",")) return r } @@ -313,8 +324,8 @@ func (r *Stats) Groups(v string) *Stats { // IncludeSegmentFileSizes If true, the call reports the aggregated disk usage of each one of the Lucene // index files (only applies if segment stats are requested). // API name: include_segment_file_sizes -func (r *Stats) IncludeSegmentFileSizes(b bool) *Stats { - r.values.Set("include_segment_file_sizes", strconv.FormatBool(b)) +func (r *Stats) IncludeSegmentFileSizes(includesegmentfilesizes bool) *Stats { + r.values.Set("include_segment_file_sizes", strconv.FormatBool(includesegmentfilesizes)) return r } @@ -322,8 +333,8 @@ func (r *Stats) IncludeSegmentFileSizes(b bool) *Stats { // IncludeUnloadedSegments If true, the response includes information from segments that are not loaded // into memory. // API name: include_unloaded_segments -func (r *Stats) IncludeUnloadedSegments(b bool) *Stats { - r.values.Set("include_unloaded_segments", strconv.FormatBool(b)) +func (r *Stats) IncludeUnloadedSegments(includeunloadedsegments bool) *Stats { + r.values.Set("include_unloaded_segments", strconv.FormatBool(includeunloadedsegments)) return r } @@ -331,8 +342,8 @@ func (r *Stats) IncludeUnloadedSegments(b bool) *Stats { // Level Indicates whether statistics are aggregated at the cluster, index, or shard // level. 
// API name: level -func (r *Stats) Level(enum level.Level) *Stats { - r.values.Set("level", enum.String()) +func (r *Stats) Level(level level.Level) *Stats { + r.values.Set("level", level.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/unfreeze/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/unfreeze/response.go index c95888ef8..26b9ed21d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/unfreeze/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/unfreeze/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package unfreeze // Response holds the response body struct for the package unfreeze // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/unfreeze/IndicesUnfreezeResponse.ts#L20-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/unfreeze/IndicesUnfreezeResponse.ts#L20-L25 type Response struct { Acknowledged bool `json:"acknowledged"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/unfreeze/unfreeze.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/unfreeze/unfreeze.go index 4e675453f..7fce72126 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/unfreeze/unfreeze.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/unfreeze/unfreeze.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Unfreezes an index. When a frozen index is unfrozen, the index goes through // the normal recovery process and becomes writeable again. @@ -37,6 +37,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -69,7 +70,7 @@ func NewUnfreezeFunc(tp elastictransport.Interface) NewUnfreeze { return func(index string) *Unfreeze { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -171,7 +172,6 @@ func (r Unfreeze) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -180,6 +180,10 @@ func (r Unfreeze) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -211,62 +215,77 @@ func (r *Unfreeze) Header(key, value string) *Unfreeze { return r } -// Index The name of the index to unfreeze +// Index Identifier for the index. // API Name: index -func (r *Unfreeze) Index(v string) *Unfreeze { +func (r *Unfreeze) _index(index string) *Unfreeze { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. 
// API name: allow_no_indices -func (r *Unfreeze) AllowNoIndices(b bool) *Unfreeze { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *Unfreeze) AllowNoIndices(allownoindices bool) *Unfreeze { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. +// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *Unfreeze) ExpandWildcards(v string) *Unfreeze { - r.values.Set("expand_wildcards", v) +func (r *Unfreeze) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Unfreeze { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *Unfreeze) IgnoreUnavailable(b bool) *Unfreeze { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Unfreeze) IgnoreUnavailable(ignoreunavailable bool) *Unfreeze { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
// API name: master_timeout -func (r *Unfreeze) MasterTimeout(v string) *Unfreeze { - r.values.Set("master_timeout", v) +func (r *Unfreeze) MasterTimeout(duration string) *Unfreeze { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *Unfreeze) Timeout(v string) *Unfreeze { - r.values.Set("timeout", v) +func (r *Unfreeze) Timeout(duration string) *Unfreeze { + r.values.Set("timeout", duration) return r } -// WaitForActiveShards Sets the number of active shards to wait for before the operation returns. +// WaitForActiveShards The number of shard copies that must be active before proceeding with the +// operation. +// Set to `all` or any positive integer up to the total number of shards in the +// index (`number_of_replicas+1`). // API name: wait_for_active_shards -func (r *Unfreeze) WaitForActiveShards(v string) *Unfreeze { - r.values.Set("wait_for_active_shards", v) +func (r *Unfreeze) WaitForActiveShards(waitforactiveshards string) *Unfreeze { + r.values.Set("wait_for_active_shards", waitforactiveshards) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/updatealiases/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/updatealiases/request.go index eed628ecd..2b0786133 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/updatealiases/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/updatealiases/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatealiases @@ -29,8 +29,10 @@ import ( // Request holds the request body struct for the package updatealiases // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/update_aliases/IndicesUpdateAliasesRequest.ts#L24-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/update_aliases/IndicesUpdateAliasesRequest.ts#L24-L51 type Request struct { + + // Actions Actions to perform. Actions []types.IndicesAction `json:"actions,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/updatealiases/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/updatealiases/response.go index 60e3c2333..224624431 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/updatealiases/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/updatealiases/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatealiases // Response holds the response body struct for the package updatealiases // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/update_aliases/IndicesUpdateAliasesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/update_aliases/IndicesUpdateAliasesResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/updatealiases/update_aliases.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/updatealiases/update_aliases.go index 78407bc07..2b7939d5f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/updatealiases/update_aliases.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/updatealiases/update_aliases.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Updates index aliases. 
package updatealiases @@ -48,8 +48,9 @@ type UpdateAliases struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +77,8 @@ func New(tp elastictransport.Interface) *UpdateAliases { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +108,19 @@ func (r *UpdateAliases) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +128,7 @@ func (r *UpdateAliases) HttpRequest(ctx context.Context) (*http.Request, error) } r.buf.Write(data) + } r.path.Scheme = "http" @@ -192,7 +206,6 @@ func (r UpdateAliases) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -201,6 +214,10 @@ func (r UpdateAliases) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -211,18 +228,30 @@ func (r *UpdateAliases) Header(key, value string) *UpdateAliases { return r } -// MasterTimeout Specify timeout for connection to master +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *UpdateAliases) MasterTimeout(v string) *UpdateAliases { - r.values.Set("master_timeout", v) +func (r *UpdateAliases) MasterTimeout(duration string) *UpdateAliases { + r.values.Set("master_timeout", duration) return r } -// Timeout Request timeout +// Timeout Period to wait for a response. 
+// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *UpdateAliases) Timeout(v string) *UpdateAliases { - r.values.Set("timeout", v) +func (r *UpdateAliases) Timeout(duration string) *UpdateAliases { + r.values.Set("timeout", duration) + + return r +} + +// Actions Actions to perform. +// API name: actions +func (r *UpdateAliases) Actions(actions ...types.IndicesAction) *UpdateAliases { + r.req.Actions = actions return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/validatequery/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/validatequery/request.go index 6d3866780..17e0b3355 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/validatequery/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/validatequery/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package validatequery @@ -29,8 +29,10 @@ import ( // Request holds the request body struct for the package validatequery // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/validate_query/IndicesValidateQueryRequest.ts#L25-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/validate_query/IndicesValidateQueryRequest.ts#L25-L111 type Request struct { + + // Query Query in the Lucene query string syntax. 
Query *types.Query `json:"query,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/validatequery/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/validatequery/response.go index 2918c1681..7c838bde6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/validatequery/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/validatequery/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package validatequery @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package validatequery // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/validate_query/IndicesValidateQueryResponse.ts#L23-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/validate_query/IndicesValidateQueryResponse.ts#L23-L30 type Response struct { Error *string `json:"error,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/validatequery/validate_query.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/validatequery/validate_query.go index 9d41a861e..ec677023c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/validatequery/validate_query.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/validatequery/validate_query.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Allows a user to validate a potentially expensive query without executing it. package validatequery @@ -35,7 +35,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" ) @@ -55,8 +55,9 @@ type ValidateQuery struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -85,6 +86,8 @@ func New(tp elastictransport.Interface) *ValidateQuery { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +117,19 @@ func (r *ValidateQuery) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +137,7 @@ func (r *ValidateQuery) HttpRequest(ctx context.Context) (*http.Request, error) } r.buf.Write(data) + } r.path.Scheme = "http" @@ -213,7 +227,6 @@ func (r ValidateQuery) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -222,6 +235,10 @@ func (r ValidateQuery) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -232,115 +249,138 @@ func (r *ValidateQuery) Header(key, value string) *ValidateQuery { return r } -// 
Index A comma-separated list of index names to restrict the operation; use `_all` -// or empty string to perform the operation on all indices +// Index Comma-separated list of data streams, indices, and aliases to search. +// Supports wildcards (`*`). +// To search all data streams or indices, omit this parameter or use `*` or +// `_all`. // API Name: index -func (r *ValidateQuery) Index(v string) *ValidateQuery { +func (r *ValidateQuery) Index(index string) *ValidateQuery { r.paramSet |= indexMask - r.index = v + r.index = index return r } -// AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete -// indices. (This includes `_all` string or when no indices have been specified) +// AllowNoIndices If `false`, the request returns an error if any wildcard expression, index +// alias, or `_all` value targets only missing or closed indices. +// This behavior applies even if the request targets other open indices. // API name: allow_no_indices -func (r *ValidateQuery) AllowNoIndices(b bool) *ValidateQuery { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *ValidateQuery) AllowNoIndices(allownoindices bool) *ValidateQuery { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } -// AllShards Execute validation on all shards instead of one random shard per index +// AllShards If `true`, the validation is executed on all shards instead of one random +// shard per index. // API name: all_shards -func (r *ValidateQuery) AllShards(b bool) *ValidateQuery { - r.values.Set("all_shards", strconv.FormatBool(b)) +func (r *ValidateQuery) AllShards(allshards bool) *ValidateQuery { + r.values.Set("all_shards", strconv.FormatBool(allshards)) return r } -// Analyzer The analyzer to use for the query string +// Analyzer Analyzer to use for the query string. +// This parameter can only be used when the `q` query string parameter is +// specified. 
// API name: analyzer -func (r *ValidateQuery) Analyzer(v string) *ValidateQuery { - r.values.Set("analyzer", v) +func (r *ValidateQuery) Analyzer(analyzer string) *ValidateQuery { + r.values.Set("analyzer", analyzer) return r } -// AnalyzeWildcard Specify whether wildcard and prefix queries should be analyzed (default: -// false) +// AnalyzeWildcard If `true`, wildcard and prefix queries are analyzed. // API name: analyze_wildcard -func (r *ValidateQuery) AnalyzeWildcard(b bool) *ValidateQuery { - r.values.Set("analyze_wildcard", strconv.FormatBool(b)) +func (r *ValidateQuery) AnalyzeWildcard(analyzewildcard bool) *ValidateQuery { + r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard)) return r } -// DefaultOperator The default operator for query string query (AND or OR) +// DefaultOperator The default operator for query string query: `AND` or `OR`. // API name: default_operator -func (r *ValidateQuery) DefaultOperator(enum operator.Operator) *ValidateQuery { - r.values.Set("default_operator", enum.String()) +func (r *ValidateQuery) DefaultOperator(defaultoperator operator.Operator) *ValidateQuery { + r.values.Set("default_operator", defaultoperator.String()) return r } -// Df The field to use as default where no field prefix is given in the query -// string +// Df Field to use as default where no field prefix is given in the query string. +// This parameter can only be used when the `q` query string parameter is +// specified. // API name: df -func (r *ValidateQuery) Df(v string) *ValidateQuery { - r.values.Set("df", v) +func (r *ValidateQuery) Df(df string) *ValidateQuery { + r.values.Set("df", df) return r } -// ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, -// closed or both. +// ExpandWildcards Type of index that wildcard patterns can match. +// If the request can target data streams, this argument determines whether +// wildcard expressions match hidden data streams. 
+// Supports comma-separated values, such as `open,hidden`. +// Valid values are: `all`, `open`, `closed`, `hidden`, `none`. // API name: expand_wildcards -func (r *ValidateQuery) ExpandWildcards(v string) *ValidateQuery { - r.values.Set("expand_wildcards", v) +func (r *ValidateQuery) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ValidateQuery { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } -// Explain Return detailed information about the error +// Explain If `true`, the response returns detailed information if an error has +// occurred. // API name: explain -func (r *ValidateQuery) Explain(b bool) *ValidateQuery { - r.values.Set("explain", strconv.FormatBool(b)) +func (r *ValidateQuery) Explain(explain bool) *ValidateQuery { + r.values.Set("explain", strconv.FormatBool(explain)) return r } -// IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable -// (missing or closed) +// IgnoreUnavailable If `false`, the request returns an error if it targets a missing or closed +// index. // API name: ignore_unavailable -func (r *ValidateQuery) IgnoreUnavailable(b bool) *ValidateQuery { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *ValidateQuery) IgnoreUnavailable(ignoreunavailable bool) *ValidateQuery { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } -// Lenient Specify whether format-based query failures (such as providing text to a -// numeric field) should be ignored +// Lenient If `true`, format-based query failures (such as providing text to a numeric +// field) in the query string will be ignored. 
// API name: lenient -func (r *ValidateQuery) Lenient(b bool) *ValidateQuery { - r.values.Set("lenient", strconv.FormatBool(b)) +func (r *ValidateQuery) Lenient(lenient bool) *ValidateQuery { + r.values.Set("lenient", strconv.FormatBool(lenient)) return r } -// Rewrite Provide a more detailed explanation showing the actual Lucene query that will -// be executed. +// Rewrite If `true`, returns a more detailed explanation showing the actual Lucene +// query that will be executed. // API name: rewrite -func (r *ValidateQuery) Rewrite(b bool) *ValidateQuery { - r.values.Set("rewrite", strconv.FormatBool(b)) +func (r *ValidateQuery) Rewrite(rewrite bool) *ValidateQuery { + r.values.Set("rewrite", strconv.FormatBool(rewrite)) return r } -// Q Query in the Lucene query string syntax +// Q Query in the Lucene query string syntax. // API name: q -func (r *ValidateQuery) Q(v string) *ValidateQuery { - r.values.Set("q", v) +func (r *ValidateQuery) Q(q string) *ValidateQuery { + r.values.Set("q", q) + + return r +} + +// Query Query in the Lucene query string syntax. +// API name: query +func (r *ValidateQuery) Query(query *types.Query) *ValidateQuery { + + r.req.Query = query return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/deletepipeline/delete_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/deletepipeline/delete_pipeline.go index dd0330a71..d0212512a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/deletepipeline/delete_pipeline.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/deletepipeline/delete_pipeline.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes a pipeline. 
package deletepipeline @@ -67,7 +67,7 @@ func NewDeletePipelineFunc(tp elastictransport.Interface) NewDeletePipeline { return func(id string) *DeletePipeline { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -170,7 +170,6 @@ func (r DeletePipeline) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r DeletePipeline) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -210,27 +213,32 @@ func (r *DeletePipeline) Header(key, value string) *DeletePipeline { return r } -// Id Pipeline ID +// Id Pipeline ID or wildcard expression of pipeline IDs used to limit the request. +// To delete all ingest pipelines in a cluster, use a value of `*`. // API Name: id -func (r *DeletePipeline) Id(v string) *DeletePipeline { +func (r *DeletePipeline) _id(id string) *DeletePipeline { r.paramSet |= idMask - r.id = v + r.id = id return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *DeletePipeline) MasterTimeout(v string) *DeletePipeline { - r.values.Set("master_timeout", v) +func (r *DeletePipeline) MasterTimeout(duration string) *DeletePipeline { + r.values.Set("master_timeout", duration) return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. 
// API name: timeout -func (r *DeletePipeline) Timeout(v string) *DeletePipeline { - r.values.Set("timeout", v) +func (r *DeletePipeline) Timeout(duration string) *DeletePipeline { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/deletepipeline/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/deletepipeline/response.go index 9f90771a1..24a84f635 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/deletepipeline/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/deletepipeline/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletepipeline // Response holds the response body struct for the package deletepipeline // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/delete_pipeline/DeletePipelineResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/delete_pipeline/DeletePipelineResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/geoipstats/geo_ip_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/geoipstats/geo_ip_stats.go index 104f98e77..31b3ee5b1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/geoipstats/geo_ip_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/geoipstats/geo_ip_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns statistical information about geoip databases package geoipstats @@ -67,7 +67,7 @@ func NewGeoIpStatsFunc(tp elastictransport.Interface) NewGeoIpStats { // Returns statistical information about geoip databases // -// https://www.elastic.co/guide/en/elasticsearch/reference/master/geoip-stats-api.html +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/geoip-processor.html func New(tp elastictransport.Interface) *GeoIpStats { r := &GeoIpStats{ transport: tp, @@ -161,7 +161,6 @@ func (r GeoIpStats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -170,6 +169,10 @@ func (r GeoIpStats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/geoipstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/geoipstats/response.go index 618fdcccf..f72c9a154 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/geoipstats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/geoipstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package geoipstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package geoipstats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/geo_ip_stats/IngestGeoIpStatsResponse.ts#L24-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/geo_ip_stats/IngestGeoIpStatsResponse.ts#L24-L31 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/getpipeline/get_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/getpipeline/get_pipeline.go index 9b4cf2369..60fb32e48 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/getpipeline/get_pipeline.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/getpipeline/get_pipeline.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns a pipeline. package getpipeline @@ -176,7 +176,6 @@ func (r GetPipeline) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -185,6 +184,10 @@ func (r GetPipeline) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,27 +219,31 @@ func (r *GetPipeline) Header(key, value string) *GetPipeline { return r } -// Id Comma separated list of pipeline ids. 
Wildcards supported +// Id Comma-separated list of pipeline IDs to retrieve. +// Wildcard (`*`) expressions are supported. +// To get all ingest pipelines, omit this parameter or use `*`. // API Name: id -func (r *GetPipeline) Id(v string) *GetPipeline { +func (r *GetPipeline) Id(id string) *GetPipeline { r.paramSet |= idMask - r.id = v + r.id = id return r } -// MasterTimeout Explicit operation timeout for connection to master node +// MasterTimeout Period to wait for a connection to the master node. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: master_timeout -func (r *GetPipeline) MasterTimeout(v string) *GetPipeline { - r.values.Set("master_timeout", v) +func (r *GetPipeline) MasterTimeout(duration string) *GetPipeline { + r.values.Set("master_timeout", duration) return r } // Summary Return pipelines without their definitions (default: false) // API name: summary -func (r *GetPipeline) Summary(b bool) *GetPipeline { - r.values.Set("summary", strconv.FormatBool(b)) +func (r *GetPipeline) Summary(summary bool) *GetPipeline { + r.values.Set("summary", strconv.FormatBool(summary)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/getpipeline/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/getpipeline/response.go index caea78ad0..e7c200a5f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/getpipeline/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/getpipeline/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getpipeline @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getpipeline // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/get_pipeline/GetPipelineResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/get_pipeline/GetPipelineResponse.ts#L23-L25 type Response map[string]types.IngestPipeline diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/processorgrok/processor_grok.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/processorgrok/processor_grok.go index 704bc5bcf..1d80a2f52 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/processorgrok/processor_grok.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/processorgrok/processor_grok.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns a list of the built-in patterns. package processorgrok @@ -67,7 +67,7 @@ func NewProcessorGrokFunc(tp elastictransport.Interface) NewProcessorGrok { // Returns a list of the built-in patterns. 
// -// https://www.elastic.co/guide/en/elasticsearch/reference/master/grok-processor.html#grok-processor-rest-get +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/grok-processor.html func New(tp elastictransport.Interface) *ProcessorGrok { r := &ProcessorGrok{ transport: tp, @@ -161,7 +161,6 @@ func (r ProcessorGrok) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -170,6 +169,10 @@ func (r ProcessorGrok) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/processorgrok/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/processorgrok/response.go index e969afb3e..75152c952 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/processorgrok/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/processorgrok/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package processorgrok // Response holds the response body struct for the package processorgrok // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/processor_grok/GrokProcessorPatternsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/processor_grok/GrokProcessorPatternsResponse.ts#L22-L24 type Response struct { Patterns map[string]string `json:"patterns"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putpipeline/put_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putpipeline/put_pipeline.go index cfeb69783..e88053019 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putpipeline/put_pipeline.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putpipeline/put_pipeline.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates or updates a pipeline. 
package putpipeline @@ -52,8 +52,9 @@ type PutPipeline struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewPutPipelineFunc(tp elastictransport.Interface) NewPutPipeline { return func(id string) *PutPipeline { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -77,13 +78,15 @@ func NewPutPipelineFunc(tp elastictransport.Interface) NewPutPipeline { // Creates or updates a pipeline. // -// https://www.elastic.co/guide/en/elasticsearch/reference/master/put-pipeline-api.html +// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ingest.html func New(tp elastictransport.Interface) *PutPipeline { r := &PutPipeline{ transport: tp, values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *PutPipeline) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *PutPipeline) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -205,7 +219,6 @@ func (r PutPipeline) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -214,6 +227,10 @@ func (r PutPipeline) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -226,9 +243,9 @@ func (r *PutPipeline) Header(key, value string) *PutPipeline { // Id ID of the ingest pipeline to create or update. 
// API Name: id -func (r *PutPipeline) Id(v string) *PutPipeline { +func (r *PutPipeline) _id(id string) *PutPipeline { r.paramSet |= idMask - r.id = v + r.id = id return r } @@ -236,8 +253,8 @@ func (r *PutPipeline) Id(v string) *PutPipeline { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *PutPipeline) MasterTimeout(v string) *PutPipeline { - r.values.Set("master_timeout", v) +func (r *PutPipeline) MasterTimeout(duration string) *PutPipeline { + r.values.Set("master_timeout", duration) return r } @@ -245,16 +262,66 @@ func (r *PutPipeline) MasterTimeout(v string) *PutPipeline { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. // API name: timeout -func (r *PutPipeline) Timeout(v string) *PutPipeline { - r.values.Set("timeout", v) +func (r *PutPipeline) Timeout(duration string) *PutPipeline { + r.values.Set("timeout", duration) return r } // IfVersion Required version for optimistic concurrency control for pipeline updates // API name: if_version -func (r *PutPipeline) IfVersion(v string) *PutPipeline { - r.values.Set("if_version", v) +func (r *PutPipeline) IfVersion(versionnumber string) *PutPipeline { + r.values.Set("if_version", versionnumber) + + return r +} + +// Description Description of the ingest pipeline. +// API name: description +func (r *PutPipeline) Description(description string) *PutPipeline { + + r.req.Description = &description + + return r +} + +// Meta_ Optional metadata about the ingest pipeline. May have any contents. This map +// is not automatically generated by Elasticsearch. +// API name: _meta +func (r *PutPipeline) Meta_(metadata types.Metadata) *PutPipeline { + r.req.Meta_ = metadata + + return r +} + +// OnFailure Processors to run immediately after a processor failure. 
Each processor +// supports a processor-level `on_failure` value. If a processor without an +// `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as +// a fallback. The processors in this parameter run sequentially in the order +// specified. Elasticsearch will not attempt to run the pipeline's remaining +// processors. +// API name: on_failure +func (r *PutPipeline) OnFailure(onfailures ...types.ProcessorContainer) *PutPipeline { + r.req.OnFailure = onfailures + + return r +} + +// Processors Processors used to perform transformations on documents before indexing. +// Processors run sequentially in the order specified. +// API name: processors +func (r *PutPipeline) Processors(processors ...types.ProcessorContainer) *PutPipeline { + r.req.Processors = processors + + return r +} + +// Version Version number used by external systems to track ingest pipelines. This +// parameter is intended for external systems only. Elasticsearch does not use +// or validate pipeline version numbers. +// API name: version +func (r *PutPipeline) Version(versionnumber int64) *PutPipeline { + r.req.Version = &versionnumber return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putpipeline/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putpipeline/request.go index d8be85aed..0eefff6c7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putpipeline/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putpipeline/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putpipeline @@ -29,14 +29,14 @@ import ( // Request holds the request body struct for the package putpipeline // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/put_pipeline/PutPipelineRequest.ts#L25-L74 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/put_pipeline/PutPipelineRequest.ts#L25-L77 type Request struct { // Description Description of the ingest pipeline. Description *string `json:"description,omitempty"` // Meta_ Optional metadata about the ingest pipeline. May have any contents. This map // is not automatically generated by Elasticsearch. - Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` + Meta_ types.Metadata `json:"_meta,omitempty"` // OnFailure Processors to run immediately after a processor failure. Each processor // supports a processor-level `on_failure` value. If a processor without an // `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putpipeline/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putpipeline/response.go index c672f6a1e..22819b8e5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putpipeline/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/putpipeline/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putpipeline // Response holds the response body struct for the package putpipeline // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/put_pipeline/PutPipelineResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/put_pipeline/PutPipelineResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/simulate/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/simulate/request.go index acc72cae9..23e832132 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/simulate/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/simulate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package simulate @@ -29,9 +29,16 @@ import ( // Request holds the request body struct for the package simulate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/simulate/SimulatePipelineRequest.ts#L25-L41 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/simulate/SimulatePipelineRequest.ts#L25-L57 type Request struct { - Docs []types.Document `json:"docs,omitempty"` + + // Docs Sample documents to test in the pipeline. 
+ Docs []types.Document `json:"docs,omitempty"` + // Pipeline Pipeline to test. + // If you don’t specify the `pipeline` request path parameter, this parameter is + // required. + // If you specify both this and the request path parameter, the API only uses + // the request path parameter. Pipeline *types.IngestPipeline `json:"pipeline,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/simulate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/simulate/response.go index 64b2e49d9..9f5b382ce 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/simulate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/simulate/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package simulate @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package simulate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/simulate/SimulatePipelineResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/simulate/SimulatePipelineResponse.ts#L22-L24 type Response struct { Docs []types.PipelineSimulation `json:"docs"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/simulate/simulate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/simulate/simulate.go index 9102712d4..5ca3c346c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/simulate/simulate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ingest/simulate/simulate.go @@ -16,7 +16,7 @@ 
// under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Allows to simulate a pipeline with example documents. package simulate @@ -53,8 +53,9 @@ type Simulate struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -83,6 +84,8 @@ func New(tp elastictransport.Interface) *Simulate { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -112,9 +115,19 @@ func (r *Simulate) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -122,6 +135,7 @@ func (r *Simulate) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -215,7 +229,6 @@ func (r Simulate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -224,6 +237,10 @@ func (r Simulate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -234,19 +251,43 @@ func (r *Simulate) Header(key, value string) *Simulate { return r } -// Id Pipeline ID +// Id Pipeline to test. +// If you don’t specify a `pipeline` in the request body, this parameter is +// required. 
// API Name: id -func (r *Simulate) Id(v string) *Simulate { +func (r *Simulate) Id(id string) *Simulate { r.paramSet |= idMask - r.id = v + r.id = id return r } -// Verbose Verbose mode. Display data output for each processor in executed pipeline +// Verbose If `true`, the response includes output data for each processor in the +// executed pipeline. // API name: verbose -func (r *Simulate) Verbose(b bool) *Simulate { - r.values.Set("verbose", strconv.FormatBool(b)) +func (r *Simulate) Verbose(verbose bool) *Simulate { + r.values.Set("verbose", strconv.FormatBool(verbose)) + + return r +} + +// Docs Sample documents to test in the pipeline. +// API name: docs +func (r *Simulate) Docs(docs ...types.Document) *Simulate { + r.req.Docs = docs + + return r +} + +// Pipeline Pipeline to test. +// If you don’t specify the `pipeline` request path parameter, this parameter is +// required. +// If you specify both this and the request path parameter, the API only uses +// the request path parameter. +// API name: pipeline +func (r *Simulate) Pipeline(pipeline *types.IngestPipeline) *Simulate { + + r.req.Pipeline = pipeline return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/delete/delete.go index cac6ae328..1c639ebfb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/delete/delete.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/delete/delete.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes licensing information for the cluster package delete @@ -157,7 +157,6 @@ func (r Delete) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -166,6 +165,10 @@ func (r Delete) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/delete/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/delete/response.go index f978f8090..6dd58b25a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/delete/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/delete/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/license/delete/DeleteLicenseResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/license/delete/DeleteLicenseResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/get/get.go index e95f1011b..2021e91ae 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/get/get.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/get/get.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves licensing information for the cluster package get @@ -158,7 +158,6 @@ func (r Get) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -167,6 +166,10 @@ func (r Get) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -203,8 +206,8 @@ func (r *Get) Header(key, value string) *Get { // license types. This behavior is maintained for backwards compatibility. // This parameter is deprecated and will always be set to true in 8.x. 
// API name: accept_enterprise -func (r *Get) AcceptEnterprise(b bool) *Get { - r.values.Set("accept_enterprise", strconv.FormatBool(b)) +func (r *Get) AcceptEnterprise(acceptenterprise bool) *Get { + r.values.Set("accept_enterprise", strconv.FormatBool(acceptenterprise)) return r } @@ -212,8 +215,8 @@ func (r *Get) AcceptEnterprise(b bool) *Get { // Local Specifies whether to retrieve local information. The default value is // `false`, which means the information is retrieved from the master node. // API name: local -func (r *Get) Local(b bool) *Get { - r.values.Set("local", strconv.FormatBool(b)) +func (r *Get) Local(local bool) *Get { + r.values.Set("local", strconv.FormatBool(local)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/get/response.go index 81cefe55e..ba3139066 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/get/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package get @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/license/get/GetLicenseResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/license/get/GetLicenseResponse.ts#L22-L24 type Response struct { License types.LicenseInformation `json:"license"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/getbasicstatus/get_basic_status.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/getbasicstatus/get_basic_status.go index 0e7c0f073..7e586faab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/getbasicstatus/get_basic_status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/getbasicstatus/get_basic_status.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves information about the status of the basic license. 
package getbasicstatus @@ -159,7 +159,6 @@ func (r GetBasicStatus) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r GetBasicStatus) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/getbasicstatus/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/getbasicstatus/response.go index 184aad3a8..253ae4bf9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/getbasicstatus/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/getbasicstatus/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getbasicstatus // Response holds the response body struct for the package getbasicstatus // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/license/get_basic_status/GetBasicLicenseStatusResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/license/get_basic_status/GetBasicLicenseStatusResponse.ts#L20-L22 type Response struct { EligibleToStartBasic bool `json:"eligible_to_start_basic"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/gettrialstatus/get_trial_status.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/gettrialstatus/get_trial_status.go index d7409ce13..f15158bed 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/gettrialstatus/get_trial_status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/gettrialstatus/get_trial_status.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves information about the status of the trial license. package gettrialstatus @@ -159,7 +159,6 @@ func (r GetTrialStatus) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r GetTrialStatus) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/gettrialstatus/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/gettrialstatus/response.go index 01ed7a900..f5b92bdbe 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/gettrialstatus/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/gettrialstatus/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package gettrialstatus // Response holds the response body struct for the package gettrialstatus // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/license/get_trial_status/GetTrialLicenseStatusResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/license/get_trial_status/GetTrialLicenseStatusResponse.ts#L20-L22 type Response struct { EligibleToStartTrial bool `json:"eligible_to_start_trial"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/post/post.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/post/post.go index 991628e32..ded85de80 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/post/post.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/post/post.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Updates the license for the cluster. 
package post @@ -49,8 +49,9 @@ type Post struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -77,6 +78,8 @@ func New(tp elastictransport.Interface) *Post { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -106,9 +109,19 @@ func (r *Post) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -116,6 +129,7 @@ func (r *Post) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -193,7 +207,6 @@ func (r Post) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -202,6 +215,10 @@ func (r Post) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,8 +231,24 @@ func (r *Post) Header(key, value string) *Post { // Acknowledge Specifies whether you acknowledge the license changes. // API name: acknowledge -func (r *Post) Acknowledge(b bool) *Post { - r.values.Set("acknowledge", strconv.FormatBool(b)) +func (r *Post) Acknowledge(acknowledge bool) *Post { + r.values.Set("acknowledge", strconv.FormatBool(acknowledge)) + + return r +} + +// API name: license +func (r *Post) License(license *types.License) *Post { + + r.req.License = license + + return r +} + +// Licenses A sequence of one or more JSON documents containing the license information. 
+// API name: licenses +func (r *Post) Licenses(licenses ...types.License) *Post { + r.req.Licenses = licenses return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/post/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/post/request.go index 60cc03f15..8f02f9031 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/post/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/post/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package post @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package post // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/license/post/PostLicenseRequest.ts#L23-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/license/post/PostLicenseRequest.ts#L23-L43 type Request struct { License *types.License `json:"license,omitempty"` // Licenses A sequence of one or more JSON documents containing the license information. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/post/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/post/response.go index 754c71d01..b1bafde7f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/post/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/post/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package post @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package post // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/license/post/PostLicenseResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/license/post/PostLicenseResponse.ts#L23-L29 type Response struct { Acknowledge *types.Acknowledgement `json:"acknowledge,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/poststartbasic/post_start_basic.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/poststartbasic/post_start_basic.go index 1e5cefac0..9a45c3da9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/poststartbasic/post_start_basic.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/poststartbasic/post_start_basic.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Starts an indefinite basic license. 
package poststartbasic @@ -160,7 +160,6 @@ func (r PostStartBasic) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -169,6 +168,10 @@ func (r PostStartBasic) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -202,8 +205,8 @@ func (r *PostStartBasic) Header(key, value string) *PostStartBasic { // Acknowledge whether the user has acknowledged acknowledge messages (default: false) // API name: acknowledge -func (r *PostStartBasic) Acknowledge(b bool) *PostStartBasic { - r.values.Set("acknowledge", strconv.FormatBool(b)) +func (r *PostStartBasic) Acknowledge(acknowledge bool) *PostStartBasic { + r.values.Set("acknowledge", strconv.FormatBool(acknowledge)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/poststartbasic/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/poststartbasic/response.go index d39e12779..575cf4dba 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/poststartbasic/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/poststartbasic/response.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package poststartbasic import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/licensetype" ) // Response holds the response body struct for the package poststartbasic // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/license/post_start_basic/StartBasicLicenseResponse.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/license/post_start_basic/StartBasicLicenseResponse.ts#L23-L31 type Response struct { Acknowledge map[string][]string `json:"acknowledge,omitempty"` @@ -43,3 +49,92 @@ func NewResponse() *Response { } return r } + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "acknowledge": + if s.Acknowledge == nil { + s.Acknowledge = make(map[string][]string, 0) + } + rawMsg := make(map[string]json.RawMessage, 0) + dec.Decode(&rawMsg) + for key, value := range rawMsg { + switch { + case bytes.HasPrefix(value, []byte("\"")), bytes.HasPrefix(value, []byte("{")): + o := new(string) + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return err + } + s.Acknowledge[key] = append(s.Acknowledge[key], *o) + default: + o := []string{} + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return err + } + s.Acknowledge[key] = o + } + } + + case "acknowledged": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err 
+ } + s.Acknowledged = value + case bool: + s.Acknowledged = v + } + + case "basic_was_started": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.BasicWasStarted = value + case bool: + s.BasicWasStarted = v + } + + case "error_message": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ErrorMessage = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/poststarttrial/post_start_trial.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/poststarttrial/post_start_trial.go index 30c67d2c8..195f702a0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/poststarttrial/post_start_trial.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/poststarttrial/post_start_trial.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // starts a limited time trial license. 
package poststarttrial @@ -160,7 +160,6 @@ func (r PostStartTrial) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -169,6 +168,10 @@ func (r PostStartTrial) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -202,15 +205,15 @@ func (r *PostStartTrial) Header(key, value string) *PostStartTrial { // Acknowledge whether the user has acknowledged acknowledge messages (default: false) // API name: acknowledge -func (r *PostStartTrial) Acknowledge(b bool) *PostStartTrial { - r.values.Set("acknowledge", strconv.FormatBool(b)) +func (r *PostStartTrial) Acknowledge(acknowledge bool) *PostStartTrial { + r.values.Set("acknowledge", strconv.FormatBool(acknowledge)) return r } // API name: type_query_string -func (r *PostStartTrial) TypeQueryString(v string) *PostStartTrial { - r.values.Set("type_query_string", v) +func (r *PostStartTrial) TypeQueryString(typequerystring string) *PostStartTrial { + r.values.Set("type_query_string", typequerystring) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/poststarttrial/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/poststarttrial/response.go index 24a09339a..7b5d1c660 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/poststarttrial/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/license/poststarttrial/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package poststarttrial @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package poststarttrial // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/license/post_start_trial/StartTrialLicenseResponse.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/license/post_start_trial/StartTrialLicenseResponse.ts#L22-L29 type Response struct { Acknowledged bool `json:"acknowledged"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/deletepipeline/delete_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/deletepipeline/delete_pipeline.go index a7a749e6f..c0c18e1b4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/deletepipeline/delete_pipeline.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/deletepipeline/delete_pipeline.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes Logstash Pipelines used by Central Management package deletepipeline @@ -24,7 +24,6 @@ package deletepipeline import ( gobytes "bytes" "context" - "encoding/json" "errors" "fmt" "io" @@ -34,7 +33,6 @@ import ( "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) const ( @@ -67,7 +65,7 @@ func NewDeletePipelineFunc(tp elastictransport.Interface) NewDeletePipeline { return func(id string) *DeletePipeline { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -153,33 +151,8 @@ func (r DeletePipeline) Perform(ctx context.Context) (*http.Response, error) { } // Do runs the request through the transport, handle the response and returns a deletepipeline.Response -func (r DeletePipeline) Do(ctx context.Context) (*Response, error) { - - response := NewResponse() - - res, err := r.Perform(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() - - if res.StatusCode < 299 { - err = json.NewDecoder(res.Body).Decode(response) - if err != nil { - return nil, err - } - - return response, nil - - } - - errorResponse := types.NewElasticsearchError() - err = json.NewDecoder(res.Body).Decode(errorResponse) - if err != nil { - return nil, err - } - - return nil, errorResponse +func (r DeletePipeline) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) } // IsSuccess allows to run a query with a context and retrieve the result as a boolean. @@ -210,11 +183,11 @@ func (r *DeletePipeline) Header(key, value string) *DeletePipeline { return r } -// Id The ID of the Pipeline +// Id Identifier for the pipeline. 
// API Name: id -func (r *DeletePipeline) Id(v string) *DeletePipeline { +func (r *DeletePipeline) _id(id string) *DeletePipeline { r.paramSet |= idMask - r.id = v + r.id = id return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/getpipeline/get_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/getpipeline/get_pipeline.go index 965d02f68..b8f756fe3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/getpipeline/get_pipeline.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/getpipeline/get_pipeline.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves Logstash Pipelines used by Central Management package getpipeline @@ -67,7 +67,7 @@ func NewGetPipelineFunc(tp elastictransport.Interface) NewGetPipeline { return func(id string) *GetPipeline { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -99,6 +99,13 @@ func (r *GetPipeline) HttpRequest(ctx context.Context) (*http.Request, error) { r.path.Scheme = "http" switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_logstash") + path.WriteString("/") + path.WriteString("pipeline") + + method = http.MethodGet case r.paramSet == idMask: path.WriteString("/") path.WriteString("_logstash") @@ -170,7 +177,6 @@ func (r GetPipeline) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +185,10 @@ func (r GetPipeline) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -210,11 +220,11 @@ func (r *GetPipeline) Header(key, value 
string) *GetPipeline { return r } -// Id A comma-separated list of Pipeline IDs +// Id Comma-separated list of pipeline identifiers. // API Name: id -func (r *GetPipeline) Id(v string) *GetPipeline { +func (r *GetPipeline) _id(id string) *GetPipeline { r.paramSet |= idMask - r.id = v + r.id = id return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/getpipeline/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/getpipeline/response.go index 3a3e08041..e1b36e17e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/getpipeline/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/getpipeline/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getpipeline @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getpipeline // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/logstash/get_pipeline/LogstashGetPipelineResponse.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/logstash/get_pipeline/LogstashGetPipelineResponse.ts#L24-L26 type Response map[string]types.LogstashPipeline diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/putpipeline/put_pipeline.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/putpipeline/put_pipeline.go index b64f0977a..3f2b2770c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/putpipeline/put_pipeline.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/putpipeline/put_pipeline.go @@ 
-16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Adds and updates Logstash Pipelines used for Central Management package putpipeline @@ -52,8 +52,9 @@ type PutPipeline struct { buf *gobytes.Buffer - req *types.LogstashPipeline - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewPutPipelineFunc(tp elastictransport.Interface) NewPutPipeline { return func(id string) *PutPipeline { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -98,7 +99,7 @@ func (r *PutPipeline) Raw(raw io.Reader) *PutPipeline { } // Request allows to set the request property with the appropriate payload. -func (r *PutPipeline) Request(req *types.LogstashPipeline) *PutPipeline { +func (r *PutPipeline) Request(req *Request) *PutPipeline { r.req = req return r @@ -113,9 +114,19 @@ func (r *PutPipeline) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +134,7 @@ func (r *PutPipeline) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -187,48 +199,76 @@ func (r PutPipeline) Perform(ctx context.Context) (*http.Response, error) { return res, nil } -// Do runs the request through the transport, handle the response and returns a putpipeline.Response -func (r PutPipeline) Do(ctx context.Context) (*Response, error) { +// Header set a key, value pair in the PutPipeline headers map. 
+func (r *PutPipeline) Header(key, value string) *PutPipeline { + r.headers.Set(key, value) - response := NewResponse() + return r +} - res, err := r.Perform(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() +// Id Identifier for the pipeline. +// API Name: id +func (r *PutPipeline) _id(id string) *PutPipeline { + r.paramSet |= idMask + r.id = id - if res.StatusCode < 299 { - err = json.NewDecoder(res.Body).Decode(response) - if err != nil { - return nil, err - } + return r +} - return response, nil +// Description Description of the pipeline. +// This description is not used by Elasticsearch or Logstash. +// API name: description +func (r *PutPipeline) Description(description string) *PutPipeline { - } + r.req.Description = description - errorResponse := types.NewElasticsearchError() - err = json.NewDecoder(res.Body).Decode(errorResponse) - if err != nil { - return nil, err - } + return r +} - return nil, errorResponse +// LastModified Date the pipeline was last updated. +// Must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. +// API name: last_modified +func (r *PutPipeline) LastModified(datetime types.DateTime) *PutPipeline { + r.req.LastModified = datetime + + return r } -// Header set a key, value pair in the PutPipeline headers map. -func (r *PutPipeline) Header(key, value string) *PutPipeline { - r.headers.Set(key, value) +// Pipeline Configuration for the pipeline. +// API name: pipeline +func (r *PutPipeline) Pipeline(pipeline string) *PutPipeline { + + r.req.Pipeline = pipeline return r } -// Id The ID of the Pipeline -// API Name: id -func (r *PutPipeline) Id(v string) *PutPipeline { - r.paramSet |= idMask - r.id = v +// PipelineMetadata Optional metadata about the pipeline. +// May have any contents. +// This metadata is not generated or used by Elasticsearch or Logstash. 
+// API name: pipeline_metadata +func (r *PutPipeline) PipelineMetadata(pipelinemetadata *types.PipelineMetadata) *PutPipeline { + + r.req.PipelineMetadata = *pipelinemetadata + + return r +} + +// PipelineSettings Settings for the pipeline. +// Supports only flat keys in dot notation. +// API name: pipeline_settings +func (r *PutPipeline) PipelineSettings(pipelinesettings *types.PipelineSettings) *PutPipeline { + + r.req.PipelineSettings = *pipelinesettings + + return r +} + +// Username User who last updated the pipeline. +// API name: username +func (r *PutPipeline) Username(username string) *PutPipeline { + + r.req.Username = username return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/putpipeline/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/putpipeline/request.go similarity index 65% rename from vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/putpipeline/response.go rename to vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/putpipeline/request.go index 9d2f57861..94f358cf8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/putpipeline/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/putpipeline/request.go @@ -16,19 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putpipeline -// Response holds the response body struct for the package putpipeline -// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/logstash/put_pipeline/LogstashPutPipelineResponse.ts#L22-L24 - -type Response struct { -} +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) -// NewResponse returns a Response -func NewResponse() *Response { - r := &Response{} - return r -} +// Request holds the request body struct for the package putpipeline +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/logstash/put_pipeline/LogstashPutPipelineRequest.ts#L24-L39 +type Request = types.LogstashPipeline diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/deprecations/deprecations.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/deprecations/deprecations.go index 82c0fba22..4a510a270 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/deprecations/deprecations.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/deprecations/deprecations.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves information about different cluster, node, and index level settings // that use deprecated features that will be removed or changed in the next @@ -179,7 +179,6 @@ func (r Deprecations) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -188,6 +187,10 @@ func (r Deprecations) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -222,9 +225,9 @@ func (r *Deprecations) Header(key, value string) *Deprecations { // Index Comma-separate list of data streams or indices to check. Wildcard (*) // expressions are supported. // API Name: index -func (r *Deprecations) Index(v string) *Deprecations { +func (r *Deprecations) Index(index string) *Deprecations { r.paramSet |= indexMask - r.index = v + r.index = index return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/deprecations/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/deprecations/response.go index 7b87082e8..f67cc69aa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/deprecations/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/deprecations/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deprecations @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package deprecations // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/migration/deprecations/DeprecationInfoResponse.ts#L23-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/migration/deprecations/DeprecationInfoResponse.ts#L23-L30 type Response struct { ClusterSettings []types.Deprecation `json:"cluster_settings"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/getfeatureupgradestatus/get_feature_upgrade_status.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/getfeatureupgradestatus/get_feature_upgrade_status.go index 58899b410..b69238fc3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/getfeatureupgradestatus/get_feature_upgrade_status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/getfeatureupgradestatus/get_feature_upgrade_status.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Find out whether system features need to be upgraded or not package getfeatureupgradestatus @@ -159,7 +159,6 @@ func (r GetFeatureUpgradeStatus) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r GetFeatureUpgradeStatus) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/getfeatureupgradestatus/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/getfeatureupgradestatus/response.go index 922ca3c3e..0608d2fda 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/getfeatureupgradestatus/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/getfeatureupgradestatus/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getfeatureupgradestatus @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package getfeatureupgradestatus // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L23-L28 type Response struct { Features []types.GetMigrationFeature `json:"features"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/postfeatureupgrade/post_feature_upgrade.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/postfeatureupgrade/post_feature_upgrade.go index ed110f3d4..d0cfe7d16 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/postfeatureupgrade/post_feature_upgrade.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/postfeatureupgrade/post_feature_upgrade.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Begin upgrades for system features package postfeatureupgrade @@ -159,7 +159,6 @@ func (r PostFeatureUpgrade) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r PostFeatureUpgrade) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/postfeatureupgrade/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/postfeatureupgrade/response.go index fac56af3e..6fbb9db4c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/postfeatureupgrade/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/migration/postfeatureupgrade/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package postfeatureupgrade @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package postfeatureupgrade // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L20-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L20-L25 type Response struct { Accepted bool `json:"accepted"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/cleartrainedmodeldeploymentcache/clear_trained_model_deployment_cache.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/cleartrainedmodeldeploymentcache/clear_trained_model_deployment_cache.go index ea2f31161..b863236fc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/cleartrainedmodeldeploymentcache/clear_trained_model_deployment_cache.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/cleartrainedmodeldeploymentcache/clear_trained_model_deployment_cache.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Clear the cached results from a trained model deployment package cleartrainedmodeldeploymentcache @@ -67,7 +67,7 @@ func NewClearTrainedModelDeploymentCacheFunc(tp elastictransport.Interface) NewC return func(modelid string) *ClearTrainedModelDeploymentCache { n := New(tp) - n.ModelId(modelid) + n._modelid(modelid) return n } @@ -182,7 +182,6 @@ func (r ClearTrainedModelDeploymentCache) Do(ctx context.Context) (*Response, er } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -191,6 +190,10 @@ func (r ClearTrainedModelDeploymentCache) Do(ctx context.Context) (*Response, er return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -224,9 +227,9 @@ func (r *ClearTrainedModelDeploymentCache) Header(key, value string) *ClearTrain // ModelId The unique identifier of the trained model. // API Name: modelid -func (r *ClearTrainedModelDeploymentCache) ModelId(v string) *ClearTrainedModelDeploymentCache { +func (r *ClearTrainedModelDeploymentCache) _modelid(modelid string) *ClearTrainedModelDeploymentCache { r.paramSet |= modelidMask - r.modelid = v + r.modelid = modelid return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/cleartrainedmodeldeploymentcache/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/cleartrainedmodeldeploymentcache/response.go index 2ce9579ff..26572a332 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/cleartrainedmodeldeploymentcache/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/cleartrainedmodeldeploymentcache/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package cleartrainedmodeldeploymentcache // Response holds the response body struct for the package cleartrainedmodeldeploymentcache // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/clear_trained_model_deployment_cache/MlClearTrainedModelDeploymentCacheResponse.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/clear_trained_model_deployment_cache/MlClearTrainedModelDeploymentCacheResponse.ts#L20-L24 type Response struct { Cleared bool `json:"cleared"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/closejob/close_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/closejob/close_job.go index b0249e607..20434122a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/closejob/close_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/closejob/close_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Closes one or more anomaly detection jobs. A job can be opened and closed // multiple times throughout its lifecycle. 
@@ -31,7 +31,6 @@ import ( "io" "net/http" "net/url" - "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" @@ -54,8 +53,9 @@ type CloseJob struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -71,7 +71,7 @@ func NewCloseJobFunc(tp elastictransport.Interface) NewCloseJob { return func(jobid string) *CloseJob { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -87,6 +87,8 @@ func New(tp elastictransport.Interface) *CloseJob { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -116,9 +118,19 @@ func (r *CloseJob) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -126,6 +138,7 @@ func (r *CloseJob) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -210,7 +223,6 @@ func (r CloseJob) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -219,6 +231,10 @@ func (r CloseJob) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -235,47 +251,33 @@ func (r *CloseJob) Header(key, value string) *CloseJob { // jobs, or a wildcard expression. You can close all jobs by using `_all` or by // specifying `*` as the job identifier. 
// API Name: jobid -func (r *CloseJob) JobId(v string) *CloseJob { +func (r *CloseJob) _jobid(jobid string) *CloseJob { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } -// AllowNoMatch Specifies what to do when the request: contains wildcard expressions and -// there are no jobs that match; contains the `_all` string or no identifiers -// and there are no matches; or contains wildcard expressions and there are only -// partial matches. By default, it returns an empty jobs array when there are no -// matches and the subset of results when there are partial matches. -// If `false`, the request returns a 404 status code when there are no matches -// or only partial matches. +// AllowNoMatch Refer to the description for the `allow_no_match` query parameter. // API name: allow_no_match -func (r *CloseJob) AllowNoMatch(b bool) *CloseJob { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *CloseJob) AllowNoMatch(allownomatch bool) *CloseJob { + r.req.AllowNoMatch = &allownomatch return r } -// Force Use to close a failed job, or to forcefully close a job which has not -// responded to its initial close request; the request returns without -// performing the associated actions such as flushing buffers and persisting the -// model snapshots. -// If you want the job to be in a consistent state after the close job API -// returns, do not set to `true`. This parameter should be used only in -// situations where the job has already failed or where you are not interested -// in results the job might have recently produced or might produce in the -// future. +// Force Refer to the descriptiion for the `force` query parameter. // API name: force -func (r *CloseJob) Force(b bool) *CloseJob { - r.values.Set("force", strconv.FormatBool(b)) +func (r *CloseJob) Force(force bool) *CloseJob { + r.req.Force = &force return r } -// Timeout Controls the time to wait until a job has closed. 
+// Timeout Refer to the description for the `timeout` query parameter. // API name: timeout -func (r *CloseJob) Timeout(v string) *CloseJob { - r.values.Set("timeout", v) +func (r *CloseJob) Timeout(duration types.Duration) *CloseJob { + r.req.Timeout = duration return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/closejob/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/closejob/request.go index 246a76267..c2313ffb0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/closejob/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/closejob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package closejob @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package closejob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/close_job/MlCloseJobRequest.ts#L24-L77 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/close_job/MlCloseJobRequest.ts#L24-L77 type Request struct { // AllowNoMatch Refer to the description for the `allow_no_match` query parameter. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/closejob/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/closejob/response.go index 33b869109..826c02e10 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/closejob/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/closejob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package closejob // Response holds the response body struct for the package closejob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/close_job/MlCloseJobResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/close_job/MlCloseJobResponse.ts#L20-L22 type Response struct { Closed bool `json:"closed"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendar/delete_calendar.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendar/delete_calendar.go index 3cb897698..3eb60c78e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendar/delete_calendar.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendar/delete_calendar.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes a calendar. 
package deletecalendar @@ -67,7 +67,7 @@ func NewDeleteCalendarFunc(tp elastictransport.Interface) NewDeleteCalendar { return func(calendarid string) *DeleteCalendar { n := New(tp) - n.CalendarId(calendarid) + n._calendarid(calendarid) return n } @@ -170,7 +170,6 @@ func (r DeleteCalendar) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r DeleteCalendar) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,9 +215,9 @@ func (r *DeleteCalendar) Header(key, value string) *DeleteCalendar { // CalendarId A string that uniquely identifies a calendar. // API Name: calendarid -func (r *DeleteCalendar) CalendarId(v string) *DeleteCalendar { +func (r *DeleteCalendar) _calendarid(calendarid string) *DeleteCalendar { r.paramSet |= calendaridMask - r.calendarid = v + r.calendarid = calendarid return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendar/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendar/response.go index a7a9c381e..bd1098551 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendar/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendar/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletecalendar // Response holds the response body struct for the package deletecalendar // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/delete_calendar/MlDeleteCalendarResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/delete_calendar/MlDeleteCalendarResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendarevent/delete_calendar_event.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendarevent/delete_calendar_event.go index bfc94f828..bf157af74 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendarevent/delete_calendar_event.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendarevent/delete_calendar_event.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes scheduled events from a calendar. 
package deletecalendarevent @@ -70,9 +70,9 @@ func NewDeleteCalendarEventFunc(tp elastictransport.Interface) NewDeleteCalendar return func(calendarid, eventid string) *DeleteCalendarEvent { n := New(tp) - n.CalendarId(calendarid) + n._calendarid(calendarid) - n.EventId(eventid) + n._eventid(eventid) return n } @@ -180,7 +180,6 @@ func (r DeleteCalendarEvent) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -189,6 +188,10 @@ func (r DeleteCalendarEvent) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -220,20 +223,21 @@ func (r *DeleteCalendarEvent) Header(key, value string) *DeleteCalendarEvent { return r } -// CalendarId The ID of the calendar to modify +// CalendarId A string that uniquely identifies a calendar. // API Name: calendarid -func (r *DeleteCalendarEvent) CalendarId(v string) *DeleteCalendarEvent { +func (r *DeleteCalendarEvent) _calendarid(calendarid string) *DeleteCalendarEvent { r.paramSet |= calendaridMask - r.calendarid = v + r.calendarid = calendarid return r } -// EventId The ID of the event to remove from the calendar +// EventId Identifier for the scheduled event. +// You can obtain this identifier by using the get calendar events API. 
// API Name: eventid -func (r *DeleteCalendarEvent) EventId(v string) *DeleteCalendarEvent { +func (r *DeleteCalendarEvent) _eventid(eventid string) *DeleteCalendarEvent { r.paramSet |= eventidMask - r.eventid = v + r.eventid = eventid return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendarevent/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendarevent/response.go index 83c5e5695..8d6b1ace2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendarevent/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendarevent/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletecalendarevent // Response holds the response body struct for the package deletecalendarevent // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/delete_calendar_event/MlDeleteCalendarEventResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/delete_calendar_event/MlDeleteCalendarEventResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendarjob/delete_calendar_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendarjob/delete_calendar_job.go index 2415f9a6e..af7b899cd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendarjob/delete_calendar_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendarjob/delete_calendar_job.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes anomaly detection jobs from a calendar. package deletecalendarjob @@ -70,9 +70,9 @@ func NewDeleteCalendarJobFunc(tp elastictransport.Interface) NewDeleteCalendarJo return func(calendarid, jobid string) *DeleteCalendarJob { n := New(tp) - n.CalendarId(calendarid) + n._calendarid(calendarid) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -180,7 +180,6 @@ func (r DeleteCalendarJob) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -189,6 +188,10 @@ func (r DeleteCalendarJob) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -222,9 +225,9 @@ func (r *DeleteCalendarJob) Header(key, value string) *DeleteCalendarJob { // CalendarId A string that uniquely identifies a calendar. // API Name: calendarid -func (r *DeleteCalendarJob) CalendarId(v string) *DeleteCalendarJob { +func (r *DeleteCalendarJob) _calendarid(calendarid string) *DeleteCalendarJob { r.paramSet |= calendaridMask - r.calendarid = v + r.calendarid = calendarid return r } @@ -233,9 +236,9 @@ func (r *DeleteCalendarJob) CalendarId(v string) *DeleteCalendarJob { // group name, or a // comma-separated list of jobs or groups. 
// API Name: jobid -func (r *DeleteCalendarJob) JobId(v string) *DeleteCalendarJob { +func (r *DeleteCalendarJob) _jobid(jobid string) *DeleteCalendarJob { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendarjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendarjob/response.go index f954158cc..09d4d6bc9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendarjob/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletecalendarjob/response.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletecalendarjob +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Response holds the response body struct for the package deletecalendarjob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/delete_calendar_job/MlDeleteCalendarJobResponse.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/delete_calendar_job/MlDeleteCalendarJobResponse.ts#L22-L31 type Response struct { @@ -39,3 +47,55 @@ func NewResponse() *Response { r := &Response{} return r } + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "calendar_id": + if err := dec.Decode(&s.CalendarId); err != nil { + return err + } + + case "description": + var tmp json.RawMessage + if err := 
dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "job_ids": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.JobIds = append(s.JobIds, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.JobIds); err != nil { + return err + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletedatafeed/delete_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletedatafeed/delete_datafeed.go index e99b0db30..e84a46740 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletedatafeed/delete_datafeed.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletedatafeed/delete_datafeed.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an existing datafeed. 
package deletedatafeed @@ -68,7 +68,7 @@ func NewDeleteDatafeedFunc(tp elastictransport.Interface) NewDeleteDatafeed { return func(datafeedid string) *DeleteDatafeed { n := New(tp) - n.DatafeedId(datafeedid) + n._datafeedid(datafeedid) return n } @@ -171,7 +171,6 @@ func (r DeleteDatafeed) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -180,6 +179,10 @@ func (r DeleteDatafeed) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,9 +219,9 @@ func (r *DeleteDatafeed) Header(key, value string) *DeleteDatafeed { // hyphens, and underscores. It must start and end with alphanumeric // characters. // API Name: datafeedid -func (r *DeleteDatafeed) DatafeedId(v string) *DeleteDatafeed { +func (r *DeleteDatafeed) _datafeedid(datafeedid string) *DeleteDatafeed { r.paramSet |= datafeedidMask - r.datafeedid = v + r.datafeedid = datafeedid return r } @@ -226,8 +229,8 @@ func (r *DeleteDatafeed) DatafeedId(v string) *DeleteDatafeed { // Force Use to forcefully delete a started datafeed; this method is quicker than // stopping and deleting the datafeed. // API name: force -func (r *DeleteDatafeed) Force(b bool) *DeleteDatafeed { - r.values.Set("force", strconv.FormatBool(b)) +func (r *DeleteDatafeed) Force(force bool) *DeleteDatafeed { + r.values.Set("force", strconv.FormatBool(force)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletedatafeed/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletedatafeed/response.go index 5761daf07..a92e07f90 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletedatafeed/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletedatafeed/response.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletedatafeed // Response holds the response body struct for the package deletedatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/delete_datafeed/MlDeleteDatafeedResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/delete_datafeed/MlDeleteDatafeedResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletedataframeanalytics/delete_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletedataframeanalytics/delete_data_frame_analytics.go index b8da3ad61..d74f7d5cf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletedataframeanalytics/delete_data_frame_analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletedataframeanalytics/delete_data_frame_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an existing data frame analytics job. 
package deletedataframeanalytics @@ -68,7 +68,7 @@ func NewDeleteDataFrameAnalyticsFunc(tp elastictransport.Interface) NewDeleteDat return func(id string) *DeleteDataFrameAnalytics { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -173,7 +173,6 @@ func (r DeleteDataFrameAnalytics) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -182,6 +181,10 @@ func (r DeleteDataFrameAnalytics) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -215,9 +218,9 @@ func (r *DeleteDataFrameAnalytics) Header(key, value string) *DeleteDataFrameAna // Id Identifier for the data frame analytics job. // API Name: id -func (r *DeleteDataFrameAnalytics) Id(v string) *DeleteDataFrameAnalytics { +func (r *DeleteDataFrameAnalytics) _id(id string) *DeleteDataFrameAnalytics { r.paramSet |= idMask - r.id = v + r.id = id return r } @@ -225,16 +228,16 @@ func (r *DeleteDataFrameAnalytics) Id(v string) *DeleteDataFrameAnalytics { // Force If `true`, it deletes a job that is not stopped; this method is quicker than // stopping and deleting the job. // API name: force -func (r *DeleteDataFrameAnalytics) Force(b bool) *DeleteDataFrameAnalytics { - r.values.Set("force", strconv.FormatBool(b)) +func (r *DeleteDataFrameAnalytics) Force(force bool) *DeleteDataFrameAnalytics { + r.values.Set("force", strconv.FormatBool(force)) return r } // Timeout The time to wait for the job to be deleted. 
// API name: timeout -func (r *DeleteDataFrameAnalytics) Timeout(v string) *DeleteDataFrameAnalytics { - r.values.Set("timeout", v) +func (r *DeleteDataFrameAnalytics) Timeout(duration string) *DeleteDataFrameAnalytics { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletedataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletedataframeanalytics/response.go index 38bf136ef..d4454bcfe 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletedataframeanalytics/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletedataframeanalytics/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletedataframeanalytics // Response holds the response body struct for the package deletedataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/delete_data_frame_analytics/MlDeleteDataFrameAnalyticsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/delete_data_frame_analytics/MlDeleteDataFrameAnalyticsResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteexpireddata/delete_expired_data.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteexpireddata/delete_expired_data.go index 7bdf7b869..9120b3352 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteexpireddata/delete_expired_data.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteexpireddata/delete_expired_data.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes expired and unused machine learning data. package deleteexpireddata @@ -52,8 +52,9 @@ type DeleteExpiredData struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -82,6 +83,8 @@ func New(tp elastictransport.Interface) *DeleteExpiredData { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -111,9 +114,19 @@ func (r *DeleteExpiredData) HttpRequest(ctx context.Context) (*http.Request, err var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -121,6 +134,7 @@ func (r *DeleteExpiredData) HttpRequest(ctx context.Context) (*http.Request, err } r.buf.Write(data) + } r.path.Scheme = "http" @@ -210,7 +224,6 @@ func (r DeleteExpiredData) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -219,6 +232,10 @@ func (r DeleteExpiredData) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -232,9 +249,9 @@ func (r *DeleteExpiredData) Header(key, value string) *DeleteExpiredData { // JobId Identifier for an anomaly detection job. 
It can be a job identifier, a // group name, or a wildcard expression. // API Name: jobid -func (r *DeleteExpiredData) JobId(v string) *DeleteExpiredData { +func (r *DeleteExpiredData) JobId(jobid string) *DeleteExpiredData { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } @@ -242,16 +259,17 @@ func (r *DeleteExpiredData) JobId(v string) *DeleteExpiredData { // RequestsPerSecond The desired requests per second for the deletion processes. The default // behavior is no throttling. // API name: requests_per_second -func (r *DeleteExpiredData) RequestsPerSecond(v string) *DeleteExpiredData { - r.values.Set("requests_per_second", v) +func (r *DeleteExpiredData) RequestsPerSecond(requestspersecond float32) *DeleteExpiredData { + + r.req.RequestsPerSecond = &requestspersecond return r } // Timeout How long can the underlying delete processes run until they are canceled. // API name: timeout -func (r *DeleteExpiredData) Timeout(v string) *DeleteExpiredData { - r.values.Set("timeout", v) +func (r *DeleteExpiredData) Timeout(duration types.Duration) *DeleteExpiredData { + r.req.Timeout = duration return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteexpireddata/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteexpireddata/request.go index f12970f2c..a19be67fd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteexpireddata/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteexpireddata/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deleteexpireddata @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package deleteexpireddata // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/delete_expired_data/MlDeleteExpiredDataRequest.ts#L25-L72 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/delete_expired_data/MlDeleteExpiredDataRequest.ts#L25-L72 type Request struct { // RequestsPerSecond The desired requests per second for the deletion processes. The default diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteexpireddata/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteexpireddata/response.go index a91924226..dfc8cffb5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteexpireddata/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteexpireddata/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deleteexpireddata // Response holds the response body struct for the package deleteexpireddata // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/delete_expired_data/MlDeleteExpiredDataResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/delete_expired_data/MlDeleteExpiredDataResponse.ts#L20-L22 type Response struct { Deleted bool `json:"deleted"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletefilter/delete_filter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletefilter/delete_filter.go index cd175207a..c69f96d3d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletefilter/delete_filter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletefilter/delete_filter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes a filter. 
package deletefilter @@ -67,7 +67,7 @@ func NewDeleteFilterFunc(tp elastictransport.Interface) NewDeleteFilter { return func(filterid string) *DeleteFilter { n := New(tp) - n.FilterId(filterid) + n._filterid(filterid) return n } @@ -170,7 +170,6 @@ func (r DeleteFilter) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r DeleteFilter) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,9 +215,9 @@ func (r *DeleteFilter) Header(key, value string) *DeleteFilter { // FilterId A string that uniquely identifies a filter. // API Name: filterid -func (r *DeleteFilter) FilterId(v string) *DeleteFilter { +func (r *DeleteFilter) _filterid(filterid string) *DeleteFilter { r.paramSet |= filteridMask - r.filterid = v + r.filterid = filterid return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletefilter/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletefilter/response.go index 0a84bd16f..37dd87ae3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletefilter/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletefilter/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletefilter // Response holds the response body struct for the package deletefilter // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/delete_filter/MlDeleteFilterResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/delete_filter/MlDeleteFilterResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteforecast/delete_forecast.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteforecast/delete_forecast.go index 1c9a78320..92b647b9d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteforecast/delete_forecast.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteforecast/delete_forecast.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes forecasts from a machine learning job. 
package deleteforecast @@ -71,7 +71,7 @@ func NewDeleteForecastFunc(tp elastictransport.Interface) NewDeleteForecast { return func(jobid string) *DeleteForecast { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -191,7 +191,6 @@ func (r DeleteForecast) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -200,6 +199,10 @@ func (r DeleteForecast) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -233,9 +236,9 @@ func (r *DeleteForecast) Header(key, value string) *DeleteForecast { // JobId Identifier for the anomaly detection job. // API Name: jobid -func (r *DeleteForecast) JobId(v string) *DeleteForecast { +func (r *DeleteForecast) _jobid(jobid string) *DeleteForecast { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } @@ -244,9 +247,9 @@ func (r *DeleteForecast) JobId(v string) *DeleteForecast { // this optional parameter or if you specify `_all` or `*` the API deletes // all forecasts from the job. // API Name: forecastid -func (r *DeleteForecast) ForecastId(v string) *DeleteForecast { +func (r *DeleteForecast) ForecastId(forecastid string) *DeleteForecast { r.paramSet |= forecastidMask - r.forecastid = v + r.forecastid = forecastid return r } @@ -256,8 +259,8 @@ func (r *DeleteForecast) ForecastId(v string) *DeleteForecast { // forecasts associated with the job, attempts to delete all forecasts // return an error. // API name: allow_no_forecasts -func (r *DeleteForecast) AllowNoForecasts(b bool) *DeleteForecast { - r.values.Set("allow_no_forecasts", strconv.FormatBool(b)) +func (r *DeleteForecast) AllowNoForecasts(allownoforecasts bool) *DeleteForecast { + r.values.Set("allow_no_forecasts", strconv.FormatBool(allownoforecasts)) return r } @@ -266,8 +269,8 @@ func (r *DeleteForecast) AllowNoForecasts(b bool) *DeleteForecast { // operation. 
When this period of time elapses, the API fails and returns an // error. // API name: timeout -func (r *DeleteForecast) Timeout(v string) *DeleteForecast { - r.values.Set("timeout", v) +func (r *DeleteForecast) Timeout(duration string) *DeleteForecast { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteforecast/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteforecast/response.go index bcb6599b8..94a1eee07 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteforecast/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deleteforecast/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deleteforecast // Response holds the response body struct for the package deleteforecast // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/delete_forecast/MlDeleteForecastResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/delete_forecast/MlDeleteForecastResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletejob/delete_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletejob/delete_job.go index f020b045f..1f79e3259 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletejob/delete_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletejob/delete_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an existing anomaly detection job. package deletejob @@ -68,7 +68,7 @@ func NewDeleteJobFunc(tp elastictransport.Interface) NewDeleteJob { return func(jobid string) *DeleteJob { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -171,7 +171,6 @@ func (r DeleteJob) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -180,6 +179,10 @@ func (r DeleteJob) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -213,9 +216,9 @@ func (r *DeleteJob) Header(key, value string) *DeleteJob { // JobId Identifier for the anomaly detection job. // API Name: jobid -func (r *DeleteJob) JobId(v string) *DeleteJob { +func (r *DeleteJob) _jobid(jobid string) *DeleteJob { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } @@ -223,8 +226,8 @@ func (r *DeleteJob) JobId(v string) *DeleteJob { // Force Use to forcefully delete an opened job; this method is quicker than // closing and deleting the job. // API name: force -func (r *DeleteJob) Force(b bool) *DeleteJob { - r.values.Set("force", strconv.FormatBool(b)) +func (r *DeleteJob) Force(force bool) *DeleteJob { + r.values.Set("force", strconv.FormatBool(force)) return r } @@ -234,8 +237,8 @@ func (r *DeleteJob) Force(b bool) *DeleteJob { // is // reset. 
// API name: delete_user_annotations -func (r *DeleteJob) DeleteUserAnnotations(b bool) *DeleteJob { - r.values.Set("delete_user_annotations", strconv.FormatBool(b)) +func (r *DeleteJob) DeleteUserAnnotations(deleteuserannotations bool) *DeleteJob { + r.values.Set("delete_user_annotations", strconv.FormatBool(deleteuserannotations)) return r } @@ -243,8 +246,8 @@ func (r *DeleteJob) DeleteUserAnnotations(b bool) *DeleteJob { // WaitForCompletion Specifies whether the request should return immediately or wait until the // job deletion completes. // API name: wait_for_completion -func (r *DeleteJob) WaitForCompletion(b bool) *DeleteJob { - r.values.Set("wait_for_completion", strconv.FormatBool(b)) +func (r *DeleteJob) WaitForCompletion(waitforcompletion bool) *DeleteJob { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletejob/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletejob/response.go index 0147bc27c..e927a691a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletejob/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletejob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletejob // Response holds the response body struct for the package deletejob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/delete_job/MlDeleteJobResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/delete_job/MlDeleteJobResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletemodelsnapshot/delete_model_snapshot.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletemodelsnapshot/delete_model_snapshot.go index bc5914b9a..199df0611 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletemodelsnapshot/delete_model_snapshot.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletemodelsnapshot/delete_model_snapshot.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an existing model snapshot. 
package deletemodelsnapshot @@ -70,9 +70,9 @@ func NewDeleteModelSnapshotFunc(tp elastictransport.Interface) NewDeleteModelSna return func(jobid, snapshotid string) *DeleteModelSnapshot { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) - n.SnapshotId(snapshotid) + n._snapshotid(snapshotid) return n } @@ -180,7 +180,6 @@ func (r DeleteModelSnapshot) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -189,6 +188,10 @@ func (r DeleteModelSnapshot) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -222,18 +225,18 @@ func (r *DeleteModelSnapshot) Header(key, value string) *DeleteModelSnapshot { // JobId Identifier for the anomaly detection job. // API Name: jobid -func (r *DeleteModelSnapshot) JobId(v string) *DeleteModelSnapshot { +func (r *DeleteModelSnapshot) _jobid(jobid string) *DeleteModelSnapshot { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } // SnapshotId Identifier for the model snapshot. // API Name: snapshotid -func (r *DeleteModelSnapshot) SnapshotId(v string) *DeleteModelSnapshot { +func (r *DeleteModelSnapshot) _snapshotid(snapshotid string) *DeleteModelSnapshot { r.paramSet |= snapshotidMask - r.snapshotid = v + r.snapshotid = snapshotid return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletemodelsnapshot/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletemodelsnapshot/response.go index 883416941..f4b2bea8d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletemodelsnapshot/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletemodelsnapshot/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletemodelsnapshot // Response holds the response body struct for the package deletemodelsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/delete_model_snapshot/MlDeleteModelSnapshotResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/delete_model_snapshot/MlDeleteModelSnapshotResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletetrainedmodel/delete_trained_model.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletetrainedmodel/delete_trained_model.go index 5cb6d53ce..2fbee83bd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletetrainedmodel/delete_trained_model.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletetrainedmodel/delete_trained_model.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an existing trained inference model that is currently not referenced // by an ingest pipeline. 
@@ -69,7 +69,7 @@ func NewDeleteTrainedModelFunc(tp elastictransport.Interface) NewDeleteTrainedMo return func(modelid string) *DeleteTrainedModel { n := New(tp) - n.ModelId(modelid) + n._modelid(modelid) return n } @@ -173,7 +173,6 @@ func (r DeleteTrainedModel) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -182,6 +181,10 @@ func (r DeleteTrainedModel) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -215,9 +218,9 @@ func (r *DeleteTrainedModel) Header(key, value string) *DeleteTrainedModel { // ModelId The unique identifier of the trained model. // API Name: modelid -func (r *DeleteTrainedModel) ModelId(v string) *DeleteTrainedModel { +func (r *DeleteTrainedModel) _modelid(modelid string) *DeleteTrainedModel { r.paramSet |= modelidMask - r.modelid = v + r.modelid = modelid return r } @@ -225,8 +228,8 @@ func (r *DeleteTrainedModel) ModelId(v string) *DeleteTrainedModel { // Force Forcefully deletes a trained model that is referenced by ingest pipelines or // has a started deployment. // API name: force -func (r *DeleteTrainedModel) Force(b bool) *DeleteTrainedModel { - r.values.Set("force", strconv.FormatBool(b)) +func (r *DeleteTrainedModel) Force(force bool) *DeleteTrainedModel { + r.values.Set("force", strconv.FormatBool(force)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletetrainedmodel/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletetrainedmodel/response.go index 66cc3c8ab..2c2e976ec 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletetrainedmodel/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletetrainedmodel/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletetrainedmodel // Response holds the response body struct for the package deletetrainedmodel // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/delete_trained_model/MlDeleteTrainedModelResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/delete_trained_model/MlDeleteTrainedModelResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletetrainedmodelalias/delete_trained_model_alias.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletetrainedmodelalias/delete_trained_model_alias.go index 1facfdc7d..e19fe2e3a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletetrainedmodelalias/delete_trained_model_alias.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletetrainedmodelalias/delete_trained_model_alias.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes a model alias that refers to the trained model package deletetrainedmodelalias @@ -70,9 +70,9 @@ func NewDeleteTrainedModelAliasFunc(tp elastictransport.Interface) NewDeleteTrai return func(modelid, modelalias string) *DeleteTrainedModelAlias { n := New(tp) - n.ModelAlias(modelalias) + n._modelalias(modelalias) - n.ModelId(modelid) + n._modelid(modelid) return n } @@ -186,7 +186,6 @@ func (r DeleteTrainedModelAlias) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -195,6 +194,10 @@ func (r DeleteTrainedModelAlias) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -228,18 +231,18 @@ func (r *DeleteTrainedModelAlias) Header(key, value string) *DeleteTrainedModelA // ModelAlias The model alias to delete. // API Name: modelalias -func (r *DeleteTrainedModelAlias) ModelAlias(v string) *DeleteTrainedModelAlias { +func (r *DeleteTrainedModelAlias) _modelalias(modelalias string) *DeleteTrainedModelAlias { r.paramSet |= modelaliasMask - r.modelalias = v + r.modelalias = modelalias return r } // ModelId The trained model ID to which the model alias refers. 
// API Name: modelid -func (r *DeleteTrainedModelAlias) ModelId(v string) *DeleteTrainedModelAlias { +func (r *DeleteTrainedModelAlias) _modelid(modelid string) *DeleteTrainedModelAlias { r.paramSet |= modelidMask - r.modelid = v + r.modelid = modelid return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletetrainedmodelalias/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletetrainedmodelalias/response.go index 9afc6e6c3..572a5b6e1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletetrainedmodelalias/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/deletetrainedmodelalias/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletetrainedmodelalias // Response holds the response body struct for the package deletetrainedmodelalias // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/delete_trained_model_alias/MlDeleteTrainedModelAliasResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/delete_trained_model_alias/MlDeleteTrainedModelAliasResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/estimatemodelmemory/estimate_model_memory.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/estimatemodelmemory/estimate_model_memory.go index ea76a13ee..a31adb23c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/estimatemodelmemory/estimate_model_memory.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/estimatemodelmemory/estimate_model_memory.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Estimates the model memory package estimatemodelmemory @@ -48,8 +48,9 @@ type EstimateModelMemory struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +77,8 @@ func New(tp elastictransport.Interface) *EstimateModelMemory { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +108,19 @@ func (r *EstimateModelMemory) HttpRequest(ctx context.Context) (*http.Request, e var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +128,7 @@ func (r *EstimateModelMemory) HttpRequest(ctx context.Context) (*http.Request, e } r.buf.Write(data) + } r.path.Scheme = "http" @@ -196,7 +210,6 @@ func (r EstimateModelMemory) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -205,6 +218,10 @@ func (r EstimateModelMemory) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,3 +231,41 @@ func (r *EstimateModelMemory) Header(key, value string) *EstimateModelMemory { return r } + +// AnalysisConfig For a list of the properties that you can specify in the +// 
`analysis_config` component of the body of this API. +// API name: analysis_config +func (r *EstimateModelMemory) AnalysisConfig(analysisconfig *types.AnalysisConfig) *EstimateModelMemory { + + r.req.AnalysisConfig = analysisconfig + + return r +} + +// MaxBucketCardinality Estimates of the highest cardinality in a single bucket that is observed +// for influencer fields over the time period that the job analyzes data. +// To produce a good answer, values must be provided for all influencer +// fields. Providing values for fields that are not listed as `influencers` +// has no effect on the estimation. +// API name: max_bucket_cardinality +func (r *EstimateModelMemory) MaxBucketCardinality(maxbucketcardinality map[string]int64) *EstimateModelMemory { + + r.req.MaxBucketCardinality = maxbucketcardinality + + return r +} + +// OverallCardinality Estimates of the cardinality that is observed for fields over the whole +// time period that the job analyzes data. To produce a good answer, values +// must be provided for fields referenced in the `by_field_name`, +// `over_field_name` and `partition_field_name` of any detectors. Providing +// values for other fields has no effect on the estimation. It can be +// omitted from the request if no detectors have a `by_field_name`, +// `over_field_name` or `partition_field_name`. 
+// API name: overall_cardinality +func (r *EstimateModelMemory) OverallCardinality(overallcardinality map[string]int64) *EstimateModelMemory { + + r.req.OverallCardinality = overallcardinality + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/estimatemodelmemory/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/estimatemodelmemory/request.go index 6e5368c0e..68999a3e0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/estimatemodelmemory/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/estimatemodelmemory/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package estimatemodelmemory @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package estimatemodelmemory // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/estimate_model_memory/MlEstimateModelMemoryRequest.ts#L26-L61 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/estimate_model_memory/MlEstimateModelMemoryRequest.ts#L26-L61 type Request struct { // AnalysisConfig For a list of the properties that you can specify in the diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/estimatemodelmemory/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/estimatemodelmemory/response.go index 43b97538e..7acdfdbbe 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/estimatemodelmemory/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/estimatemodelmemory/response.go @@ -16,13 +16,13 @@ // under the 
License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package estimatemodelmemory // Response holds the response body struct for the package estimatemodelmemory // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/estimate_model_memory/MlEstimateModelMemoryResponse.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/estimate_model_memory/MlEstimateModelMemoryResponse.ts#L20-L24 type Response struct { ModelMemoryEstimate string `json:"model_memory_estimate"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/evaluatedataframe/evaluate_data_frame.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/evaluatedataframe/evaluate_data_frame.go index 6c30bacca..6a76b26b5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/evaluatedataframe/evaluate_data_frame.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/evaluatedataframe/evaluate_data_frame.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Evaluates the data frame analytics for an annotated index. 
package evaluatedataframe @@ -48,8 +48,9 @@ type EvaluateDataFrame struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +77,8 @@ func New(tp elastictransport.Interface) *EvaluateDataFrame { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +108,19 @@ func (r *EvaluateDataFrame) HttpRequest(ctx context.Context) (*http.Request, err var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +128,7 @@ func (r *EvaluateDataFrame) HttpRequest(ctx context.Context) (*http.Request, err } r.buf.Write(data) + } r.path.Scheme = "http" @@ -196,7 +210,6 @@ func (r EvaluateDataFrame) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -205,6 +218,10 @@ func (r EvaluateDataFrame) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,3 +231,29 @@ func (r *EvaluateDataFrame) Header(key, value string) *EvaluateDataFrame { return r } + +// Evaluation Defines the type of evaluation you want to perform. +// API name: evaluation +func (r *EvaluateDataFrame) Evaluation(evaluation *types.DataframeEvaluationContainer) *EvaluateDataFrame { + + r.req.Evaluation = *evaluation + + return r +} + +// Index Defines the `index` in which the evaluation will be performed. +// API name: index +func (r *EvaluateDataFrame) Index(indexname string) *EvaluateDataFrame { + r.req.Index = indexname + + return r +} + +// Query A query clause that retrieves a subset of data from the source index. 
+// API name: query +func (r *EvaluateDataFrame) Query(query *types.Query) *EvaluateDataFrame { + + r.req.Query = query + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/evaluatedataframe/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/evaluatedataframe/request.go index 41521a73d..9adafbaa8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/evaluatedataframe/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/evaluatedataframe/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package evaluatedataframe @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package evaluatedataframe // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/evaluate_data_frame/MlEvaluateDataFrameRequest.ts#L25-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/evaluate_data_frame/MlEvaluateDataFrameRequest.ts#L25-L52 type Request struct { // Evaluation Defines the type of evaluation you want to perform. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/evaluatedataframe/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/evaluatedataframe/response.go index 91d1186a8..77892dd8e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/evaluatedataframe/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/evaluatedataframe/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package evaluatedataframe @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package evaluatedataframe // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/evaluate_data_frame/MlEvaluateDataFrameResponse.ts#L26-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/evaluate_data_frame/MlEvaluateDataFrameResponse.ts#L26-L33 type Response struct { Classification *types.DataframeClassificationSummary `json:"classification,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/explaindataframeanalytics/explain_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/explaindataframeanalytics/explain_data_frame_analytics.go index 0f7ac0b47..85931d137 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/explaindataframeanalytics/explain_data_frame_analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/explaindataframeanalytics/explain_data_frame_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Explains a data frame analytics config. 
package explaindataframeanalytics @@ -52,8 +52,9 @@ type ExplainDataFrameAnalytics struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -82,6 +83,8 @@ func New(tp elastictransport.Interface) *ExplainDataFrameAnalytics { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -111,9 +114,19 @@ func (r *ExplainDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Requ var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -121,6 +134,7 @@ func (r *ExplainDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Requ } r.buf.Write(data) + } r.path.Scheme = "http" @@ -218,7 +232,6 @@ func (r ExplainDataFrameAnalytics) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -227,6 +240,10 @@ func (r ExplainDataFrameAnalytics) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -241,9 +258,95 @@ func (r *ExplainDataFrameAnalytics) Header(key, value string) *ExplainDataFrameA // lowercase alphanumeric characters (a-z and 0-9), hyphens, and // underscores. It must start and end with alphanumeric characters. // API Name: id -func (r *ExplainDataFrameAnalytics) Id(v string) *ExplainDataFrameAnalytics { +func (r *ExplainDataFrameAnalytics) Id(id string) *ExplainDataFrameAnalytics { r.paramSet |= idMask - r.id = v + r.id = id + + return r +} + +// AllowLazyStart Specifies whether this job can start when there is insufficient machine +// learning node capacity for it to be immediately assigned to a node. 
+// API name: allow_lazy_start +func (r *ExplainDataFrameAnalytics) AllowLazyStart(allowlazystart bool) *ExplainDataFrameAnalytics { + r.req.AllowLazyStart = &allowlazystart + + return r +} + +// Analysis The analysis configuration, which contains the information necessary to +// perform one of the following types of analysis: classification, outlier +// detection, or regression. +// API name: analysis +func (r *ExplainDataFrameAnalytics) Analysis(analysis *types.DataframeAnalysisContainer) *ExplainDataFrameAnalytics { + + r.req.Analysis = analysis + + return r +} + +// AnalyzedFields Specify includes and/or excludes patterns to select which fields will be +// included in the analysis. The patterns specified in excludes are applied +// last, therefore excludes takes precedence. In other words, if the same +// field is specified in both includes and excludes, then the field will not +// be included in the analysis. +// API name: analyzed_fields +func (r *ExplainDataFrameAnalytics) AnalyzedFields(analyzedfields *types.DataframeAnalysisAnalyzedFields) *ExplainDataFrameAnalytics { + + r.req.AnalyzedFields = analyzedfields + + return r +} + +// Description A description of the job. +// API name: description +func (r *ExplainDataFrameAnalytics) Description(description string) *ExplainDataFrameAnalytics { + + r.req.Description = &description + + return r +} + +// Dest The destination configuration, consisting of index and optionally +// results_field (ml by default). +// API name: dest +func (r *ExplainDataFrameAnalytics) Dest(dest *types.DataframeAnalyticsDestination) *ExplainDataFrameAnalytics { + + r.req.Dest = dest + + return r +} + +// MaxNumThreads The maximum number of threads to be used by the analysis. Using more +// threads may decrease the time necessary to complete the analysis at the +// cost of using more CPU. Note that the process may use additional threads +// for operational functionality other than the analysis itself. 
+// API name: max_num_threads +func (r *ExplainDataFrameAnalytics) MaxNumThreads(maxnumthreads int) *ExplainDataFrameAnalytics { + r.req.MaxNumThreads = &maxnumthreads + + return r +} + +// ModelMemoryLimit The approximate maximum amount of memory resources that are permitted for +// analytical processing. If your `elasticsearch.yml` file contains an +// `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to +// create data frame analytics jobs that have `model_memory_limit` values +// greater than that setting. +// API name: model_memory_limit +func (r *ExplainDataFrameAnalytics) ModelMemoryLimit(modelmemorylimit string) *ExplainDataFrameAnalytics { + + r.req.ModelMemoryLimit = &modelmemorylimit + + return r +} + +// Source The configuration of how to source the analysis data. It requires an +// index. Optionally, query and _source may be specified. +// API name: source +func (r *ExplainDataFrameAnalytics) Source(source *types.DataframeAnalyticsSource) *ExplainDataFrameAnalytics { + + r.req.Source = source return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/explaindataframeanalytics/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/explaindataframeanalytics/request.go index d889d8353..5da679ac0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/explaindataframeanalytics/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/explaindataframeanalytics/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package explaindataframeanalytics @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package explaindataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/explain_data_frame_analytics/MlExplainDataFrameAnalyticsRequest.ts#L30-L107 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/explain_data_frame_analytics/MlExplainDataFrameAnalyticsRequest.ts#L30-L107 type Request struct { // AllowLazyStart Specifies whether this job can start when there is insufficient machine diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/explaindataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/explaindataframeanalytics/response.go index 74dd76027..7d1d314cd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/explaindataframeanalytics/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/explaindataframeanalytics/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package explaindataframeanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package explaindataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/explain_data_frame_analytics/MlExplainDataFrameAnalyticsResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/explain_data_frame_analytics/MlExplainDataFrameAnalyticsResponse.ts#L25-L32 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/flushjob/flush_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/flushjob/flush_job.go index 6adef1a0c..e8c83a662 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/flushjob/flush_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/flushjob/flush_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Forces any buffered data to be processed by the job. 
package flushjob @@ -30,7 +30,6 @@ import ( "io" "net/http" "net/url" - "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" @@ -53,8 +52,9 @@ type FlushJob struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +70,7 @@ func NewFlushJobFunc(tp elastictransport.Interface) NewFlushJob { return func(jobid string) *FlushJob { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -85,6 +85,8 @@ func New(tp elastictransport.Interface) *FlushJob { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +116,19 @@ func (r *FlushJob) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +136,7 @@ func (r *FlushJob) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -208,7 +221,6 @@ func (r FlushJob) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -217,6 +229,10 @@ func (r FlushJob) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -229,54 +245,49 @@ func (r *FlushJob) Header(key, value string) *FlushJob { // JobId Identifier for the anomaly detection job. // API Name: jobid -func (r *FlushJob) JobId(v string) *FlushJob { +func (r *FlushJob) _jobid(jobid string) *FlushJob { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } -// AdvanceTime Specifies to advance to a particular time value. 
Results are generated -// and the model is updated for data from the specified time interval. +// AdvanceTime Refer to the description for the `advance_time` query parameter. // API name: advance_time -func (r *FlushJob) AdvanceTime(v string) *FlushJob { - r.values.Set("advance_time", v) +func (r *FlushJob) AdvanceTime(datetime types.DateTime) *FlushJob { + r.req.AdvanceTime = datetime return r } -// CalcInterim If true, calculates the interim results for the most recent bucket or all -// buckets within the latency period. +// CalcInterim Refer to the description for the `calc_interim` query parameter. // API name: calc_interim -func (r *FlushJob) CalcInterim(b bool) *FlushJob { - r.values.Set("calc_interim", strconv.FormatBool(b)) +func (r *FlushJob) CalcInterim(calcinterim bool) *FlushJob { + r.req.CalcInterim = &calcinterim return r } -// End When used in conjunction with `calc_interim` and `start`, specifies the -// range of buckets on which to calculate interim results. +// End Refer to the description for the `end` query parameter. // API name: end -func (r *FlushJob) End(v string) *FlushJob { - r.values.Set("end", v) +func (r *FlushJob) End(datetime types.DateTime) *FlushJob { + r.req.End = datetime return r } -// SkipTime Specifies to skip to a particular time value. Results are not generated -// and the model is not updated for data from the specified time interval. +// SkipTime Refer to the description for the `skip_time` query parameter. // API name: skip_time -func (r *FlushJob) SkipTime(v string) *FlushJob { - r.values.Set("skip_time", v) +func (r *FlushJob) SkipTime(datetime types.DateTime) *FlushJob { + r.req.SkipTime = datetime return r } -// Start When used in conjunction with `calc_interim`, specifies the range of -// buckets on which to calculate interim results. +// Start Refer to the description for the `start` query parameter. 
// API name: start -func (r *FlushJob) Start(v string) *FlushJob { - r.values.Set("start", v) +func (r *FlushJob) Start(datetime types.DateTime) *FlushJob { + r.req.Start = datetime return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/flushjob/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/flushjob/request.go index feaf07cf5..6478f3298 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/flushjob/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/flushjob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package flushjob @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package flushjob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/flush_job/MlFlushJobRequest.ts#L24-L99 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/flush_job/MlFlushJobRequest.ts#L24-L99 type Request struct { // AdvanceTime Refer to the description for the `advance_time` query parameter. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/flushjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/flushjob/response.go index b50fbd8b1..04cd20b9a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/flushjob/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/flushjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package flushjob // Response holds the response body struct for the package flushjob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/flush_job/MlFlushJobResponse.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/flush_job/MlFlushJobResponse.ts#L22-L31 type Response struct { Flushed bool `json:"flushed"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/forecast/forecast.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/forecast/forecast.go index 88054e00e..d2ee59180 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/forecast/forecast.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/forecast/forecast.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Predicts the future behavior of a time series by using its historical // behavior. 
@@ -53,8 +53,9 @@ type Forecast struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +71,7 @@ func NewForecastFunc(tp elastictransport.Interface) NewForecast { return func(jobid string) *Forecast { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -86,6 +87,8 @@ func New(tp elastictransport.Interface) *Forecast { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -115,9 +118,19 @@ func (r *Forecast) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -125,6 +138,7 @@ func (r *Forecast) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -209,7 +223,6 @@ func (r Forecast) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -218,6 +231,10 @@ func (r Forecast) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -231,41 +248,34 @@ func (r *Forecast) Header(key, value string) *Forecast { // JobId Identifier for the anomaly detection job. The job must be open when you // create a forecast; otherwise, an error occurs. // API Name: jobid -func (r *Forecast) JobId(v string) *Forecast { +func (r *Forecast) _jobid(jobid string) *Forecast { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } -// Duration A period of time that indicates how far into the future to forecast. For -// example, `30d` corresponds to 30 days. 
The forecast starts at the last -// record that was processed. +// Duration Refer to the description for the `duration` query parameter. // API name: duration -func (r *Forecast) Duration(v string) *Forecast { - r.values.Set("duration", v) +func (r *Forecast) Duration(duration types.Duration) *Forecast { + r.req.Duration = duration return r } -// ExpiresIn The period of time that forecast results are retained. After a forecast -// expires, the results are deleted. If set to a value of 0, the forecast is -// never automatically deleted. +// ExpiresIn Refer to the description for the `expires_in` query parameter. // API name: expires_in -func (r *Forecast) ExpiresIn(v string) *Forecast { - r.values.Set("expires_in", v) +func (r *Forecast) ExpiresIn(duration types.Duration) *Forecast { + r.req.ExpiresIn = duration return r } -// MaxModelMemory The maximum memory the forecast can use. If the forecast needs to use -// more than the provided amount, it will spool to disk. Default is 20mb, -// maximum is 500mb and minimum is 1mb. If set to 40% or more of the job’s -// configured memory limit, it is automatically reduced to below that -// amount. +// MaxModelMemory Refer to the description for the `max_model_memory` query parameter. // API name: max_model_memory -func (r *Forecast) MaxModelMemory(v string) *Forecast { - r.values.Set("max_model_memory", v) +func (r *Forecast) MaxModelMemory(maxmodelmemory string) *Forecast { + + r.req.MaxModelMemory = &maxmodelmemory return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/forecast/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/forecast/request.go index fa6a99a7e..bf9bff4b5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/forecast/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/forecast/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package forecast @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package forecast // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/forecast/MlForecastJobRequest.ts#L24-L87 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/forecast/MlForecastJobRequest.ts#L24-L87 type Request struct { // Duration Refer to the description for the `duration` query parameter. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/forecast/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/forecast/response.go index 3f428955c..4414a1a2c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/forecast/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/forecast/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package forecast // Response holds the response body struct for the package forecast // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/forecast/MlForecastJobResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/forecast/MlForecastJobResponse.ts#L22-L27 type Response struct { Acknowledged bool `json:"acknowledged"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getbuckets/get_buckets.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getbuckets/get_buckets.go index 5e1b8a78f..dad826860 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getbuckets/get_buckets.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getbuckets/get_buckets.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves anomaly detection job results for one or more buckets. 
package getbuckets @@ -55,8 +55,9 @@ type GetBuckets struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -73,7 +74,7 @@ func NewGetBucketsFunc(tp elastictransport.Interface) NewGetBuckets { return func(jobid string) *GetBuckets { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -88,6 +89,8 @@ func New(tp elastictransport.Interface) *GetBuckets { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -117,9 +120,19 @@ func (r *GetBuckets) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -127,6 +140,7 @@ func (r *GetBuckets) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -230,7 +244,6 @@ func (r GetBuckets) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -239,6 +252,10 @@ func (r GetBuckets) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -251,9 +268,9 @@ func (r *GetBuckets) Header(key, value string) *GetBuckets { // JobId Identifier for the anomaly detection job. // API Name: jobid -func (r *GetBuckets) JobId(v string) *GetBuckets { +func (r *GetBuckets) _jobid(jobid string) *GetBuckets { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } @@ -261,83 +278,90 @@ func (r *GetBuckets) JobId(v string) *GetBuckets { // Timestamp The timestamp of a single bucket result. 
If you do not specify this // parameter, the API returns information about all buckets. // API Name: timestamp -func (r *GetBuckets) Timestamp(v string) *GetBuckets { +func (r *GetBuckets) Timestamp(timestamp string) *GetBuckets { r.paramSet |= timestampMask - r.timestamp = v + r.timestamp = timestamp + + return r +} + +// From Skips the specified number of buckets. +// API name: from +func (r *GetBuckets) From(from int) *GetBuckets { + r.values.Set("from", strconv.Itoa(from)) return r } -// AnomalyScore Returns buckets with anomaly scores greater or equal than this value. +// Size Specifies the maximum number of buckets to obtain. +// API name: size +func (r *GetBuckets) Size(size int) *GetBuckets { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// AnomalyScore Refer to the description for the `anomaly_score` query parameter. // API name: anomaly_score -func (r *GetBuckets) AnomalyScore(v string) *GetBuckets { - r.values.Set("anomaly_score", v) +func (r *GetBuckets) AnomalyScore(anomalyscore types.Float64) *GetBuckets { + + r.req.AnomalyScore = &anomalyscore return r } -// Desc If `true`, the buckets are sorted in descending order. +// Desc Refer to the description for the `desc` query parameter. // API name: desc -func (r *GetBuckets) Desc(b bool) *GetBuckets { - r.values.Set("desc", strconv.FormatBool(b)) +func (r *GetBuckets) Desc(desc bool) *GetBuckets { + r.req.Desc = &desc return r } -// End Returns buckets with timestamps earlier than this time. `-1` means it is -// unset and results are not limited to specific timestamps. +// End Refer to the description for the `end` query parameter. // API name: end -func (r *GetBuckets) End(v string) *GetBuckets { - r.values.Set("end", v) +func (r *GetBuckets) End(datetime types.DateTime) *GetBuckets { + r.req.End = datetime return r } -// ExcludeInterim If `true`, the output excludes interim results. +// ExcludeInterim Refer to the description for the `exclude_interim` query parameter. 
// API name: exclude_interim -func (r *GetBuckets) ExcludeInterim(b bool) *GetBuckets { - r.values.Set("exclude_interim", strconv.FormatBool(b)) +func (r *GetBuckets) ExcludeInterim(excludeinterim bool) *GetBuckets { + r.req.ExcludeInterim = &excludeinterim return r } -// Expand If true, the output includes anomaly records. +// Expand Refer to the description for the `expand` query parameter. // API name: expand -func (r *GetBuckets) Expand(b bool) *GetBuckets { - r.values.Set("expand", strconv.FormatBool(b)) +func (r *GetBuckets) Expand(expand bool) *GetBuckets { + r.req.Expand = &expand return r } -// From Skips the specified number of buckets. -// API name: from -func (r *GetBuckets) From(i int) *GetBuckets { - r.values.Set("from", strconv.Itoa(i)) +// API name: page +func (r *GetBuckets) Page(page *types.Page) *GetBuckets { - return r -} - -// Size Specifies the maximum number of buckets to obtain. -// API name: size -func (r *GetBuckets) Size(i int) *GetBuckets { - r.values.Set("size", strconv.Itoa(i)) + r.req.Page = page return r } -// Sort Specifies the sort field for the requested buckets. +// Sort Refer to the desription for the `sort` query parameter. // API name: sort -func (r *GetBuckets) Sort(v string) *GetBuckets { - r.values.Set("sort", v) +func (r *GetBuckets) Sort(field string) *GetBuckets { + r.req.Sort = &field return r } -// Start Returns buckets with timestamps after this time. `-1` means it is unset -// and results are not limited to specific timestamps. +// Start Refer to the description for the `start` query parameter. 
// API name: start -func (r *GetBuckets) Start(v string) *GetBuckets { - r.values.Set("start", v) +func (r *GetBuckets) Start(datetime types.DateTime) *GetBuckets { + r.req.Start = datetime return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getbuckets/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getbuckets/request.go index 1d9511bdf..aa652277d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getbuckets/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getbuckets/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getbuckets @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package getbuckets // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_buckets/MlGetBucketsRequest.ts#L26-L133 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_buckets/MlGetBucketsRequest.ts#L26-L133 type Request struct { // AnomalyScore Refer to the description for the `anomaly_score` query parameter. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getbuckets/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getbuckets/response.go index 5232a10d3..f998d7b5d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getbuckets/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getbuckets/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getbuckets @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getbuckets // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_buckets/MlGetBucketsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_buckets/MlGetBucketsResponse.ts#L23-L28 type Response struct { Buckets []types.BucketSummary `json:"buckets"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendarevents/get_calendar_events.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendarevents/get_calendar_events.go index 4ef86a671..65f163da1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendarevents/get_calendar_events.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendarevents/get_calendar_events.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves information about the scheduled events in calendars. 
package getcalendarevents @@ -68,7 +68,7 @@ func NewGetCalendarEventsFunc(tp elastictransport.Interface) NewGetCalendarEvent return func(calendarid string) *GetCalendarEvents { n := New(tp) - n.CalendarId(calendarid) + n._calendarid(calendarid) return n } @@ -173,7 +173,6 @@ func (r GetCalendarEvents) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -182,6 +181,10 @@ func (r GetCalendarEvents) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -218,25 +221,25 @@ func (r *GetCalendarEvents) Header(key, value string) *GetCalendarEvents { // expression. You can get information for all calendars by using `_all` or `*` // or by omitting the calendar identifier. // API Name: calendarid -func (r *GetCalendarEvents) CalendarId(v string) *GetCalendarEvents { +func (r *GetCalendarEvents) _calendarid(calendarid string) *GetCalendarEvents { r.paramSet |= calendaridMask - r.calendarid = v + r.calendarid = calendarid return r } // End Specifies to get events with timestamps earlier than this time. // API name: end -func (r *GetCalendarEvents) End(v string) *GetCalendarEvents { - r.values.Set("end", v) +func (r *GetCalendarEvents) End(datetime string) *GetCalendarEvents { + r.values.Set("end", datetime) return r } // From Skips the specified number of events. // API name: from -func (r *GetCalendarEvents) From(i int) *GetCalendarEvents { - r.values.Set("from", strconv.Itoa(i)) +func (r *GetCalendarEvents) From(from int) *GetCalendarEvents { + r.values.Set("from", strconv.Itoa(from)) return r } @@ -244,24 +247,24 @@ func (r *GetCalendarEvents) From(i int) *GetCalendarEvents { // JobId Specifies to get events for a specific anomaly detection job identifier or // job group. It must be used with a calendar identifier of `_all` or `*`. 
// API name: job_id -func (r *GetCalendarEvents) JobId(v string) *GetCalendarEvents { - r.values.Set("job_id", v) +func (r *GetCalendarEvents) JobId(id string) *GetCalendarEvents { + r.values.Set("job_id", id) return r } // Size Specifies the maximum number of events to obtain. // API name: size -func (r *GetCalendarEvents) Size(i int) *GetCalendarEvents { - r.values.Set("size", strconv.Itoa(i)) +func (r *GetCalendarEvents) Size(size int) *GetCalendarEvents { + r.values.Set("size", strconv.Itoa(size)) return r } // Start Specifies to get events with timestamps after this time. // API name: start -func (r *GetCalendarEvents) Start(v string) *GetCalendarEvents { - r.values.Set("start", v) +func (r *GetCalendarEvents) Start(datetime string) *GetCalendarEvents { + r.values.Set("start", datetime) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendarevents/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendarevents/response.go index 7fadd8d98..5909fd67d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendarevents/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendarevents/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getcalendarevents @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getcalendarevents // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_calendar_events/MlGetCalendarEventsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_calendar_events/MlGetCalendarEventsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendars/get_calendars.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendars/get_calendars.go index b36aa9246..ae7953481 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendars/get_calendars.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendars/get_calendars.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves configuration information for calendars. 
package getcalendars @@ -53,8 +53,9 @@ type GetCalendars struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -83,6 +84,8 @@ func New(tp elastictransport.Interface) *GetCalendars { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -112,9 +115,19 @@ func (r *GetCalendars) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -122,6 +135,7 @@ func (r *GetCalendars) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -211,7 +225,6 @@ func (r GetCalendars) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -220,6 +233,10 @@ func (r GetCalendars) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -235,9 +252,9 @@ func (r *GetCalendars) Header(key, value string) *GetCalendars { // expression. You can get information for all calendars by using `_all` or `*` // or by omitting the calendar identifier. // API Name: calendarid -func (r *GetCalendars) CalendarId(v string) *GetCalendars { +func (r *GetCalendars) CalendarId(calendarid string) *GetCalendars { r.paramSet |= calendaridMask - r.calendarid = v + r.calendarid = calendarid return r } @@ -245,8 +262,8 @@ func (r *GetCalendars) CalendarId(v string) *GetCalendars { // From Skips the specified number of calendars. This parameter is supported only // when you omit the calendar identifier. 
// API name: from -func (r *GetCalendars) From(i int) *GetCalendars { - r.values.Set("from", strconv.Itoa(i)) +func (r *GetCalendars) From(from int) *GetCalendars { + r.values.Set("from", strconv.Itoa(from)) return r } @@ -254,8 +271,17 @@ func (r *GetCalendars) From(i int) *GetCalendars { // Size Specifies the maximum number of calendars to obtain. This parameter is // supported only when you omit the calendar identifier. // API name: size -func (r *GetCalendars) Size(i int) *GetCalendars { - r.values.Set("size", strconv.Itoa(i)) +func (r *GetCalendars) Size(size int) *GetCalendars { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// Page This object is supported only when you omit the calendar identifier. +// API name: page +func (r *GetCalendars) Page(page *types.Page) *GetCalendars { + + r.req.Page = page return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendars/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendars/request.go index dff283ee2..99a78e60c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendars/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendars/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getcalendars @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package getcalendars // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_calendars/MlGetCalendarsRequest.ts#L25-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_calendars/MlGetCalendarsRequest.ts#L25-L51 type Request struct { // Page This object is supported only when you omit the calendar identifier. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendars/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendars/response.go index 436470a6d..7290aebee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendars/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcalendars/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getcalendars @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getcalendars // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_calendars/MlGetCalendarsResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_calendars/MlGetCalendarsResponse.ts#L23-L25 type Response struct { Calendars []types.Calendar `json:"calendars"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcategories/get_categories.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcategories/get_categories.go index 3cf1720d6..ca16e5857 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcategories/get_categories.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcategories/get_categories.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves anomaly detection job results for one or more categories. 
package getcategories @@ -55,8 +55,9 @@ type GetCategories struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -73,7 +74,7 @@ func NewGetCategoriesFunc(tp elastictransport.Interface) NewGetCategories { return func(jobid string) *GetCategories { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -88,6 +89,8 @@ func New(tp elastictransport.Interface) *GetCategories { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -117,9 +120,19 @@ func (r *GetCategories) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -127,6 +140,7 @@ func (r *GetCategories) HttpRequest(ctx context.Context) (*http.Request, error) } r.buf.Write(data) + } r.path.Scheme = "http" @@ -161,7 +175,7 @@ func (r *GetCategories) HttpRequest(ctx context.Context) (*http.Request, error) path.WriteString("results") path.WriteString("/") path.WriteString("categories") - path.WriteString("/") + method = http.MethodPost } @@ -230,7 +244,6 @@ func (r GetCategories) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -239,6 +252,10 @@ func (r GetCategories) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -251,9 +268,9 @@ func (r *GetCategories) Header(key, value string) *GetCategories { // JobId Identifier for the anomaly detection job. 
// API Name: jobid -func (r *GetCategories) JobId(v string) *GetCategories { +func (r *GetCategories) _jobid(jobid string) *GetCategories { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } @@ -264,33 +281,43 @@ func (r *GetCategories) JobId(v string) *GetCategories { // partition_field_value, it returns information about all categories for // the specified partition. // API Name: categoryid -func (r *GetCategories) CategoryId(v string) *GetCategories { +func (r *GetCategories) CategoryId(categoryid string) *GetCategories { r.paramSet |= categoryidMask - r.categoryid = v + r.categoryid = categoryid return r } // From Skips the specified number of categories. // API name: from -func (r *GetCategories) From(i int) *GetCategories { - r.values.Set("from", strconv.Itoa(i)) +func (r *GetCategories) From(from int) *GetCategories { + r.values.Set("from", strconv.Itoa(from)) return r } // PartitionFieldValue Only return categories for the specified partition. // API name: partition_field_value -func (r *GetCategories) PartitionFieldValue(v string) *GetCategories { - r.values.Set("partition_field_value", v) +func (r *GetCategories) PartitionFieldValue(partitionfieldvalue string) *GetCategories { + r.values.Set("partition_field_value", partitionfieldvalue) return r } // Size Specifies the maximum number of categories to obtain. // API name: size -func (r *GetCategories) Size(i int) *GetCategories { - r.values.Set("size", strconv.Itoa(i)) +func (r *GetCategories) Size(size int) *GetCategories { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// Page Configures pagination. +// This parameter has the `from` and `size` properties. 
+// API name: page +func (r *GetCategories) Page(page *types.Page) *GetCategories { + + r.req.Page = page return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcategories/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcategories/request.go index 53baf9dd1..3d6ec5f27 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcategories/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcategories/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getcategories @@ -29,8 +29,11 @@ import ( // Request holds the request body struct for the package getcategories // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_categories/MlGetCategoriesRequest.ts#L25-L66 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_categories/MlGetCategoriesRequest.ts#L25-L70 type Request struct { + + // Page Configures pagination. + // This parameter has the `from` and `size` properties. Page *types.Page `json:"page,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcategories/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcategories/response.go index 503c1a389..3425be126 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcategories/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getcategories/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getcategories @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getcategories // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_categories/MlGetCategoriesResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_categories/MlGetCategoriesResponse.ts#L23-L28 type Response struct { Categories []types.Category `json:"categories"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdatafeeds/get_datafeeds.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdatafeeds/get_datafeeds.go index ed1f54e5a..61347ab1c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdatafeeds/get_datafeeds.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdatafeeds/get_datafeeds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves configuration information for datafeeds. 
package getdatafeeds @@ -176,7 +176,6 @@ func (r GetDatafeeds) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -185,6 +184,10 @@ func (r GetDatafeeds) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -220,9 +223,9 @@ func (r *GetDatafeeds) Header(key, value string) *GetDatafeeds { // wildcard expression. If you do not specify one of these options, the API // returns information about all datafeeds. // API Name: datafeedid -func (r *GetDatafeeds) DatafeedId(v string) *GetDatafeeds { +func (r *GetDatafeeds) DatafeedId(datafeedid string) *GetDatafeeds { r.paramSet |= datafeedidMask - r.datafeedid = v + r.datafeedid = datafeedid return r } @@ -238,8 +241,8 @@ func (r *GetDatafeeds) DatafeedId(v string) *GetDatafeeds { // partial matches. If this parameter is `false`, the request returns a // `404` status code when there are no matches or only partial matches. // API name: allow_no_match -func (r *GetDatafeeds) AllowNoMatch(b bool) *GetDatafeeds { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *GetDatafeeds) AllowNoMatch(allownomatch bool) *GetDatafeeds { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } @@ -248,8 +251,8 @@ func (r *GetDatafeeds) AllowNoMatch(b bool) *GetDatafeeds { // retrieval. This allows the configuration to be in an acceptable format to // be retrieved and then added to another cluster. 
// API name: exclude_generated -func (r *GetDatafeeds) ExcludeGenerated(b bool) *GetDatafeeds { - r.values.Set("exclude_generated", strconv.FormatBool(b)) +func (r *GetDatafeeds) ExcludeGenerated(excludegenerated bool) *GetDatafeeds { + r.values.Set("exclude_generated", strconv.FormatBool(excludegenerated)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdatafeeds/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdatafeeds/response.go index 61928936e..b14083318 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdatafeeds/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdatafeeds/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getdatafeeds @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdatafeeds // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_datafeeds/MlGetDatafeedsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_datafeeds/MlGetDatafeedsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdatafeedstats/get_datafeed_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdatafeedstats/get_datafeed_stats.go index 7d8346dd6..7050176c5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdatafeedstats/get_datafeed_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdatafeedstats/get_datafeed_stats.go @@ 
-16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves usage information for datafeeds. package getdatafeedstats @@ -180,7 +180,6 @@ func (r GetDatafeedStats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -189,6 +188,10 @@ func (r GetDatafeedStats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -224,9 +227,9 @@ func (r *GetDatafeedStats) Header(key, value string) *GetDatafeedStats { // wildcard expression. If you do not specify one of these options, the API // returns information about all datafeeds. // API Name: datafeedid -func (r *GetDatafeedStats) DatafeedId(v string) *GetDatafeedStats { +func (r *GetDatafeedStats) DatafeedId(datafeedid string) *GetDatafeedStats { r.paramSet |= datafeedidMask - r.datafeedid = v + r.datafeedid = datafeedid return r } @@ -242,8 +245,8 @@ func (r *GetDatafeedStats) DatafeedId(v string) *GetDatafeedStats { // partial matches. If this parameter is `false`, the request returns a // `404` status code when there are no matches or only partial matches. 
// API name: allow_no_match -func (r *GetDatafeedStats) AllowNoMatch(b bool) *GetDatafeedStats { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *GetDatafeedStats) AllowNoMatch(allownomatch bool) *GetDatafeedStats { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdatafeedstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdatafeedstats/response.go index dae365b4c..ced786009 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdatafeedstats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdatafeedstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getdatafeedstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdatafeedstats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_datafeed_stats/MlGetDatafeedStatsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_datafeed_stats/MlGetDatafeedStatsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdataframeanalytics/get_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdataframeanalytics/get_data_frame_analytics.go index a8aace19d..93c977fff 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdataframeanalytics/get_data_frame_analytics.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdataframeanalytics/get_data_frame_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves configuration information for data frame analytics jobs. package getdataframeanalytics @@ -180,7 +180,6 @@ func (r GetDataFrameAnalytics) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -189,6 +188,10 @@ func (r GetDataFrameAnalytics) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -224,9 +227,9 @@ func (r *GetDataFrameAnalytics) Header(key, value string) *GetDataFrameAnalytics // option, the API returns information for the first hundred data frame // analytics jobs. // API Name: id -func (r *GetDataFrameAnalytics) Id(v string) *GetDataFrameAnalytics { +func (r *GetDataFrameAnalytics) Id(id string) *GetDataFrameAnalytics { r.paramSet |= idMask - r.id = v + r.id = id return r } @@ -243,24 +246,24 @@ func (r *GetDataFrameAnalytics) Id(v string) *GetDataFrameAnalytics { // If this parameter is `false`, the request returns a 404 status code when // there are no matches or only partial matches. // API name: allow_no_match -func (r *GetDataFrameAnalytics) AllowNoMatch(b bool) *GetDataFrameAnalytics { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *GetDataFrameAnalytics) AllowNoMatch(allownomatch bool) *GetDataFrameAnalytics { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } // From Skips the specified number of data frame analytics jobs. 
// API name: from -func (r *GetDataFrameAnalytics) From(i int) *GetDataFrameAnalytics { - r.values.Set("from", strconv.Itoa(i)) +func (r *GetDataFrameAnalytics) From(from int) *GetDataFrameAnalytics { + r.values.Set("from", strconv.Itoa(from)) return r } // Size Specifies the maximum number of data frame analytics jobs to obtain. // API name: size -func (r *GetDataFrameAnalytics) Size(i int) *GetDataFrameAnalytics { - r.values.Set("size", strconv.Itoa(i)) +func (r *GetDataFrameAnalytics) Size(size int) *GetDataFrameAnalytics { + r.values.Set("size", strconv.Itoa(size)) return r } @@ -269,8 +272,8 @@ func (r *GetDataFrameAnalytics) Size(i int) *GetDataFrameAnalytics { // retrieval. This allows the configuration to be in an acceptable format to // be retrieved and then added to another cluster. // API name: exclude_generated -func (r *GetDataFrameAnalytics) ExcludeGenerated(b bool) *GetDataFrameAnalytics { - r.values.Set("exclude_generated", strconv.FormatBool(b)) +func (r *GetDataFrameAnalytics) ExcludeGenerated(excludegenerated bool) *GetDataFrameAnalytics { + r.values.Set("exclude_generated", strconv.FormatBool(excludegenerated)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdataframeanalytics/response.go index 2af6619d1..6c4a9c41e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdataframeanalytics/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdataframeanalytics/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getdataframeanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_data_frame_analytics/MlGetDataFrameAnalyticsResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_data_frame_analytics/MlGetDataFrameAnalyticsResponse.ts#L23-L29 type Response struct { Count int `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdataframeanalyticsstats/get_data_frame_analytics_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdataframeanalyticsstats/get_data_frame_analytics_stats.go index 4e8080aeb..53f2e8b4d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdataframeanalyticsstats/get_data_frame_analytics_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdataframeanalyticsstats/get_data_frame_analytics_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves usage information for data frame analytics jobs. 
package getdataframeanalyticsstats @@ -184,7 +184,6 @@ func (r GetDataFrameAnalyticsStats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -193,6 +192,10 @@ func (r GetDataFrameAnalyticsStats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -228,9 +231,9 @@ func (r *GetDataFrameAnalyticsStats) Header(key, value string) *GetDataFrameAnal // option, the API returns information for the first hundred data frame // analytics jobs. // API Name: id -func (r *GetDataFrameAnalyticsStats) Id(v string) *GetDataFrameAnalyticsStats { +func (r *GetDataFrameAnalyticsStats) Id(id string) *GetDataFrameAnalyticsStats { r.paramSet |= idMask - r.id = v + r.id = id return r } @@ -247,32 +250,32 @@ func (r *GetDataFrameAnalyticsStats) Id(v string) *GetDataFrameAnalyticsStats { // If this parameter is `false`, the request returns a 404 status code when // there are no matches or only partial matches. // API name: allow_no_match -func (r *GetDataFrameAnalyticsStats) AllowNoMatch(b bool) *GetDataFrameAnalyticsStats { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *GetDataFrameAnalyticsStats) AllowNoMatch(allownomatch bool) *GetDataFrameAnalyticsStats { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } // From Skips the specified number of data frame analytics jobs. // API name: from -func (r *GetDataFrameAnalyticsStats) From(i int) *GetDataFrameAnalyticsStats { - r.values.Set("from", strconv.Itoa(i)) +func (r *GetDataFrameAnalyticsStats) From(from int) *GetDataFrameAnalyticsStats { + r.values.Set("from", strconv.Itoa(from)) return r } // Size Specifies the maximum number of data frame analytics jobs to obtain. 
// API name: size -func (r *GetDataFrameAnalyticsStats) Size(i int) *GetDataFrameAnalyticsStats { - r.values.Set("size", strconv.Itoa(i)) +func (r *GetDataFrameAnalyticsStats) Size(size int) *GetDataFrameAnalyticsStats { + r.values.Set("size", strconv.Itoa(size)) return r } // Verbose Defines whether the stats response should be verbose. // API name: verbose -func (r *GetDataFrameAnalyticsStats) Verbose(b bool) *GetDataFrameAnalyticsStats { - r.values.Set("verbose", strconv.FormatBool(b)) +func (r *GetDataFrameAnalyticsStats) Verbose(verbose bool) *GetDataFrameAnalyticsStats { + r.values.Set("verbose", strconv.FormatBool(verbose)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdataframeanalyticsstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdataframeanalyticsstats/response.go index b5e7659ce..334483cc7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdataframeanalyticsstats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getdataframeanalyticsstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getdataframeanalyticsstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getdataframeanalyticsstats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_data_frame_analytics_stats/MlGetDataFrameAnalyticsStatsResponse.ts#L24-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_data_frame_analytics_stats/MlGetDataFrameAnalyticsStatsResponse.ts#L24-L30 type Response struct { Count int64 `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getfilters/get_filters.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getfilters/get_filters.go index 36acbd966..2cdd33ebf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getfilters/get_filters.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getfilters/get_filters.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves filters. 
package getfilters @@ -176,7 +176,6 @@ func (r GetFilters) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -185,6 +184,10 @@ func (r GetFilters) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -218,25 +221,25 @@ func (r *GetFilters) Header(key, value string) *GetFilters { // FilterId A string that uniquely identifies a filter. // API Name: filterid -func (r *GetFilters) FilterId(v string) *GetFilters { +func (r *GetFilters) FilterId(filterid string) *GetFilters { r.paramSet |= filteridMask - r.filterid = v + r.filterid = filterid return r } // From Skips the specified number of filters. // API name: from -func (r *GetFilters) From(i int) *GetFilters { - r.values.Set("from", strconv.Itoa(i)) +func (r *GetFilters) From(from int) *GetFilters { + r.values.Set("from", strconv.Itoa(from)) return r } // Size Specifies the maximum number of filters to obtain. // API name: size -func (r *GetFilters) Size(i int) *GetFilters { - r.values.Set("size", strconv.Itoa(i)) +func (r *GetFilters) Size(size int) *GetFilters { + r.values.Set("size", strconv.Itoa(size)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getfilters/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getfilters/response.go index b5cdd2f10..0eb83980c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getfilters/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getfilters/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getfilters @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getfilters // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_filters/MlGetFiltersResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_filters/MlGetFiltersResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getinfluencers/get_influencers.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getinfluencers/get_influencers.go index e957b9f74..8eac30ffe 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getinfluencers/get_influencers.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getinfluencers/get_influencers.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves anomaly detection job results for one or more influencers. 
package getinfluencers @@ -53,8 +53,9 @@ type GetInfluencers struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +71,7 @@ func NewGetInfluencersFunc(tp elastictransport.Interface) NewGetInfluencers { return func(jobid string) *GetInfluencers { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -85,6 +86,8 @@ func New(tp elastictransport.Interface) *GetInfluencers { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +117,19 @@ func (r *GetInfluencers) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +137,7 @@ func (r *GetInfluencers) HttpRequest(ctx context.Context) (*http.Request, error) } r.buf.Write(data) + } r.path.Scheme = "http" @@ -210,7 +224,6 @@ func (r GetInfluencers) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -219,6 +232,10 @@ func (r GetInfluencers) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -231,17 +248,17 @@ func (r *GetInfluencers) Header(key, value string) *GetInfluencers { // JobId Identifier for the anomaly detection job. // API Name: jobid -func (r *GetInfluencers) JobId(v string) *GetInfluencers { +func (r *GetInfluencers) _jobid(jobid string) *GetInfluencers { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } // Desc If true, the results are sorted in descending order. 
// API name: desc -func (r *GetInfluencers) Desc(b bool) *GetInfluencers { - r.values.Set("desc", strconv.FormatBool(b)) +func (r *GetInfluencers) Desc(desc bool) *GetInfluencers { + r.values.Set("desc", strconv.FormatBool(desc)) return r } @@ -250,8 +267,8 @@ func (r *GetInfluencers) Desc(b bool) *GetInfluencers { // The default value means it is unset and results are not limited to // specific timestamps. // API name: end -func (r *GetInfluencers) End(v string) *GetInfluencers { - r.values.Set("end", v) +func (r *GetInfluencers) End(datetime string) *GetInfluencers { + r.values.Set("end", datetime) return r } @@ -259,8 +276,8 @@ func (r *GetInfluencers) End(v string) *GetInfluencers { // ExcludeInterim If true, the output excludes interim results. By default, interim results // are included. // API name: exclude_interim -func (r *GetInfluencers) ExcludeInterim(b bool) *GetInfluencers { - r.values.Set("exclude_interim", strconv.FormatBool(b)) +func (r *GetInfluencers) ExcludeInterim(excludeinterim bool) *GetInfluencers { + r.values.Set("exclude_interim", strconv.FormatBool(excludeinterim)) return r } @@ -268,24 +285,24 @@ func (r *GetInfluencers) ExcludeInterim(b bool) *GetInfluencers { // InfluencerScore Returns influencers with anomaly scores greater than or equal to this // value. // API name: influencer_score -func (r *GetInfluencers) InfluencerScore(v string) *GetInfluencers { - r.values.Set("influencer_score", v) +func (r *GetInfluencers) InfluencerScore(influencerscore string) *GetInfluencers { + r.values.Set("influencer_score", influencerscore) return r } // From Skips the specified number of influencers. // API name: from -func (r *GetInfluencers) From(i int) *GetInfluencers { - r.values.Set("from", strconv.Itoa(i)) +func (r *GetInfluencers) From(from int) *GetInfluencers { + r.values.Set("from", strconv.Itoa(from)) return r } // Size Specifies the maximum number of influencers to obtain. 
// API name: size -func (r *GetInfluencers) Size(i int) *GetInfluencers { - r.values.Set("size", strconv.Itoa(i)) +func (r *GetInfluencers) Size(size int) *GetInfluencers { + r.values.Set("size", strconv.Itoa(size)) return r } @@ -293,8 +310,8 @@ func (r *GetInfluencers) Size(i int) *GetInfluencers { // Sort Specifies the sort field for the requested influencers. By default, the // influencers are sorted by the `influencer_score` value. // API name: sort -func (r *GetInfluencers) Sort(v string) *GetInfluencers { - r.values.Set("sort", v) +func (r *GetInfluencers) Sort(field string) *GetInfluencers { + r.values.Set("sort", field) return r } @@ -302,8 +319,18 @@ func (r *GetInfluencers) Sort(v string) *GetInfluencers { // Start Returns influencers with timestamps after this time. The default value // means it is unset and results are not limited to specific timestamps. // API name: start -func (r *GetInfluencers) Start(v string) *GetInfluencers { - r.values.Set("start", v) +func (r *GetInfluencers) Start(datetime string) *GetInfluencers { + r.values.Set("start", datetime) + + return r +} + +// Page Configures pagination. +// This parameter has the `from` and `size` properties. +// API name: page +func (r *GetInfluencers) Page(page *types.Page) *GetInfluencers { + + r.req.Page = page return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getinfluencers/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getinfluencers/request.go index 94090818a..9b4961f06 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getinfluencers/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getinfluencers/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getinfluencers @@ -29,8 +29,11 @@ import ( // Request holds the request body struct for the package getinfluencers // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_influencers/MlGetInfluencersRequest.ts#L26-L93 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_influencers/MlGetInfluencersRequest.ts#L26-L97 type Request struct { + + // Page Configures pagination. + // This parameter has the `from` and `size` properties. Page *types.Page `json:"page,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getinfluencers/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getinfluencers/response.go index 47599fb5f..e26958349 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getinfluencers/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getinfluencers/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getinfluencers @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getinfluencers // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_influencers/MlGetInfluencersResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_influencers/MlGetInfluencersResponse.ts#L23-L29 type Response struct { Count int64 `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getjobs/get_jobs.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getjobs/get_jobs.go index cd804b11f..142ff3b4a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getjobs/get_jobs.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getjobs/get_jobs.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves configuration information for anomaly detection jobs. package getjobs @@ -176,7 +176,6 @@ func (r GetJobs) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -185,6 +184,10 @@ func (r GetJobs) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -220,9 +223,9 @@ func (r *GetJobs) Header(key, value string) *GetJobs { // group name, or a wildcard expression. 
If you do not specify one of these // options, the API returns information for all anomaly detection jobs. // API Name: jobid -func (r *GetJobs) JobId(v string) *GetJobs { +func (r *GetJobs) JobId(jobid string) *GetJobs { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } @@ -238,8 +241,8 @@ func (r *GetJobs) JobId(v string) *GetJobs { // matches. If this parameter is `false`, the request returns a `404` status // code when there are no matches or only partial matches. // API name: allow_no_match -func (r *GetJobs) AllowNoMatch(b bool) *GetJobs { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *GetJobs) AllowNoMatch(allownomatch bool) *GetJobs { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } @@ -248,8 +251,8 @@ func (r *GetJobs) AllowNoMatch(b bool) *GetJobs { // retrieval. This allows the configuration to be in an acceptable format to // be retrieved and then added to another cluster. // API name: exclude_generated -func (r *GetJobs) ExcludeGenerated(b bool) *GetJobs { - r.values.Set("exclude_generated", strconv.FormatBool(b)) +func (r *GetJobs) ExcludeGenerated(excludegenerated bool) *GetJobs { + r.values.Set("exclude_generated", strconv.FormatBool(excludegenerated)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getjobs/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getjobs/response.go index 3ab59dc5f..3c1dc2efc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getjobs/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getjobs/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getjobs @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getjobs // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_jobs/MlGetJobsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_jobs/MlGetJobsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getjobstats/get_job_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getjobstats/get_job_stats.go index f0b0e0691..562dc4a3c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getjobstats/get_job_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getjobstats/get_job_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves usage information for anomaly detection jobs. 
package getjobstats @@ -180,7 +180,6 @@ func (r GetJobStats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -189,6 +188,10 @@ func (r GetJobStats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -225,9 +228,9 @@ func (r *GetJobStats) Header(key, value string) *GetJobStats { // you do not specify one of these options, the API returns information for // all anomaly detection jobs. // API Name: jobid -func (r *GetJobStats) JobId(v string) *GetJobStats { +func (r *GetJobStats) JobId(jobid string) *GetJobStats { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } @@ -243,8 +246,8 @@ func (r *GetJobStats) JobId(v string) *GetJobStats { // matches. If `false`, the API returns a `404` status // code when there are no matches or only partial matches. // API name: allow_no_match -func (r *GetJobStats) AllowNoMatch(b bool) *GetJobStats { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *GetJobStats) AllowNoMatch(allownomatch bool) *GetJobStats { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getjobstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getjobstats/response.go index eafbff8da..3fe71e1f2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getjobstats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getjobstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getjobstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getjobstats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_job_stats/MlGetJobStatsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_job_stats/MlGetJobStatsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmemorystats/get_memory_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmemorystats/get_memory_stats.go index 2cb54d343..6971711d1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmemorystats/get_memory_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmemorystats/get_memory_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information on how ML is using memory. 
package getmemorystats @@ -180,7 +180,6 @@ func (r GetMemoryStats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -189,6 +188,10 @@ func (r GetMemoryStats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -224,9 +227,9 @@ func (r *GetMemoryStats) Header(key, value string) *GetMemoryStats { // `nodeId1,nodeId2` or // `ml:true` // API Name: nodeid -func (r *GetMemoryStats) NodeId(v string) *GetMemoryStats { +func (r *GetMemoryStats) NodeId(nodeid string) *GetMemoryStats { r.paramSet |= nodeidMask - r.nodeid = v + r.nodeid = nodeid return r } @@ -235,8 +238,8 @@ func (r *GetMemoryStats) NodeId(v string) *GetMemoryStats { // response. Otherwise only // the `_in_bytes` sizes are returned in the response. // API name: human -func (r *GetMemoryStats) Human(b bool) *GetMemoryStats { - r.values.Set("human", strconv.FormatBool(b)) +func (r *GetMemoryStats) Human(human bool) *GetMemoryStats { + r.values.Set("human", strconv.FormatBool(human)) return r } @@ -245,8 +248,8 @@ func (r *GetMemoryStats) Human(b bool) *GetMemoryStats { // received before the timeout // expires, the request fails and returns an error. // API name: master_timeout -func (r *GetMemoryStats) MasterTimeout(v string) *GetMemoryStats { - r.values.Set("master_timeout", v) +func (r *GetMemoryStats) MasterTimeout(duration string) *GetMemoryStats { + r.values.Set("master_timeout", duration) return r } @@ -255,8 +258,8 @@ func (r *GetMemoryStats) MasterTimeout(v string) *GetMemoryStats { // expires, the request // fails and returns an error. 
// API name: timeout -func (r *GetMemoryStats) Timeout(v string) *GetMemoryStats { - r.values.Set("timeout", v) +func (r *GetMemoryStats) Timeout(duration string) *GetMemoryStats { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmemorystats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmemorystats/response.go index fd47211ec..89c3edff8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmemorystats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmemorystats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getmemorystats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getmemorystats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_memory_stats/MlGetMemoryStatsResponse.ts#L25-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_memory_stats/MlGetMemoryStatsResponse.ts#L25-L31 type Response struct { ClusterName string `json:"cluster_name"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshots/get_model_snapshots.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshots/get_model_snapshots.go index 5e406f72f..06453cf6a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshots/get_model_snapshots.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshots/get_model_snapshots.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves information about model snapshots. package getmodelsnapshots @@ -55,8 +55,9 @@ type GetModelSnapshots struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -73,7 +74,7 @@ func NewGetModelSnapshotsFunc(tp elastictransport.Interface) NewGetModelSnapshot return func(jobid string) *GetModelSnapshots { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -88,6 +89,8 @@ func New(tp elastictransport.Interface) *GetModelSnapshots { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -117,9 +120,19 @@ func (r *GetModelSnapshots) HttpRequest(ctx context.Context) (*http.Request, err var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -127,6 +140,7 @@ func (r *GetModelSnapshots) HttpRequest(ctx context.Context) (*http.Request, err } r.buf.Write(data) + } r.path.Scheme = "http" @@ -226,7 +240,6 @@ func (r GetModelSnapshots) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -235,6 +248,10 @@ func (r GetModelSnapshots) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -247,9 +264,9 @@ func (r *GetModelSnapshots) Header(key, value string) *GetModelSnapshots { // JobId Identifier for the anomaly detection 
job. // API Name: jobid -func (r *GetModelSnapshots) JobId(v string) *GetModelSnapshots { +func (r *GetModelSnapshots) _jobid(jobid string) *GetModelSnapshots { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } @@ -260,58 +277,65 @@ func (r *GetModelSnapshots) JobId(v string) *GetModelSnapshots { // get all snapshots by using `_all`, // by specifying `*` as the snapshot ID, or by omitting the snapshot ID. // API Name: snapshotid -func (r *GetModelSnapshots) SnapshotId(v string) *GetModelSnapshots { +func (r *GetModelSnapshots) SnapshotId(snapshotid string) *GetModelSnapshots { r.paramSet |= snapshotidMask - r.snapshotid = v + r.snapshotid = snapshotid return r } -// Desc If true, the results are sorted in descending order. -// API name: desc -func (r *GetModelSnapshots) Desc(b bool) *GetModelSnapshots { - r.values.Set("desc", strconv.FormatBool(b)) +// From Skips the specified number of snapshots. +// API name: from +func (r *GetModelSnapshots) From(from int) *GetModelSnapshots { + r.values.Set("from", strconv.Itoa(from)) return r } -// End Returns snapshots with timestamps earlier than this time. -// API name: end -func (r *GetModelSnapshots) End(v string) *GetModelSnapshots { - r.values.Set("end", v) +// Size Specifies the maximum number of snapshots to obtain. +// API name: size +func (r *GetModelSnapshots) Size(size int) *GetModelSnapshots { + r.values.Set("size", strconv.Itoa(size)) return r } -// From Skips the specified number of snapshots. -// API name: from -func (r *GetModelSnapshots) From(i int) *GetModelSnapshots { - r.values.Set("from", strconv.Itoa(i)) +// Desc Refer to the description for the `desc` query parameter. +// API name: desc +func (r *GetModelSnapshots) Desc(desc bool) *GetModelSnapshots { + r.req.Desc = &desc return r } -// Size Specifies the maximum number of snapshots to obtain. 
-// API name: size -func (r *GetModelSnapshots) Size(i int) *GetModelSnapshots { - r.values.Set("size", strconv.Itoa(i)) +// End Refer to the description for the `end` query parameter. +// API name: end +func (r *GetModelSnapshots) End(datetime types.DateTime) *GetModelSnapshots { + r.req.End = datetime + + return r +} + +// API name: page +func (r *GetModelSnapshots) Page(page *types.Page) *GetModelSnapshots { + + r.req.Page = page return r } -// Sort Specifies the sort field for the requested snapshots. By default, the -// snapshots are sorted by their timestamp. +// Sort Refer to the description for the `sort` query parameter. // API name: sort -func (r *GetModelSnapshots) Sort(v string) *GetModelSnapshots { - r.values.Set("sort", v) +func (r *GetModelSnapshots) Sort(field string) *GetModelSnapshots { + r.req.Sort = &field return r } -// Start Returns snapshots with timestamps after this time. +// Start Refer to the description for the `start` query parameter. // API name: start -func (r *GetModelSnapshots) Start(v string) *GetModelSnapshots { - r.values.Set("start", v) +func (r *GetModelSnapshots) Start(datetime types.DateTime) *GetModelSnapshots { + r.req.Start = datetime return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshots/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshots/request.go index 0556cb26e..d989e111d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshots/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshots/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getmodelsnapshots @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package getmodelsnapshots // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_model_snapshots/MlGetModelSnapshotsRequest.ts#L26-L96 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_model_snapshots/MlGetModelSnapshotsRequest.ts#L26-L96 type Request struct { // Desc Refer to the description for the `desc` query parameter. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshots/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshots/response.go index deae29f3d..7b2d6ec07 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshots/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshots/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getmodelsnapshots @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getmodelsnapshots // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_model_snapshots/MlGetModelSnapshotsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_model_snapshots/MlGetModelSnapshotsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshotupgradestats/get_model_snapshot_upgrade_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshotupgradestats/get_model_snapshot_upgrade_stats.go index fdc8a0968..1dc74b626 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshotupgradestats/get_model_snapshot_upgrade_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshotupgradestats/get_model_snapshot_upgrade_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Gets stats for anomaly detection job model snapshot upgrades that are in // progress. 
@@ -72,9 +72,9 @@ func NewGetModelSnapshotUpgradeStatsFunc(tp elastictransport.Interface) NewGetMo return func(jobid, snapshotid string) *GetModelSnapshotUpgradeStats { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) - n.SnapshotId(snapshotid) + n._snapshotid(snapshotid) return n } @@ -187,7 +187,6 @@ func (r GetModelSnapshotUpgradeStats) Do(ctx context.Context) (*Response, error) } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -196,6 +195,10 @@ func (r GetModelSnapshotUpgradeStats) Do(ctx context.Context) (*Response, error) return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -229,9 +232,9 @@ func (r *GetModelSnapshotUpgradeStats) Header(key, value string) *GetModelSnapsh // JobId Identifier for the anomaly detection job. // API Name: jobid -func (r *GetModelSnapshotUpgradeStats) JobId(v string) *GetModelSnapshotUpgradeStats { +func (r *GetModelSnapshotUpgradeStats) _jobid(jobid string) *GetModelSnapshotUpgradeStats { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } @@ -242,9 +245,9 @@ func (r *GetModelSnapshotUpgradeStats) JobId(v string) *GetModelSnapshotUpgradeS // get all snapshots by using `_all`, // by specifying `*` as the snapshot ID, or by omitting the snapshot ID. // API Name: snapshotid -func (r *GetModelSnapshotUpgradeStats) SnapshotId(v string) *GetModelSnapshotUpgradeStats { +func (r *GetModelSnapshotUpgradeStats) _snapshotid(snapshotid string) *GetModelSnapshotUpgradeStats { r.paramSet |= snapshotidMask - r.snapshotid = v + r.snapshotid = snapshotid return r } @@ -261,8 +264,8 @@ func (r *GetModelSnapshotUpgradeStats) SnapshotId(v string) *GetModelSnapshotUpg // returns a 404 status code when there are // no matches or only partial matches. 
// API name: allow_no_match -func (r *GetModelSnapshotUpgradeStats) AllowNoMatch(b bool) *GetModelSnapshotUpgradeStats { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *GetModelSnapshotUpgradeStats) AllowNoMatch(allownomatch bool) *GetModelSnapshotUpgradeStats { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshotupgradestats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshotupgradestats/response.go index 163b4ff05..c8f2ff0df 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshotupgradestats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getmodelsnapshotupgradestats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getmodelsnapshotupgradestats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getmodelsnapshotupgradestats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_model_snapshot_upgrade_stats/MlGetModelSnapshotUpgradeStatsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_model_snapshot_upgrade_stats/MlGetModelSnapshotUpgradeStatsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getoverallbuckets/get_overall_buckets.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getoverallbuckets/get_overall_buckets.go index 8b3065992..31eb413f3 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getoverallbuckets/get_overall_buckets.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getoverallbuckets/get_overall_buckets.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves overall bucket results that summarize the bucket results of // multiple anomaly detection jobs. @@ -31,7 +31,6 @@ import ( "io" "net/http" "net/url" - "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" @@ -54,8 +53,9 @@ type GetOverallBuckets struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -71,7 +71,7 @@ func NewGetOverallBucketsFunc(tp elastictransport.Interface) NewGetOverallBucket return func(jobid string) *GetOverallBuckets { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -87,6 +87,8 @@ func New(tp elastictransport.Interface) *GetOverallBuckets { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -116,9 +118,19 @@ func (r *GetOverallBuckets) HttpRequest(ctx context.Context) (*http.Request, err var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -126,6 +138,7 @@ func (r *GetOverallBuckets) HttpRequest(ctx context.Context) (*http.Request, err } r.buf.Write(data) + } r.path.Scheme = "http" @@ -212,7 +225,6 @@ func (r GetOverallBuckets) Do(ctx context.Context) (*Response, error) { } return 
response, nil - } errorResponse := types.NewElasticsearchError() @@ -221,6 +233,10 @@ func (r GetOverallBuckets) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -238,82 +254,65 @@ func (r *GetOverallBuckets) Header(key, value string) *GetOverallBuckets { // You can summarize the bucket results for all anomaly detection jobs by // using `_all` or by specifying `*` as the ``. // API Name: jobid -func (r *GetOverallBuckets) JobId(v string) *GetOverallBuckets { +func (r *GetOverallBuckets) _jobid(jobid string) *GetOverallBuckets { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } -// AllowNoMatch Specifies what to do when the request: -// -// 1. Contains wildcard expressions and there are no jobs that match. -// 2. Contains the `_all` string or no identifiers and there are no matches. -// 3. Contains wildcard expressions and there are only partial matches. -// -// If `true`, the request returns an empty `jobs` array when there are no -// matches and the subset of results when there are partial matches. If this -// parameter is `false`, the request returns a `404` status code when there -// are no matches or only partial matches. +// AllowNoMatch Refer to the description for the `allow_no_match` query parameter. // API name: allow_no_match -func (r *GetOverallBuckets) AllowNoMatch(b bool) *GetOverallBuckets { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *GetOverallBuckets) AllowNoMatch(allownomatch bool) *GetOverallBuckets { + r.req.AllowNoMatch = &allownomatch return r } -// BucketSpan The span of the overall buckets. Must be greater or equal to the largest -// bucket span of the specified anomaly detection jobs, which is the default -// value. -// -// By default, an overall bucket has a span equal to the largest bucket span -// of the specified anomaly detection jobs. 
To override that behavior, use -// the optional `bucket_span` parameter. +// BucketSpan Refer to the description for the `bucket_span` query parameter. // API name: bucket_span -func (r *GetOverallBuckets) BucketSpan(v string) *GetOverallBuckets { - r.values.Set("bucket_span", v) +func (r *GetOverallBuckets) BucketSpan(duration types.Duration) *GetOverallBuckets { + r.req.BucketSpan = duration return r } -// End Returns overall buckets with timestamps earlier than this time. +// End Refer to the description for the `end` query parameter. // API name: end -func (r *GetOverallBuckets) End(v string) *GetOverallBuckets { - r.values.Set("end", v) +func (r *GetOverallBuckets) End(datetime types.DateTime) *GetOverallBuckets { + r.req.End = datetime return r } -// ExcludeInterim If `true`, the output excludes interim results. +// ExcludeInterim Refer to the description for the `exclude_interim` query parameter. // API name: exclude_interim -func (r *GetOverallBuckets) ExcludeInterim(b bool) *GetOverallBuckets { - r.values.Set("exclude_interim", strconv.FormatBool(b)) +func (r *GetOverallBuckets) ExcludeInterim(excludeinterim bool) *GetOverallBuckets { + r.req.ExcludeInterim = &excludeinterim return r } -// OverallScore Returns overall buckets with overall scores greater than or equal to this -// value. +// OverallScore Refer to the description for the `overall_score` query parameter. // API name: overall_score -func (r *GetOverallBuckets) OverallScore(v string) *GetOverallBuckets { - r.values.Set("overall_score", v) +func (r *GetOverallBuckets) OverallScore(overallscore string) *GetOverallBuckets { + r.req.OverallScore = overallscore return r } -// Start Returns overall buckets with timestamps after this time. +// Start Refer to the description for the `start` query parameter. 
// API name: start -func (r *GetOverallBuckets) Start(v string) *GetOverallBuckets { - r.values.Set("start", v) +func (r *GetOverallBuckets) Start(datetime types.DateTime) *GetOverallBuckets { + r.req.Start = datetime return r } -// TopN The number of top anomaly detection job bucket scores to be used in the -// `overall_score` calculation. +// TopN Refer to the description for the `top_n` query parameter. // API name: top_n -func (r *GetOverallBuckets) TopN(i int) *GetOverallBuckets { - r.values.Set("top_n", strconv.Itoa(i)) +func (r *GetOverallBuckets) TopN(topn int) *GetOverallBuckets { + r.req.TopN = &topn return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getoverallbuckets/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getoverallbuckets/request.go index a1e809c7f..609e28f6d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getoverallbuckets/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getoverallbuckets/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getoverallbuckets @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package getoverallbuckets // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_overall_buckets/MlGetOverallBucketsRequest.ts#L25-L143 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_overall_buckets/MlGetOverallBucketsRequest.ts#L25-L143 type Request struct { // AllowNoMatch Refer to the description for the `allow_no_match` query parameter. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getoverallbuckets/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getoverallbuckets/response.go index 5fc85abf5..3dfafdf38 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getoverallbuckets/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getoverallbuckets/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getoverallbuckets @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getoverallbuckets // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_overall_buckets/MlGetOverallBucketsResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_overall_buckets/MlGetOverallBucketsResponse.ts#L23-L29 type Response struct { Count int64 `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getrecords/get_records.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getrecords/get_records.go index 3c69c0e4c..a78efdfaa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getrecords/get_records.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getrecords/get_records.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves anomaly records for an anomaly detection job. package getrecords @@ -53,8 +53,9 @@ type GetRecords struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +71,7 @@ func NewGetRecordsFunc(tp elastictransport.Interface) NewGetRecords { return func(jobid string) *GetRecords { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -85,6 +86,8 @@ func New(tp elastictransport.Interface) *GetRecords { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +117,19 @@ func (r *GetRecords) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +137,7 @@ func (r *GetRecords) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -210,7 +224,6 @@ func (r GetRecords) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -219,6 +232,10 @@ func (r GetRecords) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -231,75 +248,82 @@ func (r *GetRecords) Header(key, value string) *GetRecords { // JobId Identifier for the anomaly detection job. 
// API Name: jobid -func (r *GetRecords) JobId(v string) *GetRecords { +func (r *GetRecords) _jobid(jobid string) *GetRecords { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid + + return r +} + +// From Skips the specified number of records. +// API name: from +func (r *GetRecords) From(from int) *GetRecords { + r.values.Set("from", strconv.Itoa(from)) return r } -// Desc If true, the results are sorted in descending order. +// Size Specifies the maximum number of records to obtain. +// API name: size +func (r *GetRecords) Size(size int) *GetRecords { + r.values.Set("size", strconv.Itoa(size)) + + return r +} + +// Desc Refer to the description for the `desc` query parameter. // API name: desc -func (r *GetRecords) Desc(b bool) *GetRecords { - r.values.Set("desc", strconv.FormatBool(b)) +func (r *GetRecords) Desc(desc bool) *GetRecords { + r.req.Desc = &desc return r } -// End Returns records with timestamps earlier than this time. The default value -// means results are not limited to specific timestamps. +// End Refer to the description for the `end` query parameter. // API name: end -func (r *GetRecords) End(v string) *GetRecords { - r.values.Set("end", v) +func (r *GetRecords) End(datetime types.DateTime) *GetRecords { + r.req.End = datetime return r } -// ExcludeInterim If `true`, the output excludes interim results. +// ExcludeInterim Refer to the description for the `exclude_interim` query parameter. // API name: exclude_interim -func (r *GetRecords) ExcludeInterim(b bool) *GetRecords { - r.values.Set("exclude_interim", strconv.FormatBool(b)) +func (r *GetRecords) ExcludeInterim(excludeinterim bool) *GetRecords { + r.req.ExcludeInterim = &excludeinterim return r } -// From Skips the specified number of records. 
-// API name: from -func (r *GetRecords) From(i int) *GetRecords { - r.values.Set("from", strconv.Itoa(i)) +// API name: page +func (r *GetRecords) Page(page *types.Page) *GetRecords { + + r.req.Page = page return r } -// RecordScore Returns records with anomaly scores greater or equal than this value. +// RecordScore Refer to the description for the `record_score` query parameter. // API name: record_score -func (r *GetRecords) RecordScore(v string) *GetRecords { - r.values.Set("record_score", v) +func (r *GetRecords) RecordScore(recordscore types.Float64) *GetRecords { - return r -} - -// Size Specifies the maximum number of records to obtain. -// API name: size -func (r *GetRecords) Size(i int) *GetRecords { - r.values.Set("size", strconv.Itoa(i)) + r.req.RecordScore = &recordscore return r } -// Sort Specifies the sort field for the requested records. +// Sort Refer to the description for the `sort` query parameter. // API name: sort -func (r *GetRecords) Sort(v string) *GetRecords { - r.values.Set("sort", v) +func (r *GetRecords) Sort(field string) *GetRecords { + r.req.Sort = &field return r } -// Start Returns records with timestamps after this time. The default value means -// results are not limited to specific timestamps. +// Start Refer to the description for the `start` query parameter. // API name: start -func (r *GetRecords) Start(v string) *GetRecords { - r.values.Set("start", v) +func (r *GetRecords) Start(datetime types.DateTime) *GetRecords { + r.req.Start = datetime return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getrecords/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getrecords/request.go index 1eb27ff36..67f650bf0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getrecords/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getrecords/request.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getrecords @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package getrecords // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_records/MlGetAnomalyRecordsRequest.ts#L26-L127 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_records/MlGetAnomalyRecordsRequest.ts#L26-L127 type Request struct { // Desc Refer to the description for the `desc` query parameter. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getrecords/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getrecords/response.go index 5816d197f..5022a6dc3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getrecords/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/getrecords/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getrecords @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getrecords // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_records/MlGetAnomalyRecordsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_records/MlGetAnomalyRecordsResponse.ts#L23-L28 type Response struct { Count int64 `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/gettrainedmodels/get_trained_models.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/gettrainedmodels/get_trained_models.go index ed3e264c6..13c2822af 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/gettrainedmodels/get_trained_models.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/gettrainedmodels/get_trained_models.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves configuration information for a trained inference model. 
package gettrainedmodels @@ -36,7 +36,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/include" ) @@ -178,7 +177,6 @@ func (r GetTrainedModels) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -187,6 +185,10 @@ func (r GetTrainedModels) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -220,9 +222,9 @@ func (r *GetTrainedModels) Header(key, value string) *GetTrainedModels { // ModelId The unique identifier of the trained model. // API Name: modelid -func (r *GetTrainedModels) ModelId(v string) *GetTrainedModels { +func (r *GetTrainedModels) ModelId(modelid string) *GetTrainedModels { r.paramSet |= modelidMask - r.modelid = v + r.modelid = modelid return r } @@ -236,8 +238,8 @@ func (r *GetTrainedModels) ModelId(v string) *GetTrainedModels { // If true, it returns an empty array when there are no matches and the // subset of results when there are partial matches. // API name: allow_no_match -func (r *GetTrainedModels) AllowNoMatch(b bool) *GetTrainedModels { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *GetTrainedModels) AllowNoMatch(allownomatch bool) *GetTrainedModels { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } @@ -245,8 +247,8 @@ func (r *GetTrainedModels) AllowNoMatch(b bool) *GetTrainedModels { // DecompressDefinition Specifies whether the included model definition should be returned as a // JSON map (true) or in a custom compressed format (false). 
// API name: decompress_definition -func (r *GetTrainedModels) DecompressDefinition(b bool) *GetTrainedModels { - r.values.Set("decompress_definition", strconv.FormatBool(b)) +func (r *GetTrainedModels) DecompressDefinition(decompressdefinition bool) *GetTrainedModels { + r.values.Set("decompress_definition", strconv.FormatBool(decompressdefinition)) return r } @@ -255,16 +257,16 @@ func (r *GetTrainedModels) DecompressDefinition(b bool) *GetTrainedModels { // retrieval. This allows the configuration to be in an acceptable format to // be retrieved and then added to another cluster. // API name: exclude_generated -func (r *GetTrainedModels) ExcludeGenerated(b bool) *GetTrainedModels { - r.values.Set("exclude_generated", strconv.FormatBool(b)) +func (r *GetTrainedModels) ExcludeGenerated(excludegenerated bool) *GetTrainedModels { + r.values.Set("exclude_generated", strconv.FormatBool(excludegenerated)) return r } // From Skips the specified number of models. // API name: from -func (r *GetTrainedModels) From(i int) *GetTrainedModels { - r.values.Set("from", strconv.Itoa(i)) +func (r *GetTrainedModels) From(from int) *GetTrainedModels { + r.values.Set("from", strconv.Itoa(from)) return r } @@ -272,16 +274,16 @@ func (r *GetTrainedModels) From(i int) *GetTrainedModels { // Include A comma delimited string of optional fields to include in the response // body. // API name: include -func (r *GetTrainedModels) Include(enum include.Include) *GetTrainedModels { - r.values.Set("include", enum.String()) +func (r *GetTrainedModels) Include(include include.Include) *GetTrainedModels { + r.values.Set("include", include.String()) return r } // Size Specifies the maximum number of models to obtain. 
// API name: size -func (r *GetTrainedModels) Size(i int) *GetTrainedModels { - r.values.Set("size", strconv.Itoa(i)) +func (r *GetTrainedModels) Size(size int) *GetTrainedModels { + r.values.Set("size", strconv.Itoa(size)) return r } @@ -290,8 +292,8 @@ func (r *GetTrainedModels) Size(i int) *GetTrainedModels { // none. When supplied, only trained models that contain all the supplied // tags are returned. // API name: tags -func (r *GetTrainedModels) Tags(v string) *GetTrainedModels { - r.values.Set("tags", v) +func (r *GetTrainedModels) Tags(tags string) *GetTrainedModels { + r.values.Set("tags", tags) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/gettrainedmodels/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/gettrainedmodels/response.go index 0f2b51733..3f2059fbf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/gettrainedmodels/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/gettrainedmodels/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package gettrainedmodels @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettrainedmodels // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_trained_models/MlGetTrainedModelResponse.ts#L23-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_trained_models/MlGetTrainedModelResponse.ts#L23-L34 type Response struct { Count int `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/gettrainedmodelsstats/get_trained_models_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/gettrainedmodelsstats/get_trained_models_stats.go index 34066fba4..61b308d0b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/gettrainedmodelsstats/get_trained_models_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/gettrainedmodelsstats/get_trained_models_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves usage information for trained inference models. 
package gettrainedmodelsstats @@ -180,7 +180,6 @@ func (r GetTrainedModelsStats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -189,6 +188,10 @@ func (r GetTrainedModelsStats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -223,9 +226,9 @@ func (r *GetTrainedModelsStats) Header(key, value string) *GetTrainedModelsStats // ModelId The unique identifier of the trained model or a model alias. It can be a // comma-separated list or a wildcard expression. // API Name: modelid -func (r *GetTrainedModelsStats) ModelId(v string) *GetTrainedModelsStats { +func (r *GetTrainedModelsStats) ModelId(modelid string) *GetTrainedModelsStats { r.paramSet |= modelidMask - r.modelid = v + r.modelid = modelid return r } @@ -239,24 +242,24 @@ func (r *GetTrainedModelsStats) ModelId(v string) *GetTrainedModelsStats { // If true, it returns an empty array when there are no matches and the // subset of results when there are partial matches. // API name: allow_no_match -func (r *GetTrainedModelsStats) AllowNoMatch(b bool) *GetTrainedModelsStats { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *GetTrainedModelsStats) AllowNoMatch(allownomatch bool) *GetTrainedModelsStats { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } // From Skips the specified number of models. // API name: from -func (r *GetTrainedModelsStats) From(i int) *GetTrainedModelsStats { - r.values.Set("from", strconv.Itoa(i)) +func (r *GetTrainedModelsStats) From(from int) *GetTrainedModelsStats { + r.values.Set("from", strconv.Itoa(from)) return r } // Size Specifies the maximum number of models to obtain. 
// API name: size -func (r *GetTrainedModelsStats) Size(i int) *GetTrainedModelsStats { - r.values.Set("size", strconv.Itoa(i)) +func (r *GetTrainedModelsStats) Size(size int) *GetTrainedModelsStats { + r.values.Set("size", strconv.Itoa(size)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/gettrainedmodelsstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/gettrainedmodelsstats/response.go index 2e8e0b3d4..5e06f4a06 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/gettrainedmodelsstats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/gettrainedmodelsstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package gettrainedmodelsstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettrainedmodelsstats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_trained_models_stats/MlGetTrainedModelStatsResponse.ts#L23-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_trained_models_stats/MlGetTrainedModelStatsResponse.ts#L23-L33 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/infertrainedmodel/infer_trained_model.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/infertrainedmodel/infer_trained_model.go index 64b45d9be..03ee719e8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/infertrainedmodel/infer_trained_model.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/infertrainedmodel/infer_trained_model.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Evaluate a trained model. package infertrainedmodel @@ -52,8 +52,9 @@ type InferTrainedModel struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewInferTrainedModelFunc(tp elastictransport.Interface) NewInferTrainedMode return func(modelid string) *InferTrainedModel { n := New(tp) - n.ModelId(modelid) + n._modelid(modelid) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *InferTrainedModel { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *InferTrainedModel) HttpRequest(ctx context.Context) (*http.Request, err var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *InferTrainedModel) HttpRequest(ctx context.Context) (*http.Request, err } r.buf.Write(data) + } r.path.Scheme = "http" @@ -221,7 +235,6 @@ func (r InferTrainedModel) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -230,6 +243,10 @@ func (r InferTrainedModel) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -242,17 
+259,38 @@ func (r *InferTrainedModel) Header(key, value string) *InferTrainedModel { // ModelId The unique identifier of the trained model. // API Name: modelid -func (r *InferTrainedModel) ModelId(v string) *InferTrainedModel { +func (r *InferTrainedModel) _modelid(modelid string) *InferTrainedModel { r.paramSet |= modelidMask - r.modelid = v + r.modelid = modelid return r } // Timeout Controls the amount of time to wait for inference results. // API name: timeout -func (r *InferTrainedModel) Timeout(v string) *InferTrainedModel { - r.values.Set("timeout", v) +func (r *InferTrainedModel) Timeout(duration string) *InferTrainedModel { + r.values.Set("timeout", duration) + + return r +} + +// Docs An array of objects to pass to the model for inference. The objects should +// contain a fields matching your +// configured trained model input. Typically, for NLP models, the field name is +// `text_field`. +// Currently, for NLP models, only a single value is allowed. +// API name: docs +func (r *InferTrainedModel) Docs(docs ...map[string]json.RawMessage) *InferTrainedModel { + r.req.Docs = docs + + return r +} + +// InferenceConfig The inference configuration updates to apply on the API call +// API name: inference_config +func (r *InferTrainedModel) InferenceConfig(inferenceconfig *types.InferenceConfigUpdateContainer) *InferTrainedModel { + + r.req.InferenceConfig = inferenceconfig return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/infertrainedmodel/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/infertrainedmodel/request.go index 3e40da9ea..dc807aeeb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/infertrainedmodel/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/infertrainedmodel/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package infertrainedmodel @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package infertrainedmodel // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/infer_trained_model/MlInferTrainedModelRequest.ts#L27-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/infer_trained_model/MlInferTrainedModelRequest.ts#L27-L59 type Request struct { // Docs An array of objects to pass to the model for inference. The objects should diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/infertrainedmodel/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/infertrainedmodel/response.go index cee2bc74b..5a09e6fab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/infertrainedmodel/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/infertrainedmodel/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package infertrainedmodel @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package infertrainedmodel // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/infer_trained_model/MlInferTrainedModelResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/infer_trained_model/MlInferTrainedModelResponse.ts#L22-L26 type Response struct { InferenceResults []types.InferenceResponseResult `json:"inference_results"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/info/info.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/info/info.go index 93e45e262..6aece857a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/info/info.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/info/info.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns defaults and limits used by machine learning. 
package info @@ -159,7 +159,6 @@ func (r Info) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r Info) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/info/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/info/response.go index bfae40c5b..c6ae646b6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/info/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/info/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package info @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package info // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/info/MlInfoResponse.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/info/MlInfoResponse.ts#L22-L29 type Response struct { Defaults types.Defaults `json:"defaults"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/openjob/open_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/openjob/open_job.go index 72325e929..f261d9565 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/openjob/open_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/openjob/open_job.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Opens one or more anomaly detection jobs. package openjob @@ -52,8 +52,9 @@ type OpenJob struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewOpenJobFunc(tp elastictransport.Interface) NewOpenJob { return func(jobid string) *OpenJob { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *OpenJob { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *OpenJob) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *OpenJob) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -207,7 +221,6 @@ func (r OpenJob) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -216,6 +229,10 @@ func (r OpenJob) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -228,17 +245,17 @@ func (r *OpenJob) Header(key, value string) *OpenJob { // JobId Identifier for the anomaly detection job. 
// API Name: jobid -func (r *OpenJob) JobId(v string) *OpenJob { +func (r *OpenJob) _jobid(jobid string) *OpenJob { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } -// Timeout Controls the time to wait until a job has opened. +// Timeout Refer to the description for the `timeout` query parameter. // API name: timeout -func (r *OpenJob) Timeout(v string) *OpenJob { - r.values.Set("timeout", v) +func (r *OpenJob) Timeout(duration types.Duration) *OpenJob { + r.req.Timeout = duration return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/openjob/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/openjob/request.go index 4e82f3a34..d98a00d63 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/openjob/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/openjob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package openjob @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package openjob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/open_job/MlOpenJobRequest.ts#L24-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/open_job/MlOpenJobRequest.ts#L24-L59 type Request struct { // Timeout Refer to the description for the `timeout` query parameter. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/openjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/openjob/response.go index 672604f9d..27eb1eb5c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/openjob/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/openjob/response.go @@ -16,15 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package openjob // Response holds the response body struct for the package openjob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/open_job/MlOpenJobResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/open_job/MlOpenJobResponse.ts#L22-L31 type Response struct { + + // Node The ID of the node that the job was started on. In serverless this will be + // the "serverless". + // If the job is allowed to open lazily and has not yet been assigned to a node, + // this value is an empty string. Node string `json:"node"` Opened bool `json:"opened"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postcalendarevents/post_calendar_events.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postcalendarevents/post_calendar_events.go index 2f337bfa8..ad2373ae1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postcalendarevents/post_calendar_events.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postcalendarevents/post_calendar_events.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Posts scheduled events in a calendar. package postcalendarevents @@ -52,8 +52,9 @@ type PostCalendarEvents struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewPostCalendarEventsFunc(tp elastictransport.Interface) NewPostCalendarEve return func(calendarid string) *PostCalendarEvents { n := New(tp) - n.CalendarId(calendarid) + n._calendarid(calendarid) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *PostCalendarEvents { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *PostCalendarEvents) HttpRequest(ctx context.Context) (*http.Request, er var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *PostCalendarEvents) HttpRequest(ctx context.Context) (*http.Request, er } r.buf.Write(data) + } r.path.Scheme = "http" @@ -207,7 +221,6 @@ func (r PostCalendarEvents) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -216,6 +229,10 @@ func (r PostCalendarEvents) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -228,9 +245,19 @@ func (r *PostCalendarEvents) Header(key, value string) *PostCalendarEvents { // CalendarId A string that uniquely identifies a calendar. 
// API Name: calendarid -func (r *PostCalendarEvents) CalendarId(v string) *PostCalendarEvents { +func (r *PostCalendarEvents) _calendarid(calendarid string) *PostCalendarEvents { r.paramSet |= calendaridMask - r.calendarid = v + r.calendarid = calendarid + + return r +} + +// Events A list of one of more scheduled events. The event’s start and end times can +// be specified as integer milliseconds since the epoch or as a string in ISO +// 8601 format. +// API name: events +func (r *PostCalendarEvents) Events(events ...types.CalendarEvent) *PostCalendarEvents { + r.req.Events = events return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postcalendarevents/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postcalendarevents/request.go index 49417bd26..2e516b1c5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postcalendarevents/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postcalendarevents/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package postcalendarevents @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package postcalendarevents // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/post_calendar_events/MlPostCalendarEventsRequest.ts#L24-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/post_calendar_events/MlPostCalendarEventsRequest.ts#L24-L40 type Request struct { // Events A list of one of more scheduled events. 
The event’s start and end times can diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postcalendarevents/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postcalendarevents/response.go index 6202e694d..d69134798 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postcalendarevents/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postcalendarevents/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package postcalendarevents @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package postcalendarevents // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/post_calendar_events/MlPostCalendarEventsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/post_calendar_events/MlPostCalendarEventsResponse.ts#L22-L24 type Response struct { Events []types.CalendarEvent `json:"events"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postdata/post_data.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postdata/post_data.go new file mode 100644 index 000000000..2a4aac68c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postdata/post_data.go @@ -0,0 +1,268 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Sends data to an anomaly detection job for analysis. +package postdata + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + jobidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PostData struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + req *Request + deferred []func(request *Request) error + raw io.Reader + + paramSet int + + jobid string +} + +// NewPostData type alias for index. +type NewPostData func(jobid string) *PostData + +// NewPostDataFunc returns a new instance of PostData with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewPostDataFunc(tp elastictransport.Interface) NewPostData { + return func(jobid string) *PostData { + n := New(tp) + + n._jobid(jobid) + + return n + } +} + +// Sends data to an anomaly detection job for analysis. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html +func New(tp elastictransport.Interface) *PostData { + r := &PostData{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PostData) Raw(raw io.Reader) *PostData { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PostData) Request(req *Request) *PostData { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PostData) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw != nil { + r.buf.ReadFrom(r.raw) + } else if r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PostData: %w", err) + } + + r.buf.Write(data) + + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == jobidMask: + path.WriteString("/") + path.WriteString("_ml") + path.WriteString("/") + path.WriteString("anomaly_detectors") + path.WriteString("/") + + path.WriteString(r.jobid) + path.WriteString("/") + path.WriteString("_data") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx 
!= nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PostData) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the PostData query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a postdata.Response +func (r PostData) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// Header set a key, value pair in the PostData headers map. +func (r *PostData) Header(key, value string) *PostData { + r.headers.Set(key, value) + + return r +} + +// JobId Identifier for the anomaly detection job. 
The job must have a state of open +// to receive and process the data. +// API Name: jobid +func (r *PostData) _jobid(jobid string) *PostData { + r.paramSet |= jobidMask + r.jobid = jobid + + return r +} + +// ResetEnd Specifies the end of the bucket resetting range. +// API name: reset_end +func (r *PostData) ResetEnd(datetime string) *PostData { + r.values.Set("reset_end", datetime) + + return r +} + +// ResetStart Specifies the start of the bucket resetting range. +// API name: reset_start +func (r *PostData) ResetStart(datetime string) *PostData { + r.values.Set("reset_start", datetime) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postdata/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postdata/request.go new file mode 100644 index 000000000..78c3135bc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postdata/request.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package postdata + +import ( + "encoding/json" +) + +// Request holds the request body struct for the package postdata +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/post_data/MlPostJobDataRequest.ts#L24-L68 +type Request = []json.RawMessage diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postdata/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postdata/response.go new file mode 100644 index 000000000..35b76d6a6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/postdata/response.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package postdata + +// Response holds the response body struct for the package postdata +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/post_data/MlPostJobDataResponse.ts#L23-L41 + +type Response struct { + BucketCount int64 `json:"bucket_count"` + EarliestRecordTimestamp int64 `json:"earliest_record_timestamp"` + EmptyBucketCount int64 `json:"empty_bucket_count"` + InputBytes int64 `json:"input_bytes"` + InputFieldCount int64 `json:"input_field_count"` + InputRecordCount int64 `json:"input_record_count"` + InvalidDateCount int64 `json:"invalid_date_count"` + JobId string `json:"job_id"` + LastDataTime int `json:"last_data_time"` + LatestRecordTimestamp int64 `json:"latest_record_timestamp"` + MissingFieldCount int64 `json:"missing_field_count"` + OutOfOrderTimestampCount int64 `json:"out_of_order_timestamp_count"` + ProcessedFieldCount int64 `json:"processed_field_count"` + ProcessedRecordCount int64 `json:"processed_record_count"` + SparseBucketCount int64 `json:"sparse_bucket_count"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdatafeed/preview_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdatafeed/preview_datafeed.go index f8e6346ba..e00d83cac 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdatafeed/preview_datafeed.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdatafeed/preview_datafeed.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Previews a datafeed. package previewdatafeed @@ -52,8 +52,9 @@ type PreviewDatafeed struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -82,6 +83,8 @@ func New(tp elastictransport.Interface) *PreviewDatafeed { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -111,9 +114,19 @@ func (r *PreviewDatafeed) HttpRequest(ctx context.Context) (*http.Request, error var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -121,6 +134,7 @@ func (r *PreviewDatafeed) HttpRequest(ctx context.Context) (*http.Request, error } r.buf.Write(data) + } r.path.Scheme = "http" @@ -197,7 +211,7 @@ func (r PreviewDatafeed) Perform(ctx context.Context) (*http.Response, error) { } // Do runs the request through the transport, handle the response and returns a previewdatafeed.Response -func (r PreviewDatafeed) Do(ctx context.Context) (*Response, error) { +func (r PreviewDatafeed) Do(ctx context.Context) (Response, error) { response := NewResponse() @@ -208,13 +222,12 @@ func (r PreviewDatafeed) Do(ctx context.Context) (*Response, error) { defer res.Body.Close() if res.StatusCode < 299 { - err = json.NewDecoder(res.Body).Decode(response) + err = json.NewDecoder(res.Body).Decode(&response) if err != nil { return nil, err } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -223,6 +236,10 @@ func (r PreviewDatafeed) Do(ctx context.Context) (*Response, error) { return nil, err 
} + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -241,25 +258,50 @@ func (r *PreviewDatafeed) Header(key, value string) *PreviewDatafeed { // or anomaly detection job // configuration details in the request body. // API Name: datafeedid -func (r *PreviewDatafeed) DatafeedId(v string) *PreviewDatafeed { +func (r *PreviewDatafeed) DatafeedId(datafeedid string) *PreviewDatafeed { r.paramSet |= datafeedidMask - r.datafeedid = v + r.datafeedid = datafeedid return r } // Start The start time from where the datafeed preview should begin // API name: start -func (r *PreviewDatafeed) Start(v string) *PreviewDatafeed { - r.values.Set("start", v) +func (r *PreviewDatafeed) Start(datetime string) *PreviewDatafeed { + r.values.Set("start", datetime) return r } // End The end time when the datafeed preview should stop // API name: end -func (r *PreviewDatafeed) End(v string) *PreviewDatafeed { - r.values.Set("end", v) +func (r *PreviewDatafeed) End(datetime string) *PreviewDatafeed { + r.values.Set("end", datetime) + + return r +} + +// DatafeedConfig The datafeed definition to preview. +// API name: datafeed_config +func (r *PreviewDatafeed) DatafeedConfig(datafeedconfig *types.DatafeedConfig) *PreviewDatafeed { + + r.req.DatafeedConfig = datafeedconfig + + return r +} + +// JobConfig The configuration details for the anomaly detection job that is associated +// with the datafeed. If the +// `datafeed_config` object does not include a `job_id` that references an +// existing anomaly detection job, you must +// supply this `job_config` object. If you include both a `job_id` and a +// `job_config`, the latter information is +// used. You cannot specify a `job_config` object unless you also supply a +// `datafeed_config` object. 
+// API name: job_config +func (r *PreviewDatafeed) JobConfig(jobconfig *types.JobConfig) *PreviewDatafeed { + + r.req.JobConfig = jobconfig return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdatafeed/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdatafeed/request.go index cee0ecee8..56768a812 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdatafeed/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdatafeed/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package previewdatafeed @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package previewdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/preview_datafeed/MlPreviewDatafeedRequest.ts#L26-L69 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/preview_datafeed/MlPreviewDatafeedRequest.ts#L26-L69 type Request struct { // DatafeedConfig The datafeed definition to preview. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdatafeed/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdatafeed/response.go index 9d3dee33a..f9f0e120c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdatafeed/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdatafeed/response.go @@ -16,22 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package previewdatafeed -import "encoding/json" +import ( + "encoding/json" +) // Response holds the response body struct for the package previewdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/preview_datafeed/MlPreviewDatafeedResponse.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/preview_datafeed/MlPreviewDatafeedResponse.ts#L20-L22 -type Response struct { - Data []json.RawMessage `json:"data"` -} +type Response []json.RawMessage // NewResponse returns a Response -func NewResponse() *Response { - r := &Response{} +func NewResponse() Response { + r := Response{} return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdataframeanalytics/preview_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdataframeanalytics/preview_data_frame_analytics.go index 2b84ea356..ba3dfe06d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdataframeanalytics/preview_data_frame_analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdataframeanalytics/preview_data_frame_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Previews that will be analyzed given a data frame analytics config. 
package previewdataframeanalytics @@ -52,8 +52,9 @@ type PreviewDataFrameAnalytics struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -82,6 +83,8 @@ func New(tp elastictransport.Interface) *PreviewDataFrameAnalytics { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -111,9 +114,19 @@ func (r *PreviewDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Requ var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -121,6 +134,7 @@ func (r *PreviewDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Requ } r.buf.Write(data) + } r.path.Scheme = "http" @@ -218,7 +232,6 @@ func (r PreviewDataFrameAnalytics) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -227,6 +240,10 @@ func (r PreviewDataFrameAnalytics) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -239,9 +256,20 @@ func (r *PreviewDataFrameAnalytics) Header(key, value string) *PreviewDataFrameA // Id Identifier for the data frame analytics job. // API Name: id -func (r *PreviewDataFrameAnalytics) Id(v string) *PreviewDataFrameAnalytics { +func (r *PreviewDataFrameAnalytics) Id(id string) *PreviewDataFrameAnalytics { r.paramSet |= idMask - r.id = v + r.id = id + + return r +} + +// Config A data frame analytics config as described in create data frame analytics +// jobs. Note that `id` and `dest` don’t need to be provided in the context of +// this API. 
+// API name: config +func (r *PreviewDataFrameAnalytics) Config(config *types.DataframePreviewConfig) *PreviewDataFrameAnalytics { + + r.req.Config = config return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdataframeanalytics/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdataframeanalytics/request.go index 5ec6f8444..dd36f2d72 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdataframeanalytics/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdataframeanalytics/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package previewdataframeanalytics @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package previewdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsRequest.ts#L24-L47 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsRequest.ts#L24-L47 type Request struct { // Config A data frame analytics config as described in create data frame analytics diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdataframeanalytics/response.go index 36204f2b0..3c5b40987 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdataframeanalytics/response.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdataframeanalytics/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package previewdataframeanalytics // Response holds the response body struct for the package previewdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsResponse.ts#L23-L28 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendar/put_calendar.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendar/put_calendar.go index d55adf892..b47890cb7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendar/put_calendar.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendar/put_calendar.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Instantiates a calendar. 
package putcalendar @@ -52,8 +52,9 @@ type PutCalendar struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewPutCalendarFunc(tp elastictransport.Interface) NewPutCalendar { return func(calendarid string) *PutCalendar { n := New(tp) - n.CalendarId(calendarid) + n._calendarid(calendarid) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *PutCalendar { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *PutCalendar) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *PutCalendar) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -205,7 +219,6 @@ func (r PutCalendar) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -214,6 +227,10 @@ func (r PutCalendar) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -226,9 +243,26 @@ func (r *PutCalendar) Header(key, value string) *PutCalendar { // CalendarId A string that uniquely identifies a calendar. // API Name: calendarid -func (r *PutCalendar) CalendarId(v string) *PutCalendar { +func (r *PutCalendar) _calendarid(calendarid string) *PutCalendar { r.paramSet |= calendaridMask - r.calendarid = v + r.calendarid = calendarid + + return r +} + +// Description A description of the calendar. 
+// API name: description +func (r *PutCalendar) Description(description string) *PutCalendar { + + r.req.Description = &description + + return r +} + +// JobIds An array of anomaly detection job identifiers. +// API name: job_ids +func (r *PutCalendar) JobIds(jobids ...string) *PutCalendar { + r.req.JobIds = jobids return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendar/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendar/request.go index 64155394b..186c77a38 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendar/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendar/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putcalendar @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package putcalendar // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_calendar/MlPutCalendarRequest.ts#L23-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_calendar/MlPutCalendarRequest.ts#L23-L43 type Request struct { // Description A description of the calendar. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendar/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendar/response.go index b2a14dbec..a40c9fd54 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendar/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendar/response.go @@ -16,13 +16,21 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putcalendar +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Response holds the response body struct for the package putcalendar // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_calendar/MlPutCalendarResponse.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_calendar/MlPutCalendarResponse.ts#L22-L31 type Response struct { @@ -39,3 +47,55 @@ func NewResponse() *Response { r := &Response{} return r } + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "calendar_id": + if err := dec.Decode(&s.CalendarId); err != nil { + return err + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "job_ids": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.JobIds = append(s.JobIds, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.JobIds); err != nil { + return err + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendarjob/put_calendar_job.go 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendarjob/put_calendar_job.go index 3cf2479ea..c3d4e9b37 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendarjob/put_calendar_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendarjob/put_calendar_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Adds an anomaly detection job to a calendar. package putcalendarjob @@ -70,9 +70,9 @@ func NewPutCalendarJobFunc(tp elastictransport.Interface) NewPutCalendarJob { return func(calendarid, jobid string) *PutCalendarJob { n := New(tp) - n.CalendarId(calendarid) + n._calendarid(calendarid) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -180,7 +180,6 @@ func (r PutCalendarJob) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -189,6 +188,10 @@ func (r PutCalendarJob) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -222,9 +225,9 @@ func (r *PutCalendarJob) Header(key, value string) *PutCalendarJob { // CalendarId A string that uniquely identifies a calendar. // API Name: calendarid -func (r *PutCalendarJob) CalendarId(v string) *PutCalendarJob { +func (r *PutCalendarJob) _calendarid(calendarid string) *PutCalendarJob { r.paramSet |= calendaridMask - r.calendarid = v + r.calendarid = calendarid return r } @@ -232,9 +235,9 @@ func (r *PutCalendarJob) CalendarId(v string) *PutCalendarJob { // JobId An identifier for the anomaly detection jobs. It can be a job identifier, a // group name, or a comma-separated list of jobs or groups. 
// API Name: jobid -func (r *PutCalendarJob) JobId(v string) *PutCalendarJob { +func (r *PutCalendarJob) _jobid(jobid string) *PutCalendarJob { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendarjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendarjob/response.go index 2a20f5165..b7c4f67aa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendarjob/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendarjob/response.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putcalendarjob +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Response holds the response body struct for the package putcalendarjob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_calendar_job/MlPutCalendarJobResponse.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_calendar_job/MlPutCalendarJobResponse.ts#L22-L31 type Response struct { @@ -39,3 +47,55 @@ func NewResponse() *Response { r := &Response{} return r } + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "calendar_id": + if err := dec.Decode(&s.CalendarId); err != nil { + return err + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := 
string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "job_ids": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.JobIds = append(s.JobIds, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.JobIds); err != nil { + return err + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdatafeed/put_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdatafeed/put_datafeed.go index 13841eebc..c67c1abf6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdatafeed/put_datafeed.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdatafeed/put_datafeed.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Instantiates a datafeed. 
package putdatafeed @@ -35,6 +35,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -53,8 +54,9 @@ type PutDatafeed struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +72,7 @@ func NewPutDatafeedFunc(tp elastictransport.Interface) NewPutDatafeed { return func(datafeedid string) *PutDatafeed { n := New(tp) - n.DatafeedId(datafeedid) + n._datafeedid(datafeedid) return n } @@ -85,6 +87,8 @@ func New(tp elastictransport.Interface) *PutDatafeed { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +118,19 @@ func (r *PutDatafeed) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +138,7 @@ func (r *PutDatafeed) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -206,7 +221,6 @@ func (r PutDatafeed) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -215,6 +229,10 @@ func (r PutDatafeed) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -230,9 +248,9 @@ func (r *PutDatafeed) Header(key, value string) *PutDatafeed { // hyphens, and underscores. // It must start and end with alphanumeric characters. 
// API Name: datafeedid -func (r *PutDatafeed) DatafeedId(v string) *PutDatafeed { +func (r *PutDatafeed) _datafeedid(datafeedid string) *PutDatafeed { r.paramSet |= datafeedidMask - r.datafeedid = v + r.datafeedid = datafeedid return r } @@ -241,8 +259,8 @@ func (r *PutDatafeed) DatafeedId(v string) *PutDatafeed { // are ignored. This includes the `_all` // string or when no indices are specified. // API name: allow_no_indices -func (r *PutDatafeed) AllowNoIndices(b bool) *PutDatafeed { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *PutDatafeed) AllowNoIndices(allownoindices bool) *PutDatafeed { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } @@ -252,24 +270,195 @@ func (r *PutDatafeed) AllowNoIndices(b bool) *PutDatafeed { // whether wildcard expressions match hidden data streams. Supports // comma-separated values. // API name: expand_wildcards -func (r *PutDatafeed) ExpandWildcards(v string) *PutDatafeed { - r.values.Set("expand_wildcards", v) +func (r *PutDatafeed) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *PutDatafeed { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } // IgnoreThrottled If true, concrete, expanded, or aliased indices are ignored when frozen. // API name: ignore_throttled -func (r *PutDatafeed) IgnoreThrottled(b bool) *PutDatafeed { - r.values.Set("ignore_throttled", strconv.FormatBool(b)) +func (r *PutDatafeed) IgnoreThrottled(ignorethrottled bool) *PutDatafeed { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) return r } // IgnoreUnavailable If true, unavailable indices (missing or closed) are ignored. 
// API name: ignore_unavailable -func (r *PutDatafeed) IgnoreUnavailable(b bool) *PutDatafeed { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *PutDatafeed) IgnoreUnavailable(ignoreunavailable bool) *PutDatafeed { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// Aggregations If set, the datafeed performs aggregation searches. +// Support for aggregations is limited and should be used only with low +// cardinality data. +// API name: aggregations +func (r *PutDatafeed) Aggregations(aggregations map[string]types.Aggregations) *PutDatafeed { + + r.req.Aggregations = aggregations + + return r +} + +// ChunkingConfig Datafeeds might be required to search over long time periods, for several +// months or years. +// This search is split into time chunks in order to ensure the load on +// Elasticsearch is managed. +// Chunking configuration controls how the size of these time chunks are +// calculated; +// it is an advanced configuration option. +// API name: chunking_config +func (r *PutDatafeed) ChunkingConfig(chunkingconfig *types.ChunkingConfig) *PutDatafeed { + + r.req.ChunkingConfig = chunkingconfig + + return r +} + +// DelayedDataCheckConfig Specifies whether the datafeed checks for missing data and the size of the +// window. +// The datafeed can optionally search over indices that have already been read +// in an effort to determine whether +// any data has subsequently been added to the index. If missing data is found, +// it is a good indication that the +// `query_delay` is set too low and the data is being indexed after the datafeed +// has passed that moment in time. +// This check runs only on real-time datafeeds. 
+// API name: delayed_data_check_config +func (r *PutDatafeed) DelayedDataCheckConfig(delayeddatacheckconfig *types.DelayedDataCheckConfig) *PutDatafeed { + + r.req.DelayedDataCheckConfig = delayeddatacheckconfig + + return r +} + +// Frequency The interval at which scheduled queries are made while the datafeed runs in +// real time. +// The default value is either the bucket span for short bucket spans, or, for +// longer bucket spans, a sensible +// fraction of the bucket span. When `frequency` is shorter than the bucket +// span, interim results for the last +// (partial) bucket are written then eventually overwritten by the full bucket +// results. If the datafeed uses +// aggregations, this value must be divisible by the interval of the date +// histogram aggregation. +// API name: frequency +func (r *PutDatafeed) Frequency(duration types.Duration) *PutDatafeed { + r.req.Frequency = duration + + return r +} + +// API name: headers +func (r *PutDatafeed) Headers(httpheaders types.HttpHeaders) *PutDatafeed { + r.req.Headers = httpheaders + + return r +} + +// Indices An array of index names. Wildcards are supported. If any of the indices are +// in remote clusters, the machine +// learning nodes must have the `remote_cluster_client` role. +// API name: indices +func (r *PutDatafeed) Indices(indices ...string) *PutDatafeed { + r.req.Indices = indices + + return r +} + +// IndicesOptions Specifies index expansion options that are used during search +// API name: indices_options +func (r *PutDatafeed) IndicesOptions(indicesoptions *types.IndicesOptions) *PutDatafeed { + + r.req.IndicesOptions = indicesoptions + + return r +} + +// JobId Identifier for the anomaly detection job. 
+// API name: job_id +func (r *PutDatafeed) JobId(id string) *PutDatafeed { + r.req.JobId = &id + + return r +} + +// MaxEmptySearches If a real-time datafeed has never seen any data (including during any initial +// training period), it automatically +// stops and closes the associated job after this many real-time searches return +// no documents. In other words, +// it stops after `frequency` times `max_empty_searches` of real-time operation. +// If not set, a datafeed with no +// end time that sees no data remains started until it is explicitly stopped. By +// default, it is not set. +// API name: max_empty_searches +func (r *PutDatafeed) MaxEmptySearches(maxemptysearches int) *PutDatafeed { + r.req.MaxEmptySearches = &maxemptysearches + + return r +} + +// Query The Elasticsearch query domain-specific language (DSL). This value +// corresponds to the query object in an +// Elasticsearch search POST body. All the options that are supported by +// Elasticsearch can be used, as this +// object is passed verbatim to Elasticsearch. +// API name: query +func (r *PutDatafeed) Query(query *types.Query) *PutDatafeed { + + r.req.Query = query + + return r +} + +// QueryDelay The number of seconds behind real time that data is queried. For example, if +// data from 10:04 a.m. might +// not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 +// seconds. The default +// value is randomly selected between `60s` and `120s`. This randomness improves +// the query performance +// when there are multiple jobs running on the same node. +// API name: query_delay +func (r *PutDatafeed) QueryDelay(duration types.Duration) *PutDatafeed { + r.req.QueryDelay = duration + + return r +} + +// RuntimeMappings Specifies runtime fields for the datafeed search. 
+// API name: runtime_mappings +func (r *PutDatafeed) RuntimeMappings(runtimefields types.RuntimeFields) *PutDatafeed { + r.req.RuntimeMappings = runtimefields + + return r +} + +// ScriptFields Specifies scripts that evaluate custom expressions and returns script fields +// to the datafeed. +// The detector configuration objects in a job can contain functions that use +// these script fields. +// API name: script_fields +func (r *PutDatafeed) ScriptFields(scriptfields map[string]types.ScriptField) *PutDatafeed { + + r.req.ScriptFields = scriptfields + + return r +} + +// ScrollSize The size parameter that is used in Elasticsearch searches when the datafeed +// does not use aggregations. +// The maximum value is the value of `index.max_result_window`, which is 10,000 +// by default. +// API name: scroll_size +func (r *PutDatafeed) ScrollSize(scrollsize int) *PutDatafeed { + r.req.ScrollSize = &scrollsize return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdatafeed/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdatafeed/request.go index a8dcdf81e..2ca045d96 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdatafeed/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdatafeed/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putdatafeed @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package putdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_datafeed/MlPutDatafeedRequest.ts#L37-L171 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_datafeed/MlPutDatafeedRequest.ts#L37-L172 type Request struct { // Aggregations If set, the datafeed performs aggregation searches. @@ -64,8 +64,8 @@ type Request struct { // results. If the datafeed uses // aggregations, this value must be divisible by the interval of the date // histogram aggregation. - Frequency types.Duration `json:"frequency,omitempty"` - Headers map[string][]string `json:"headers,omitempty"` + Frequency types.Duration `json:"frequency,omitempty"` + Headers types.HttpHeaders `json:"headers,omitempty"` // Indices An array of index names. Wildcards are supported. If any of the indices are // in remote clusters, the machine // learning nodes must have the `remote_cluster_client` role. @@ -98,7 +98,7 @@ type Request struct { // when there are multiple jobs running on the same node. QueryDelay types.Duration `json:"query_delay,omitempty"` // RuntimeMappings Specifies runtime fields for the datafeed search. - RuntimeMappings map[string]types.RuntimeField `json:"runtime_mappings,omitempty"` + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` // ScriptFields Specifies scripts that evaluate custom expressions and returns script fields // to the datafeed. 
// The detector configuration objects in a job can contain functions that use diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdatafeed/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdatafeed/response.go index 4fbe76c7f..c3172c161 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdatafeed/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdatafeed/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putdatafeed @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_datafeed/MlPutDatafeedResponse.ts#L31-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_datafeed/MlPutDatafeedResponse.ts#L31-L49 type Response struct { Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` @@ -41,7 +41,7 @@ type Response struct { MaxEmptySearches *int `json:"max_empty_searches,omitempty"` Query types.Query `json:"query"` QueryDelay types.Duration `json:"query_delay"` - RuntimeMappings map[string]types.RuntimeField `json:"runtime_mappings,omitempty"` + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` ScriptFields map[string]types.ScriptField `json:"script_fields,omitempty"` ScrollSize int `json:"scroll_size"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdataframeanalytics/put_data_frame_analytics.go 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdataframeanalytics/put_data_frame_analytics.go index abfc4730f..8466aee88 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdataframeanalytics/put_data_frame_analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdataframeanalytics/put_data_frame_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Instantiates a data frame analytics job. package putdataframeanalytics @@ -52,8 +52,9 @@ type PutDataFrameAnalytics struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewPutDataFrameAnalyticsFunc(tp elastictransport.Interface) NewPutDataFrame return func(id string) *PutDataFrameAnalytics { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *PutDataFrameAnalytics { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *PutDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Request, var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *PutDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Request, } r.buf.Write(data) + } r.path.Scheme = "http" @@ -207,7 +221,6 @@ func (r PutDataFrameAnalytics) Do(ctx context.Context) (*Response, error) { } return response, nil 
- } errorResponse := types.NewElasticsearchError() @@ -216,6 +229,10 @@ func (r PutDataFrameAnalytics) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -230,9 +247,136 @@ func (r *PutDataFrameAnalytics) Header(key, value string) *PutDataFrameAnalytics // lowercase alphanumeric characters (a-z and 0-9), hyphens, and // underscores. It must start and end with alphanumeric characters. // API Name: id -func (r *PutDataFrameAnalytics) Id(v string) *PutDataFrameAnalytics { +func (r *PutDataFrameAnalytics) _id(id string) *PutDataFrameAnalytics { r.paramSet |= idMask - r.id = v + r.id = id + + return r +} + +// AllowLazyStart Specifies whether this job can start when there is insufficient machine +// learning node capacity for it to be immediately assigned to a node. If +// set to `false` and a machine learning node with capacity to run the job +// cannot be immediately found, the API returns an error. If set to `true`, +// the API does not return an error; the job waits in the `starting` state +// until sufficient machine learning node capacity is available. This +// behavior is also affected by the cluster-wide +// `xpack.ml.max_lazy_ml_nodes` setting. +// API name: allow_lazy_start +func (r *PutDataFrameAnalytics) AllowLazyStart(allowlazystart bool) *PutDataFrameAnalytics { + r.req.AllowLazyStart = &allowlazystart + + return r +} + +// Analysis The analysis configuration, which contains the information necessary to +// perform one of the following types of analysis: classification, outlier +// detection, or regression. +// API name: analysis +func (r *PutDataFrameAnalytics) Analysis(analysis *types.DataframeAnalysisContainer) *PutDataFrameAnalytics { + + r.req.Analysis = *analysis + + return r +} + +// AnalyzedFields Specifies `includes` and/or `excludes` patterns to select which fields +// will be included in the analysis. 
The patterns specified in `excludes` +// are applied last, therefore `excludes` takes precedence. In other words, +// if the same field is specified in both `includes` and `excludes`, then +// the field will not be included in the analysis. If `analyzed_fields` is +// not set, only the relevant fields will be included. For example, all the +// numeric fields for outlier detection. +// The supported fields vary for each type of analysis. Outlier detection +// requires numeric or `boolean` data to analyze. The algorithms don’t +// support missing values therefore fields that have data types other than +// numeric or boolean are ignored. Documents where included fields contain +// missing values, null values, or an array are also ignored. Therefore the +// `dest` index may contain documents that don’t have an outlier score. +// Regression supports fields that are numeric, `boolean`, `text`, +// `keyword`, and `ip` data types. It is also tolerant of missing values. +// Fields that are supported are included in the analysis, other fields are +// ignored. Documents where included fields contain an array with two or +// more values are also ignored. Documents in the `dest` index that don’t +// contain a results field are not included in the regression analysis. +// Classification supports fields that are numeric, `boolean`, `text`, +// `keyword`, and `ip` data types. It is also tolerant of missing values. +// Fields that are supported are included in the analysis, other fields are +// ignored. Documents where included fields contain an array with two or +// more values are also ignored. Documents in the `dest` index that don’t +// contain a results field are not included in the classification analysis. +// Classification analysis can be improved by mapping ordinal variable +// values to a single number. For example, in case of age ranges, you can +// model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. 
+// API name: analyzed_fields +func (r *PutDataFrameAnalytics) AnalyzedFields(analyzedfields *types.DataframeAnalysisAnalyzedFields) *PutDataFrameAnalytics { + + r.req.AnalyzedFields = analyzedfields + + return r +} + +// Description A description of the job. +// API name: description +func (r *PutDataFrameAnalytics) Description(description string) *PutDataFrameAnalytics { + + r.req.Description = &description + + return r +} + +// Dest The destination configuration. +// API name: dest +func (r *PutDataFrameAnalytics) Dest(dest *types.DataframeAnalyticsDestination) *PutDataFrameAnalytics { + + r.req.Dest = *dest + + return r +} + +// API name: headers +func (r *PutDataFrameAnalytics) Headers(httpheaders types.HttpHeaders) *PutDataFrameAnalytics { + r.req.Headers = httpheaders + + return r +} + +// MaxNumThreads The maximum number of threads to be used by the analysis. Using more +// threads may decrease the time necessary to complete the analysis at the +// cost of using more CPU. Note that the process may use additional threads +// for operational functionality other than the analysis itself. +// API name: max_num_threads +func (r *PutDataFrameAnalytics) MaxNumThreads(maxnumthreads int) *PutDataFrameAnalytics { + r.req.MaxNumThreads = &maxnumthreads + + return r +} + +// ModelMemoryLimit The approximate maximum amount of memory resources that are permitted for +// analytical processing. If your `elasticsearch.yml` file contains an +// `xpack.ml.max_model_memory_limit` setting, an error occurs when you try +// to create data frame analytics jobs that have `model_memory_limit` values +// greater than that setting. +// API name: model_memory_limit +func (r *PutDataFrameAnalytics) ModelMemoryLimit(modelmemorylimit string) *PutDataFrameAnalytics { + + r.req.ModelMemoryLimit = &modelmemorylimit + + return r +} + +// Source The configuration of how to source the analysis data. 
+// API name: source +func (r *PutDataFrameAnalytics) Source(source *types.DataframeAnalyticsSource) *PutDataFrameAnalytics { + + r.req.Source = *source + + return r +} + +// API name: version +func (r *PutDataFrameAnalytics) Version(versionstring string) *PutDataFrameAnalytics { + r.req.Version = &versionstring return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdataframeanalytics/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdataframeanalytics/request.go index b7a04de0f..1a70f76b7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdataframeanalytics/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdataframeanalytics/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putdataframeanalytics @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package putdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_data_frame_analytics/MlPutDataFrameAnalyticsRequest.ts#L30-L139 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_data_frame_analytics/MlPutDataFrameAnalyticsRequest.ts#L30-L141 type Request struct { // AllowLazyStart Specifies whether this job can start when there is insufficient machine @@ -78,7 +78,7 @@ type Request struct { Description *string `json:"description,omitempty"` // Dest The destination configuration. 
Dest types.DataframeAnalyticsDestination `json:"dest"` - Headers map[string][]string `json:"headers,omitempty"` + Headers types.HttpHeaders `json:"headers,omitempty"` // MaxNumThreads The maximum number of threads to be used by the analysis. Using more // threads may decrease the time necessary to complete the analysis at the // cost of using more CPU. Note that the process may use additional threads diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdataframeanalytics/response.go index 54d099272..7a2ec95f1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdataframeanalytics/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putdataframeanalytics/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putdataframeanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_data_frame_analytics/MlPutDataFrameAnalyticsResponse.ts#L31-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_data_frame_analytics/MlPutDataFrameAnalyticsResponse.ts#L31-L46 type Response struct { AllowLazyStart bool `json:"allow_lazy_start"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putfilter/put_filter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putfilter/put_filter.go index e08a2610b..2d8eba1b1 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putfilter/put_filter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putfilter/put_filter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Instantiates a filter. package putfilter @@ -52,8 +52,9 @@ type PutFilter struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewPutFilterFunc(tp elastictransport.Interface) NewPutFilter { return func(filterid string) *PutFilter { n := New(tp) - n.FilterId(filterid) + n._filterid(filterid) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *PutFilter { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *PutFilter) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *PutFilter) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -205,7 +219,6 @@ func (r PutFilter) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -214,6 +227,10 @@ func (r PutFilter) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ 
-226,9 +243,28 @@ func (r *PutFilter) Header(key, value string) *PutFilter { // FilterId A string that uniquely identifies a filter. // API Name: filterid -func (r *PutFilter) FilterId(v string) *PutFilter { +func (r *PutFilter) _filterid(filterid string) *PutFilter { r.paramSet |= filteridMask - r.filterid = v + r.filterid = filterid + + return r +} + +// Description A description of the filter. +// API name: description +func (r *PutFilter) Description(description string) *PutFilter { + + r.req.Description = &description + + return r +} + +// Items The items of the filter. A wildcard `*` can be used at the beginning or the +// end of an item. +// Up to 10000 items are allowed in each filter. +// API name: items +func (r *PutFilter) Items(items ...string) *PutFilter { + r.req.Items = items return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putfilter/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putfilter/request.go index e21ce76a9..a6becad7a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putfilter/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putfilter/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putfilter @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package putfilter // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_filter/MlPutFilterRequest.ts#L23-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_filter/MlPutFilterRequest.ts#L23-L50 type Request struct { // Description A description of the filter. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putfilter/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putfilter/response.go index 87adc0af4..8965fbeeb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putfilter/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putfilter/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putfilter // Response holds the response body struct for the package putfilter // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_filter/MlPutFilterResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_filter/MlPutFilterResponse.ts#L22-L28 type Response struct { Description string `json:"description"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putjob/put_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putjob/put_job.go index fe5542058..499d6d4e5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putjob/put_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putjob/put_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Instantiates an anomaly detection job. 
package putjob @@ -52,8 +52,9 @@ type PutJob struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewPutJobFunc(tp elastictransport.Interface) NewPutJob { return func(jobid string) *PutJob { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *PutJob { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *PutJob) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *PutJob) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -205,7 +219,6 @@ func (r PutJob) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -214,6 +227,10 @@ func (r PutJob) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -228,9 +245,188 @@ func (r *PutJob) Header(key, value string) *PutJob { // lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It // must start and end with alphanumeric characters. // API Name: jobid -func (r *PutJob) JobId(v string) *PutJob { +func (r *PutJob) _jobid(jobid string) *PutJob { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid + + return r +} + +// AllowLazyOpen Advanced configuration option. 
Specifies whether this job can open when there +// is insufficient machine learning node capacity for it to be immediately +// assigned to a node. By default, if a machine learning node with capacity to +// run the job cannot immediately be found, the open anomaly detection jobs API +// returns an error. However, this is also subject to the cluster-wide +// `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open +// anomaly detection jobs API does not return an error and the job waits in the +// opening state until sufficient machine learning node capacity is available. +// API name: allow_lazy_open +func (r *PutJob) AllowLazyOpen(allowlazyopen bool) *PutJob { + r.req.AllowLazyOpen = &allowlazyopen + + return r +} + +// AnalysisConfig Specifies how to analyze the data. After you create a job, you cannot change +// the analysis configuration; all the properties are informational. +// API name: analysis_config +func (r *PutJob) AnalysisConfig(analysisconfig *types.AnalysisConfig) *PutJob { + + r.req.AnalysisConfig = *analysisconfig + + return r +} + +// AnalysisLimits Limits can be applied for the resources required to hold the mathematical +// models in memory. These limits are approximate and can be set per job. They +// do not control the memory used by other processes, for example the +// Elasticsearch Java processes. +// API name: analysis_limits +func (r *PutJob) AnalysisLimits(analysislimits *types.AnalysisLimits) *PutJob { + + r.req.AnalysisLimits = analysislimits + + return r +} + +// BackgroundPersistInterval Advanced configuration option. The time between each periodic persistence of +// the model. The default value is a randomized value between 3 to 4 hours, +// which avoids all jobs persisting at exactly the same time. The smallest +// allowed value is 1 hour. For very large models (several GB), persistence +// could take 10-20 minutes, so do not set the `background_persist_interval` +// value too low. 
+// API name: background_persist_interval +func (r *PutJob) BackgroundPersistInterval(duration types.Duration) *PutJob { + r.req.BackgroundPersistInterval = duration + + return r +} + +// CustomSettings Advanced configuration option. Contains custom meta data about the job. +// API name: custom_settings +func (r *PutJob) CustomSettings(customsettings json.RawMessage) *PutJob { + r.req.CustomSettings = customsettings + + return r +} + +// DailyModelSnapshotRetentionAfterDays Advanced configuration option, which affects the automatic removal of old +// model snapshots for this job. It specifies a period of time (in days) after +// which only the first snapshot per day is retained. This period is relative to +// the timestamp of the most recent snapshot for this job. Valid values range +// from 0 to `model_snapshot_retention_days`. +// API name: daily_model_snapshot_retention_after_days +func (r *PutJob) DailyModelSnapshotRetentionAfterDays(dailymodelsnapshotretentionafterdays int64) *PutJob { + + r.req.DailyModelSnapshotRetentionAfterDays = &dailymodelsnapshotretentionafterdays + + return r +} + +// DataDescription Defines the format of the input data when you send data to the job by using +// the post data API. Note that when configure a datafeed, these properties are +// automatically set. When data is received via the post data API, it is not +// stored in Elasticsearch. Only the results for anomaly detection are retained. +// API name: data_description +func (r *PutJob) DataDescription(datadescription *types.DataDescription) *PutJob { + + r.req.DataDescription = *datadescription + + return r +} + +// DatafeedConfig Defines a datafeed for the anomaly detection job. If Elasticsearch security +// features are enabled, your datafeed remembers which roles the user who +// created it had at the time of creation and runs the query using those same +// roles. If you provide secondary authorization headers, those credentials are +// used instead. 
+// API name: datafeed_config +func (r *PutJob) DatafeedConfig(datafeedconfig *types.DatafeedConfig) *PutJob { + + r.req.DatafeedConfig = datafeedconfig + + return r +} + +// Description A description of the job. +// API name: description +func (r *PutJob) Description(description string) *PutJob { + + r.req.Description = &description + + return r +} + +// Groups A list of job groups. A job can belong to no groups or many. +// API name: groups +func (r *PutJob) Groups(groups ...string) *PutJob { + r.req.Groups = groups + + return r +} + +// ModelPlotConfig This advanced configuration option stores model information along with the +// results. It provides a more detailed view into anomaly detection. If you +// enable model plot it can add considerable overhead to the performance of the +// system; it is not feasible for jobs with many entities. Model plot provides a +// simplified and indicative view of the model and its bounds. It does not +// display complex features such as multivariate correlations or multimodal +// data. As such, anomalies may occasionally be reported which cannot be seen in +// the model plot. Model plot config can be configured when the job is created +// or updated later. It must be disabled if performance issues are experienced. +// API name: model_plot_config +func (r *PutJob) ModelPlotConfig(modelplotconfig *types.ModelPlotConfig) *PutJob { + + r.req.ModelPlotConfig = modelplotconfig + + return r +} + +// ModelSnapshotRetentionDays Advanced configuration option, which affects the automatic removal of old +// model snapshots for this job. It specifies the maximum period of time (in +// days) that snapshots are retained. This period is relative to the timestamp +// of the most recent snapshot for this job. By default, snapshots ten days +// older than the newest snapshot are deleted. 
+// API name: model_snapshot_retention_days +func (r *PutJob) ModelSnapshotRetentionDays(modelsnapshotretentiondays int64) *PutJob { + + r.req.ModelSnapshotRetentionDays = &modelsnapshotretentiondays + + return r +} + +// RenormalizationWindowDays Advanced configuration option. The period over which adjustments to the score +// are applied, as new data is seen. The default value is the longer of 30 days +// or 100 bucket spans. +// API name: renormalization_window_days +func (r *PutJob) RenormalizationWindowDays(renormalizationwindowdays int64) *PutJob { + + r.req.RenormalizationWindowDays = &renormalizationwindowdays + + return r +} + +// ResultsIndexName A text string that affects the name of the machine learning results index. By +// default, the job generates an index named `.ml-anomalies-shared`. +// API name: results_index_name +func (r *PutJob) ResultsIndexName(indexname string) *PutJob { + r.req.ResultsIndexName = &indexname + + return r +} + +// ResultsRetentionDays Advanced configuration option. The period of time (in days) that results are +// retained. Age is calculated relative to the timestamp of the latest bucket +// result. If this property has a non-null value, once per day at 00:30 (server +// time), results that are the specified number of days older than the latest +// bucket result are deleted from Elasticsearch. The default value is null, +// which means all results are retained. Annotations generated by the system +// also count as results for retention purposes; they are deleted after the same +// number of days as results. Annotations added by users are retained forever. 
+// API name: results_retention_days +func (r *PutJob) ResultsRetentionDays(resultsretentiondays int64) *PutJob { + + r.req.ResultsRetentionDays = &resultsretentiondays return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putjob/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putjob/request.go index f48576e93..78a33bc0d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putjob/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putjob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putjob @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package putjob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_job/MlPutJobRequest.ts#L30-L111 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_job/MlPutJobRequest.ts#L30-L111 type Request struct { // AllowLazyOpen Advanced configuration option. Specifies whether this job can open when there diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putjob/response.go index 29bacdc97..b4490767d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putjob/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/putjob/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putjob @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package putjob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_job/MlPutJobResponse.ts#L29-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_job/MlPutJobResponse.ts#L29-L52 type Response struct { AllowLazyOpen bool `json:"allow_lazy_open"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodel/put_trained_model.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodel/put_trained_model.go index 29dd296b2..1322768ef 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodel/put_trained_model.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodel/put_trained_model.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates an inference trained model. 
package puttrainedmodel @@ -35,6 +35,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/trainedmodeltype" ) const ( @@ -53,8 +54,9 @@ type PutTrainedModel struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +72,7 @@ func NewPutTrainedModelFunc(tp elastictransport.Interface) NewPutTrainedModel { return func(modelid string) *PutTrainedModel { n := New(tp) - n.ModelId(modelid) + n._modelid(modelid) return n } @@ -85,6 +87,8 @@ func New(tp elastictransport.Interface) *PutTrainedModel { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +118,19 @@ func (r *PutTrainedModel) HttpRequest(ctx context.Context) (*http.Request, error var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +138,7 @@ func (r *PutTrainedModel) HttpRequest(ctx context.Context) (*http.Request, error } r.buf.Write(data) + } r.path.Scheme = "http" @@ -206,7 +221,6 @@ func (r PutTrainedModel) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -215,6 +229,10 @@ func (r PutTrainedModel) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -227,9 +245,9 @@ func (r *PutTrainedModel) Header(key, value string) *PutTrainedModel { // ModelId The unique identifier of the trained model. 
// API Name: modelid -func (r *PutTrainedModel) ModelId(v string) *PutTrainedModel { +func (r *PutTrainedModel) _modelid(modelid string) *PutTrainedModel { r.paramSet |= modelidMask - r.modelid = v + r.modelid = modelid return r } @@ -237,8 +255,128 @@ func (r *PutTrainedModel) ModelId(v string) *PutTrainedModel { // DeferDefinitionDecompression If set to `true` and a `compressed_definition` is provided, the request // defers definition decompression and skips relevant validations. // API name: defer_definition_decompression -func (r *PutTrainedModel) DeferDefinitionDecompression(b bool) *PutTrainedModel { - r.values.Set("defer_definition_decompression", strconv.FormatBool(b)) +func (r *PutTrainedModel) DeferDefinitionDecompression(deferdefinitiondecompression bool) *PutTrainedModel { + r.values.Set("defer_definition_decompression", strconv.FormatBool(deferdefinitiondecompression)) + + return r +} + +// CompressedDefinition The compressed (GZipped and Base64 encoded) inference definition of the +// model. If compressed_definition is specified, then definition cannot be +// specified. +// API name: compressed_definition +func (r *PutTrainedModel) CompressedDefinition(compresseddefinition string) *PutTrainedModel { + + r.req.CompressedDefinition = &compresseddefinition + + return r +} + +// Definition The inference definition for the model. If definition is specified, then +// compressed_definition cannot be specified. +// API name: definition +func (r *PutTrainedModel) Definition(definition *types.Definition) *PutTrainedModel { + + r.req.Definition = definition + + return r +} + +// Description A human-readable description of the inference trained model. +// API name: description +func (r *PutTrainedModel) Description(description string) *PutTrainedModel { + + r.req.Description = &description + + return r +} + +// InferenceConfig The default configuration for inference. This can be either a regression +// or classification configuration. 
It must match the underlying +// definition.trained_model's target_type. For pre-packaged models such as +// ELSER the config is not required. +// API name: inference_config +func (r *PutTrainedModel) InferenceConfig(inferenceconfig *types.InferenceConfigCreateContainer) *PutTrainedModel { + + r.req.InferenceConfig = inferenceconfig + + return r +} + +// Input The input field names for the model definition. +// API name: input +func (r *PutTrainedModel) Input(input *types.Input) *PutTrainedModel { + + r.req.Input = input + + return r +} + +// Metadata An object map that contains metadata about the model. +// API name: metadata +// +// metadata should be a json.RawMessage or a structure +// if a structure is provided, the client will defer a json serialization +// prior to sending the payload to Elasticsearch. +func (r *PutTrainedModel) Metadata(metadata interface{}) *PutTrainedModel { + switch casted := metadata.(type) { + case json.RawMessage: + r.req.Metadata = casted + default: + r.deferred = append(r.deferred, func(request *Request) error { + data, err := json.Marshal(metadata) + if err != nil { + return err + } + r.req.Metadata = data + return nil + }) + } + + return r +} + +// ModelSizeBytes The estimated memory usage in bytes to keep the trained model in memory. +// This property is supported only if defer_definition_decompression is true +// or the model definition is not supplied. +// API name: model_size_bytes +func (r *PutTrainedModel) ModelSizeBytes(modelsizebytes int64) *PutTrainedModel { + + r.req.ModelSizeBytes = &modelsizebytes + + return r +} + +// ModelType The model type. +// API name: model_type +func (r *PutTrainedModel) ModelType(modeltype trainedmodeltype.TrainedModelType) *PutTrainedModel { + r.req.ModelType = &modeltype + + return r +} + +// PlatformArchitecture The platform architecture (if applicable) of the trained mode. 
If the model +// only works on one platform, because it is heavily optimized for a particular +// processor architecture and OS combination, then this field specifies which. +// The format of the string must match the platform identifiers used by +// Elasticsearch, +// so one of, `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, +// `darwin-aarch64`, +// or `windows-x86_64`. For portable models (those that work independent of +// processor +// architecture or OS features), leave this field unset. +// API name: platform_architecture +func (r *PutTrainedModel) PlatformArchitecture(platformarchitecture string) *PutTrainedModel { + + r.req.PlatformArchitecture = &platformarchitecture + + return r +} + +// Tags An array of tags to organize the model. +// API name: tags +func (r *PutTrainedModel) Tags(tags ...string) *PutTrainedModel { + r.req.Tags = tags return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodel/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodel/request.go index f6429b0ed..44abc2526 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodel/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodel/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package puttrainedmodel @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package puttrainedmodel // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model/MlPutTrainedModelRequest.ts#L28-L94 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model/MlPutTrainedModelRequest.ts#L28-L106 type Request struct { // CompressedDefinition The compressed (GZipped and Base64 encoded) inference definition of the @@ -44,8 +44,9 @@ type Request struct { Description *string `json:"description,omitempty"` // InferenceConfig The default configuration for inference. This can be either a regression // or classification configuration. It must match the underlying - // definition.trained_model's target_type. - InferenceConfig types.InferenceConfigCreateContainer `json:"inference_config"` + // definition.trained_model's target_type. For pre-packaged models such as + // ELSER the config is not required. + InferenceConfig *types.InferenceConfigCreateContainer `json:"inference_config,omitempty"` // Input The input field names for the model definition. Input *types.Input `json:"input,omitempty"` // Metadata An object map that contains metadata about the model. @@ -56,6 +57,17 @@ type Request struct { ModelSizeBytes *int64 `json:"model_size_bytes,omitempty"` // ModelType The model type. ModelType *trainedmodeltype.TrainedModelType `json:"model_type,omitempty"` + // PlatformArchitecture The platform architecture (if applicable) of the trained mode. 
If the model + // only works on one platform, because it is heavily optimized for a particular + // processor architecture and OS combination, then this field specifies which. + // The format of the string must match the platform identifiers used by + // Elasticsearch, + // so one of, `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, + // `darwin-aarch64`, + // or `windows-x86_64`. For portable models (those that work independent of + // processor + // architecture or OS features), leave this field unset. + PlatformArchitecture *string `json:"platform_architecture,omitempty"` // Tags An array of tags to organize the model. Tags []string `json:"tags,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodel/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodel/response.go index cf9733b7f..d81cb5df4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodel/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodel/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package puttrainedmodel @@ -27,7 +27,7 @@ import ( // Response holds the response body struct for the package puttrainedmodel // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model/MlPutTrainedModelResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model/MlPutTrainedModelResponse.ts#L22-L24 type Response struct { CompressedDefinition *string `json:"compressed_definition,omitempty"` @@ -43,10 +43,13 @@ type Response struct { EstimatedHeapMemoryUsageBytes *int `json:"estimated_heap_memory_usage_bytes,omitempty"` // EstimatedOperations The estimated number of operations to use the trained model. EstimatedOperations *int `json:"estimated_operations,omitempty"` + // FullyDefined True if the full model definition is present. + FullyDefined *bool `json:"fully_defined,omitempty"` // InferenceConfig The default configuration for inference. This can be either a regression, // classification, or one of the many NLP focused configurations. It must match - // the underlying definition.trained_model's target_type. - InferenceConfig types.InferenceConfigCreateContainer `json:"inference_config"` + // the underlying definition.trained_model's target_type. For pre-packaged + // models such as ELSER the config is not required. + InferenceConfig *types.InferenceConfigCreateContainer `json:"inference_config,omitempty"` // Input The input field names for the model definition. Input types.TrainedModelConfigInput `json:"input"` // LicenseLevel The license level of the trained model. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelalias/put_trained_model_alias.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelalias/put_trained_model_alias.go index 3b91e8a9f..b387890f8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelalias/put_trained_model_alias.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelalias/put_trained_model_alias.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates a new model alias (or reassigns an existing one) to refer to the // trained model @@ -72,9 +72,9 @@ func NewPutTrainedModelAliasFunc(tp elastictransport.Interface) NewPutTrainedMod return func(modelid, modelalias string) *PutTrainedModelAlias { n := New(tp) - n.ModelAlias(modelalias) + n._modelalias(modelalias) - n.ModelId(modelid) + n._modelid(modelid) return n } @@ -189,7 +189,6 @@ func (r PutTrainedModelAlias) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -198,6 +197,10 @@ func (r PutTrainedModelAlias) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -231,18 +234,18 @@ func (r *PutTrainedModelAlias) Header(key, value string) *PutTrainedModelAlias { // ModelAlias The alias to create or update. This value cannot end in numbers. 
// API Name: modelalias -func (r *PutTrainedModelAlias) ModelAlias(v string) *PutTrainedModelAlias { +func (r *PutTrainedModelAlias) _modelalias(modelalias string) *PutTrainedModelAlias { r.paramSet |= modelaliasMask - r.modelalias = v + r.modelalias = modelalias return r } // ModelId The identifier for the trained model that the alias refers to. // API Name: modelid -func (r *PutTrainedModelAlias) ModelId(v string) *PutTrainedModelAlias { +func (r *PutTrainedModelAlias) _modelid(modelid string) *PutTrainedModelAlias { r.paramSet |= modelidMask - r.modelid = v + r.modelid = modelid return r } @@ -251,8 +254,8 @@ func (r *PutTrainedModelAlias) ModelId(v string) *PutTrainedModelAlias { // model if it is already assigned to a different model. If the alias is // already assigned and this parameter is false, the API returns an error. // API name: reassign -func (r *PutTrainedModelAlias) Reassign(b bool) *PutTrainedModelAlias { - r.values.Set("reassign", strconv.FormatBool(b)) +func (r *PutTrainedModelAlias) Reassign(reassign bool) *PutTrainedModelAlias { + r.values.Set("reassign", strconv.FormatBool(reassign)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelalias/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelalias/response.go index 56fc4400a..19ef95a9c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelalias/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelalias/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package puttrainedmodelalias // Response holds the response body struct for the package puttrainedmodelalias // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model_alias/MlPutTrainedModelAliasResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model_alias/MlPutTrainedModelAliasResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodeldefinitionpart/put_trained_model_definition_part.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodeldefinitionpart/put_trained_model_definition_part.go index a46a06eef..593ab7309 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodeldefinitionpart/put_trained_model_definition_part.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodeldefinitionpart/put_trained_model_definition_part.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates part of a trained model definition package puttrainedmodeldefinitionpart @@ -54,8 +54,9 @@ type PutTrainedModelDefinitionPart struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -72,9 +73,9 @@ func NewPutTrainedModelDefinitionPartFunc(tp elastictransport.Interface) NewPutT return func(modelid, part string) *PutTrainedModelDefinitionPart { n := New(tp) - n.ModelId(modelid) + n._modelid(modelid) - n.Part(part) + n._part(part) return n } @@ -89,6 +90,8 @@ func New(tp elastictransport.Interface) *PutTrainedModelDefinitionPart { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -118,9 +121,19 @@ func (r *PutTrainedModelDefinitionPart) HttpRequest(ctx context.Context) (*http. var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -128,6 +141,7 @@ func (r *PutTrainedModelDefinitionPart) HttpRequest(ctx context.Context) (*http. 
} r.buf.Write(data) + } r.path.Scheme = "http" @@ -215,7 +229,6 @@ func (r PutTrainedModelDefinitionPart) Do(ctx context.Context) (*Response, error } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -224,6 +237,10 @@ func (r PutTrainedModelDefinitionPart) Do(ctx context.Context) (*Response, error return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -236,9 +253,9 @@ func (r *PutTrainedModelDefinitionPart) Header(key, value string) *PutTrainedMod // ModelId The unique identifier of the trained model. // API Name: modelid -func (r *PutTrainedModelDefinitionPart) ModelId(v string) *PutTrainedModelDefinitionPart { +func (r *PutTrainedModelDefinitionPart) _modelid(modelid string) *PutTrainedModelDefinitionPart { r.paramSet |= modelidMask - r.modelid = v + r.modelid = modelid return r } @@ -248,9 +265,35 @@ func (r *PutTrainedModelDefinitionPart) ModelId(v string) *PutTrainedModelDefini // order of their part number. The first part must be `0` and the final part // must be `total_parts - 1`. // API Name: part -func (r *PutTrainedModelDefinitionPart) Part(v string) *PutTrainedModelDefinitionPart { +func (r *PutTrainedModelDefinitionPart) _part(part string) *PutTrainedModelDefinitionPart { r.paramSet |= partMask - r.part = v + r.part = part + + return r +} + +// Definition The definition part for the model. Must be a base64 encoded string. +// API name: definition +func (r *PutTrainedModelDefinitionPart) Definition(definition string) *PutTrainedModelDefinitionPart { + + r.req.Definition = definition + + return r +} + +// TotalDefinitionLength The total uncompressed definition length in bytes. Not base64 encoded. 
+// API name: total_definition_length +func (r *PutTrainedModelDefinitionPart) TotalDefinitionLength(totaldefinitionlength int64) *PutTrainedModelDefinitionPart { + + r.req.TotalDefinitionLength = totaldefinitionlength + + return r +} + +// TotalParts The total number of parts that will be uploaded. Must be greater than 0. +// API name: total_parts +func (r *PutTrainedModelDefinitionPart) TotalParts(totalparts int) *PutTrainedModelDefinitionPart { + r.req.TotalParts = totalparts return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodeldefinitionpart/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodeldefinitionpart/request.go index 0f7976669..bf2d65e4c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodeldefinitionpart/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodeldefinitionpart/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package puttrainedmodeldefinitionpart @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package puttrainedmodeldefinitionpart // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model_definition_part/MlPutTrainedModelDefinitionPartRequest.ts#L24-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model_definition_part/MlPutTrainedModelDefinitionPartRequest.ts#L24-L57 type Request struct { // Definition The definition part for the model. Must be a base64 encoded string. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodeldefinitionpart/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodeldefinitionpart/response.go index bbdb67b4c..78b766d39 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodeldefinitionpart/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodeldefinitionpart/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package puttrainedmodeldefinitionpart // Response holds the response body struct for the package puttrainedmodeldefinitionpart // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model_definition_part/MlPutTrainedModelDefinitionPartResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model_definition_part/MlPutTrainedModelDefinitionPartResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelvocabulary/put_trained_model_vocabulary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelvocabulary/put_trained_model_vocabulary.go index fcb4f5bfb..40f87d7a0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelvocabulary/put_trained_model_vocabulary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelvocabulary/put_trained_model_vocabulary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates a trained model vocabulary package puttrainedmodelvocabulary @@ -52,8 +52,9 @@ type PutTrainedModelVocabulary struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewPutTrainedModelVocabularyFunc(tp elastictransport.Interface) NewPutTrain return func(modelid string) *PutTrainedModelVocabulary { n := New(tp) - n.ModelId(modelid) + n._modelid(modelid) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *PutTrainedModelVocabulary { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *PutTrainedModelVocabulary) HttpRequest(ctx context.Context) (*http.Requ var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *PutTrainedModelVocabulary) HttpRequest(ctx context.Context) (*http.Requ } r.buf.Write(data) + } r.path.Scheme = "http" @@ -207,7 +221,6 @@ func (r PutTrainedModelVocabulary) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -216,6 +229,10 @@ func (r PutTrainedModelVocabulary) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -228,9 +245,25 @@ func (r *PutTrainedModelVocabulary) Header(key, value string) *PutTrainedModelVo // ModelId The unique identifier of the trained model. 
// API Name: modelid -func (r *PutTrainedModelVocabulary) ModelId(v string) *PutTrainedModelVocabulary { +func (r *PutTrainedModelVocabulary) _modelid(modelid string) *PutTrainedModelVocabulary { r.paramSet |= modelidMask - r.modelid = v + r.modelid = modelid + + return r +} + +// Merges The optional model merges if required by the tokenizer. +// API name: merges +func (r *PutTrainedModelVocabulary) Merges(merges ...string) *PutTrainedModelVocabulary { + r.req.Merges = merges + + return r +} + +// Vocabulary The model vocabulary, which must not be empty. +// API name: vocabulary +func (r *PutTrainedModelVocabulary) Vocabulary(vocabularies ...string) *PutTrainedModelVocabulary { + r.req.Vocabulary = vocabularies return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelvocabulary/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelvocabulary/request.go index dd43c9793..99620c108 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelvocabulary/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelvocabulary/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package puttrainedmodelvocabulary @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package puttrainedmodelvocabulary // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model_vocabulary/MlPutTrainedModelVocabularyRequest.ts#L23-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model_vocabulary/MlPutTrainedModelVocabularyRequest.ts#L23-L52 type Request struct { // Merges The optional model merges if required by the tokenizer. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelvocabulary/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelvocabulary/response.go index cf2a49c03..e76dbc21c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelvocabulary/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/puttrainedmodelvocabulary/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package puttrainedmodelvocabulary // Response holds the response body struct for the package puttrainedmodelvocabulary // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model_vocabulary/MlPutTrainedModelVocabularyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model_vocabulary/MlPutTrainedModelVocabularyResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/resetjob/reset_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/resetjob/reset_job.go index 7cbade849..10f1e687e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/resetjob/reset_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/resetjob/reset_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Resets an existing anomaly detection job. 
package resetjob @@ -68,7 +68,7 @@ func NewResetJobFunc(tp elastictransport.Interface) NewResetJob { return func(jobid string) *ResetJob { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -173,7 +173,6 @@ func (r ResetJob) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -182,6 +181,10 @@ func (r ResetJob) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -215,9 +218,9 @@ func (r *ResetJob) Header(key, value string) *ResetJob { // JobId The ID of the job to reset. // API Name: jobid -func (r *ResetJob) JobId(v string) *ResetJob { +func (r *ResetJob) _jobid(jobid string) *ResetJob { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } @@ -225,8 +228,8 @@ func (r *ResetJob) JobId(v string) *ResetJob { // WaitForCompletion Should this request wait until the operation has completed before // returning. // API name: wait_for_completion -func (r *ResetJob) WaitForCompletion(b bool) *ResetJob { - r.values.Set("wait_for_completion", strconv.FormatBool(b)) +func (r *ResetJob) WaitForCompletion(waitforcompletion bool) *ResetJob { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) return r } @@ -236,8 +239,8 @@ func (r *ResetJob) WaitForCompletion(b bool) *ResetJob { // is // reset. 
// API name: delete_user_annotations -func (r *ResetJob) DeleteUserAnnotations(b bool) *ResetJob { - r.values.Set("delete_user_annotations", strconv.FormatBool(b)) +func (r *ResetJob) DeleteUserAnnotations(deleteuserannotations bool) *ResetJob { + r.values.Set("delete_user_annotations", strconv.FormatBool(deleteuserannotations)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/resetjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/resetjob/response.go index 824a07668..57ac4eb0f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/resetjob/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/resetjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package resetjob // Response holds the response body struct for the package resetjob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/reset_job/MlResetJobResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/reset_job/MlResetJobResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/revertmodelsnapshot/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/revertmodelsnapshot/request.go index 9f6954454..9aa42f1e7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/revertmodelsnapshot/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/revertmodelsnapshot/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package revertmodelsnapshot @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package revertmodelsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/revert_model_snapshot/MlRevertModelSnapshotRequest.ts#L23-L69 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/revert_model_snapshot/MlRevertModelSnapshotRequest.ts#L23-L69 type Request struct { // DeleteInterveningResults Refer to the description for the `delete_intervening_results` query diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/revertmodelsnapshot/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/revertmodelsnapshot/response.go index 69cacef7d..b2f7ef4a8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/revertmodelsnapshot/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/revertmodelsnapshot/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package revertmodelsnapshot @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package revertmodelsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/revert_model_snapshot/MlRevertModelSnapshotResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/revert_model_snapshot/MlRevertModelSnapshotResponse.ts#L22-L24 type Response struct { Model types.ModelSnapshot `json:"model"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/revertmodelsnapshot/revert_model_snapshot.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/revertmodelsnapshot/revert_model_snapshot.go index 62fcb5018..633510d8d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/revertmodelsnapshot/revert_model_snapshot.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/revertmodelsnapshot/revert_model_snapshot.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Reverts to a specific snapshot. 
package revertmodelsnapshot @@ -30,7 +30,6 @@ import ( "io" "net/http" "net/url" - "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" @@ -55,8 +54,9 @@ type RevertModelSnapshot struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -73,9 +73,9 @@ func NewRevertModelSnapshotFunc(tp elastictransport.Interface) NewRevertModelSna return func(jobid, snapshotid string) *RevertModelSnapshot { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) - n.SnapshotId(snapshotid) + n._snapshotid(snapshotid) return n } @@ -90,6 +90,8 @@ func New(tp elastictransport.Interface) *RevertModelSnapshot { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -119,9 +121,19 @@ func (r *RevertModelSnapshot) HttpRequest(ctx context.Context) (*http.Request, e var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -129,6 +141,7 @@ func (r *RevertModelSnapshot) HttpRequest(ctx context.Context) (*http.Request, e } r.buf.Write(data) + } r.path.Scheme = "http" @@ -218,7 +231,6 @@ func (r RevertModelSnapshot) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -227,6 +239,10 @@ func (r RevertModelSnapshot) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -239,9 +255,9 @@ func (r *RevertModelSnapshot) Header(key, value string) *RevertModelSnapshot { // JobId Identifier for the anomaly detection job. 
// API Name: jobid -func (r *RevertModelSnapshot) JobId(v string) *RevertModelSnapshot { +func (r *RevertModelSnapshot) _jobid(jobid string) *RevertModelSnapshot { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } @@ -250,22 +266,18 @@ func (r *RevertModelSnapshot) JobId(v string) *RevertModelSnapshot { // snapshot means the anomaly detection job starts learning a new model from // scratch when it is started. // API Name: snapshotid -func (r *RevertModelSnapshot) SnapshotId(v string) *RevertModelSnapshot { +func (r *RevertModelSnapshot) _snapshotid(snapshotid string) *RevertModelSnapshot { r.paramSet |= snapshotidMask - r.snapshotid = v + r.snapshotid = snapshotid return r } -// DeleteInterveningResults If true, deletes the results in the time period between the latest -// results and the time of the reverted snapshot. It also resets the model -// to accept records for this time period. If you choose not to delete -// intervening results when reverting a snapshot, the job will not accept -// input data that is older than the current time. If you want to resend -// data, then delete the intervening results. +// DeleteInterveningResults Refer to the description for the `delete_intervening_results` query +// parameter. 
// API name: delete_intervening_results -func (r *RevertModelSnapshot) DeleteInterveningResults(b bool) *RevertModelSnapshot { - r.values.Set("delete_intervening_results", strconv.FormatBool(b)) +func (r *RevertModelSnapshot) DeleteInterveningResults(deleteinterveningresults bool) *RevertModelSnapshot { + r.req.DeleteInterveningResults = &deleteinterveningresults return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/setupgrademode/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/setupgrademode/response.go index 784468381..168365c92 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/setupgrademode/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/setupgrademode/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package setupgrademode // Response holds the response body struct for the package setupgrademode // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/set_upgrade_mode/MlSetUpgradeModeResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/set_upgrade_mode/MlSetUpgradeModeResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/setupgrademode/set_upgrade_mode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/setupgrademode/set_upgrade_mode.go index c4933e6fb..d00978d78 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/setupgrademode/set_upgrade_mode.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/setupgrademode/set_upgrade_mode.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Sets a cluster wide upgrade_mode setting that prepares machine learning // indices for an upgrade. @@ -162,7 +162,6 @@ func (r SetUpgradeMode) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -171,6 +170,10 @@ func (r SetUpgradeMode) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -206,16 +209,16 @@ func (r *SetUpgradeMode) Header(key, value string) *SetUpgradeMode { // and datafeed tasks and prohibits new job and datafeed tasks from // starting. // API name: enabled -func (r *SetUpgradeMode) Enabled(b bool) *SetUpgradeMode { - r.values.Set("enabled", strconv.FormatBool(b)) +func (r *SetUpgradeMode) Enabled(enabled bool) *SetUpgradeMode { + r.values.Set("enabled", strconv.FormatBool(enabled)) return r } // Timeout The time to wait for the request to be completed. 
// API name: timeout -func (r *SetUpgradeMode) Timeout(v string) *SetUpgradeMode { - r.values.Set("timeout", v) +func (r *SetUpgradeMode) Timeout(duration string) *SetUpgradeMode { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdatafeed/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdatafeed/request.go index fe96bd880..64d41b59d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdatafeed/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdatafeed/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package startdatafeed @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package startdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/start_datafeed/MlStartDatafeedRequest.ts#L24-L91 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/start_datafeed/MlStartDatafeedRequest.ts#L24-L91 type Request struct { // End Refer to the description for the `end` query parameter. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdatafeed/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdatafeed/response.go index 6c957c2db..62663de8d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdatafeed/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdatafeed/response.go @@ -16,19 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package startdatafeed +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Response holds the response body struct for the package startdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/start_datafeed/MlStartDatafeedResponse.ts#L22-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/start_datafeed/MlStartDatafeedResponse.ts#L22-L34 type Response struct { - // Node The ID of the node that the datafeed was started on. If the datafeed is - // allowed to open lazily and has not yet - // been assigned to a node, this value is an empty string. + // Node The ID of the node that the job was started on. In serverless this will be + // the "serverless". + // If the job is allowed to open lazily and has not yet been assigned to a node, + // this value is an empty string. Node []string `json:"node"` // Started For a successful response, this value is always `true`. On failure, an // exception is returned instead. 
@@ -40,3 +49,52 @@ func NewResponse() *Response { r := &Response{} return r } + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "node": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Node = append(s.Node, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Node); err != nil { + return err + } + } + + case "started": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Started = value + case bool: + s.Started = v + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdatafeed/start_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdatafeed/start_datafeed.go index c1f6c6dac..21f562bfc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdatafeed/start_datafeed.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdatafeed/start_datafeed.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Starts one or more datafeeds. 
package startdatafeed @@ -52,8 +52,9 @@ type StartDatafeed struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewStartDatafeedFunc(tp elastictransport.Interface) NewStartDatafeed { return func(datafeedid string) *StartDatafeed { n := New(tp) - n.DatafeedId(datafeedid) + n._datafeedid(datafeedid) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *StartDatafeed { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *StartDatafeed) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *StartDatafeed) HttpRequest(ctx context.Context) (*http.Request, error) } r.buf.Write(data) + } r.path.Scheme = "http" @@ -207,7 +221,6 @@ func (r StartDatafeed) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -216,6 +229,10 @@ func (r StartDatafeed) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -232,57 +249,33 @@ func (r *StartDatafeed) Header(key, value string) *StartDatafeed { // start and end with alphanumeric // characters. 
// API Name: datafeedid -func (r *StartDatafeed) DatafeedId(v string) *StartDatafeed { +func (r *StartDatafeed) _datafeedid(datafeedid string) *StartDatafeed { r.paramSet |= datafeedidMask - r.datafeedid = v + r.datafeedid = datafeedid return r } -// End The time that the datafeed should end, which can be specified by using one of -// the following formats: -// -// * ISO 8601 format with milliseconds, for example `2017-01-22T06:00:00.000Z` -// * ISO 8601 format without milliseconds, for example -// `2017-01-22T06:00:00+00:00` -// * Milliseconds since the epoch, for example `1485061200000` -// -// Date-time arguments using either of the ISO 8601 formats must have a time -// zone designator, where `Z` is accepted -// as an abbreviation for UTC time. When a URL is expected (for example, in -// browsers), the `+` used in time zone -// designators must be encoded as `%2B`. -// The end time value is exclusive. If you do not specify an end time, the -// datafeed -// runs continuously. +// End Refer to the description for the `end` query parameter. // API name: end -func (r *StartDatafeed) End(v string) *StartDatafeed { - r.values.Set("end", v) +func (r *StartDatafeed) End(datetime types.DateTime) *StartDatafeed { + r.req.End = datetime return r } -// Start The time that the datafeed should begin, which can be specified by using the -// same formats as the `end` parameter. -// This value is inclusive. -// If you do not specify a start time and the datafeed is associated with a new -// anomaly detection job, the analysis -// starts from the earliest time for which data is available. -// If you restart a stopped datafeed and specify a start value that is earlier -// than the timestamp of the latest -// processed record, the datafeed continues from 1 millisecond after the -// timestamp of the latest processed record. +// Start Refer to the description for the `start` query parameter. 
// API name: start -func (r *StartDatafeed) Start(v string) *StartDatafeed { - r.values.Set("start", v) +func (r *StartDatafeed) Start(datetime types.DateTime) *StartDatafeed { + r.req.Start = datetime return r } -// Timeout Specifies the amount of time to wait until a datafeed starts. +// Timeout Refer to the description for the `timeout` query parameter. // API name: timeout -func (r *StartDatafeed) Timeout(v string) *StartDatafeed { - r.values.Set("timeout", v) +func (r *StartDatafeed) Timeout(duration types.Duration) *StartDatafeed { + r.req.Timeout = duration return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdataframeanalytics/response.go index c75ccad24..761f2e0c8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdataframeanalytics/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdataframeanalytics/response.go @@ -16,19 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package startdataframeanalytics // Response holds the response body struct for the package startdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/start_data_frame_analytics/MlStartDataFrameAnalyticsResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/start_data_frame_analytics/MlStartDataFrameAnalyticsResponse.ts#L22-L34 type Response struct { Acknowledged bool `json:"acknowledged"` // Node The ID of the node that the job was started on. 
If the job is allowed to open // lazily and has not yet been assigned to a node, this value is an empty // string. + // The node ID of the node the job has been assigned to, or + // an empty string if it hasn't been assigned to a node. In + // serverless if the job has been assigned to run then the + // node ID will be "serverless". Node string `json:"node"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdataframeanalytics/start_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdataframeanalytics/start_data_frame_analytics.go index c8ce1e175..ad20963ee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdataframeanalytics/start_data_frame_analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/startdataframeanalytics/start_data_frame_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Starts a data frame analytics job. 
package startdataframeanalytics @@ -67,7 +67,7 @@ func NewStartDataFrameAnalyticsFunc(tp elastictransport.Interface) NewStartDataF return func(id string) *StartDataFrameAnalytics { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -180,7 +180,6 @@ func (r StartDataFrameAnalytics) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -189,6 +188,10 @@ func (r StartDataFrameAnalytics) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -224,9 +227,9 @@ func (r *StartDataFrameAnalytics) Header(key, value string) *StartDataFrameAnaly // lowercase alphanumeric characters (a-z and 0-9), hyphens, and // underscores. It must start and end with alphanumeric characters. // API Name: id -func (r *StartDataFrameAnalytics) Id(v string) *StartDataFrameAnalytics { +func (r *StartDataFrameAnalytics) _id(id string) *StartDataFrameAnalytics { r.paramSet |= idMask - r.id = v + r.id = id return r } @@ -234,8 +237,8 @@ func (r *StartDataFrameAnalytics) Id(v string) *StartDataFrameAnalytics { // Timeout Controls the amount of time to wait until the data frame analytics job // starts. 
// API name: timeout -func (r *StartDataFrameAnalytics) Timeout(v string) *StartDataFrameAnalytics { - r.values.Set("timeout", v) +func (r *StartDataFrameAnalytics) Timeout(duration string) *StartDataFrameAnalytics { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/starttrainedmodeldeployment/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/starttrainedmodeldeployment/response.go index c24aae0ca..aa7d84b96 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/starttrainedmodeldeployment/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/starttrainedmodeldeployment/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package starttrainedmodeldeployment @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package starttrainedmodeldeployment // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/start_trained_model_deployment/MlStartTrainedModelDeploymentResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/start_trained_model_deployment/MlStartTrainedModelDeploymentResponse.ts#L22-L26 type Response struct { Assignment types.TrainedModelAssignment `json:"assignment"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/starttrainedmodeldeployment/start_trained_model_deployment.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/starttrainedmodeldeployment/start_trained_model_deployment.go index e89332cfa..7a8c20eb6 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/starttrainedmodeldeployment/start_trained_model_deployment.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/starttrainedmodeldeployment/start_trained_model_deployment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Start a trained model deployment. package starttrainedmodeldeployment @@ -36,7 +36,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentallocationstate" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/trainingpriority" ) @@ -71,7 +70,7 @@ func NewStartTrainedModelDeploymentFunc(tp elastictransport.Interface) NewStartT return func(modelid string) *StartTrainedModelDeployment { n := New(tp) - n.ModelId(modelid) + n._modelid(modelid) return n } @@ -184,7 +183,6 @@ func (r StartTrainedModelDeployment) Do(ctx context.Context) (*Response, error) } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -193,6 +191,10 @@ func (r StartTrainedModelDeployment) Do(ctx context.Context) (*Response, error) return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -227,9 +229,9 @@ func (r *StartTrainedModelDeployment) Header(key, value string) *StartTrainedMod // ModelId The unique identifier of the trained model. Currently, only PyTorch models // are supported. 
// API Name: modelid -func (r *StartTrainedModelDeployment) ModelId(v string) *StartTrainedModelDeployment { +func (r *StartTrainedModelDeployment) _modelid(modelid string) *StartTrainedModelDeployment { r.paramSet |= modelidMask - r.modelid = v + r.modelid = modelid return r } @@ -240,8 +242,8 @@ func (r *StartTrainedModelDeployment) ModelId(v string) *StartTrainedModelDeploy // cache, // `0b` can be provided. // API name: cache_size -func (r *StartTrainedModelDeployment) CacheSize(v string) *StartTrainedModelDeployment { - r.values.Set("cache_size", v) +func (r *StartTrainedModelDeployment) CacheSize(bytesize string) *StartTrainedModelDeployment { + r.values.Set("cache_size", bytesize) return r } @@ -254,16 +256,16 @@ func (r *StartTrainedModelDeployment) CacheSize(v string) *StartTrainedModelDepl // it will automatically be changed to a value less than the number of hardware // threads. // API name: number_of_allocations -func (r *StartTrainedModelDeployment) NumberOfAllocations(i int) *StartTrainedModelDeployment { - r.values.Set("number_of_allocations", strconv.Itoa(i)) +func (r *StartTrainedModelDeployment) NumberOfAllocations(numberofallocations int) *StartTrainedModelDeployment { + r.values.Set("number_of_allocations", strconv.Itoa(numberofallocations)) return r } // Priority The deployment priority. // API name: priority -func (r *StartTrainedModelDeployment) Priority(enum trainingpriority.TrainingPriority) *StartTrainedModelDeployment { - r.values.Set("priority", enum.String()) +func (r *StartTrainedModelDeployment) Priority(priority trainingpriority.TrainingPriority) *StartTrainedModelDeployment { + r.values.Set("priority", priority.String()) return r } @@ -272,8 +274,8 @@ func (r *StartTrainedModelDeployment) Priority(enum trainingpriority.TrainingPri // After the number of requests exceeds // this value, new requests are rejected with a 429 error. 
// API name: queue_capacity -func (r *StartTrainedModelDeployment) QueueCapacity(i int) *StartTrainedModelDeployment { - r.values.Set("queue_capacity", strconv.Itoa(i)) +func (r *StartTrainedModelDeployment) QueueCapacity(queuecapacity int) *StartTrainedModelDeployment { + r.values.Set("queue_capacity", strconv.Itoa(queuecapacity)) return r } @@ -289,24 +291,24 @@ func (r *StartTrainedModelDeployment) QueueCapacity(i int) *StartTrainedModelDep // it will automatically be changed to a value less than the number of hardware // threads. // API name: threads_per_allocation -func (r *StartTrainedModelDeployment) ThreadsPerAllocation(i int) *StartTrainedModelDeployment { - r.values.Set("threads_per_allocation", strconv.Itoa(i)) +func (r *StartTrainedModelDeployment) ThreadsPerAllocation(threadsperallocation int) *StartTrainedModelDeployment { + r.values.Set("threads_per_allocation", strconv.Itoa(threadsperallocation)) return r } // Timeout Specifies the amount of time to wait for the model to deploy. // API name: timeout -func (r *StartTrainedModelDeployment) Timeout(v string) *StartTrainedModelDeployment { - r.values.Set("timeout", v) +func (r *StartTrainedModelDeployment) Timeout(duration string) *StartTrainedModelDeployment { + r.values.Set("timeout", duration) return r } // WaitFor Specifies the allocation status to wait for before returning. 
// API name: wait_for -func (r *StartTrainedModelDeployment) WaitFor(enum deploymentallocationstate.DeploymentAllocationState) *StartTrainedModelDeployment { - r.values.Set("wait_for", enum.String()) +func (r *StartTrainedModelDeployment) WaitFor(waitfor deploymentallocationstate.DeploymentAllocationState) *StartTrainedModelDeployment { + r.values.Set("wait_for", waitfor.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdatafeed/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdatafeed/request.go index d8d19e229..3f8d6d684 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdatafeed/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdatafeed/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package stopdatafeed @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package stopdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/stop_datafeed/MlStopDatafeedRequest.ts#L24-L78 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/stop_datafeed/MlStopDatafeedRequest.ts#L24-L78 type Request struct { // AllowNoMatch Refer to the description for the `allow_no_match` query parameter. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdatafeed/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdatafeed/response.go index 61851fd09..62c37a89b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdatafeed/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdatafeed/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package stopdatafeed // Response holds the response body struct for the package stopdatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/stop_datafeed/MlStopDatafeedResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/stop_datafeed/MlStopDatafeedResponse.ts#L20-L22 type Response struct { Stopped bool `json:"stopped"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdatafeed/stop_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdatafeed/stop_datafeed.go index 5a2e709cf..8c6b2165f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdatafeed/stop_datafeed.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdatafeed/stop_datafeed.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Stops one or more datafeeds. 
package stopdatafeed @@ -30,7 +30,6 @@ import ( "io" "net/http" "net/url" - "strconv" "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" @@ -53,8 +52,9 @@ type StopDatafeed struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +70,7 @@ func NewStopDatafeedFunc(tp elastictransport.Interface) NewStopDatafeed { return func(datafeedid string) *StopDatafeed { n := New(tp) - n.DatafeedId(datafeedid) + n._datafeedid(datafeedid) return n } @@ -85,6 +85,8 @@ func New(tp elastictransport.Interface) *StopDatafeed { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +116,19 @@ func (r *StopDatafeed) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +136,7 @@ func (r *StopDatafeed) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -208,7 +221,6 @@ func (r StopDatafeed) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -217,6 +229,10 @@ func (r StopDatafeed) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -233,43 +249,33 @@ func (r *StopDatafeed) Header(key, value string) *StopDatafeed { // using `_all` or by specifying `*` as // the identifier. 
// API Name: datafeedid -func (r *StopDatafeed) DatafeedId(v string) *StopDatafeed { +func (r *StopDatafeed) _datafeedid(datafeedid string) *StopDatafeed { r.paramSet |= datafeedidMask - r.datafeedid = v + r.datafeedid = datafeedid return r } -// AllowNoMatch Specifies what to do when the request: -// -// * Contains wildcard expressions and there are no datafeeds that match. -// * Contains the `_all` string or no identifiers and there are no matches. -// * Contains wildcard expressions and there are only partial matches. -// -// If `true`, the API returns an empty datafeeds array when there are no matches -// and the subset of results when -// there are partial matches. If `false`, the API returns a 404 status code when -// there are no matches or only -// partial matches. +// AllowNoMatch Refer to the description for the `allow_no_match` query parameter. // API name: allow_no_match -func (r *StopDatafeed) AllowNoMatch(b bool) *StopDatafeed { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *StopDatafeed) AllowNoMatch(allownomatch bool) *StopDatafeed { + r.req.AllowNoMatch = &allownomatch return r } -// Force If `true`, the datafeed is stopped forcefully. +// Force Refer to the description for the `force` query parameter. // API name: force -func (r *StopDatafeed) Force(b bool) *StopDatafeed { - r.values.Set("force", strconv.FormatBool(b)) +func (r *StopDatafeed) Force(force bool) *StopDatafeed { + r.req.Force = &force return r } -// Timeout Specifies the amount of time to wait until a datafeed stops. +// Timeout Refer to the description for the `timeout` query parameter. 
// API name: timeout -func (r *StopDatafeed) Timeout(v string) *StopDatafeed { - r.values.Set("timeout", v) +func (r *StopDatafeed) Timeout(duration types.Duration) *StopDatafeed { + r.req.Timeout = duration return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdataframeanalytics/response.go index 9765df56f..236219f58 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdataframeanalytics/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdataframeanalytics/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package stopdataframeanalytics // Response holds the response body struct for the package stopdataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/stop_data_frame_analytics/MlStopDataFrameAnalyticsResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/stop_data_frame_analytics/MlStopDataFrameAnalyticsResponse.ts#L20-L22 type Response struct { Stopped bool `json:"stopped"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdataframeanalytics/stop_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdataframeanalytics/stop_data_frame_analytics.go index 8d4e4a4aa..15e441f5c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdataframeanalytics/stop_data_frame_analytics.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stopdataframeanalytics/stop_data_frame_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Stops one or more data frame analytics jobs. package stopdataframeanalytics @@ -68,7 +68,7 @@ func NewStopDataFrameAnalyticsFunc(tp elastictransport.Interface) NewStopDataFra return func(id string) *StopDataFrameAnalytics { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -181,7 +181,6 @@ func (r StopDataFrameAnalytics) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -190,6 +189,10 @@ func (r StopDataFrameAnalytics) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -225,9 +228,9 @@ func (r *StopDataFrameAnalytics) Header(key, value string) *StopDataFrameAnalyti // lowercase alphanumeric characters (a-z and 0-9), hyphens, and // underscores. It must start and end with alphanumeric characters. // API Name: id -func (r *StopDataFrameAnalytics) Id(v string) *StopDataFrameAnalytics { +func (r *StopDataFrameAnalytics) _id(id string) *StopDataFrameAnalytics { r.paramSet |= idMask - r.id = v + r.id = id return r } @@ -244,16 +247,16 @@ func (r *StopDataFrameAnalytics) Id(v string) *StopDataFrameAnalytics { // partial matches. If this parameter is false, the request returns a 404 // status code when there are no matches or only partial matches. 
// API name: allow_no_match -func (r *StopDataFrameAnalytics) AllowNoMatch(b bool) *StopDataFrameAnalytics { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *StopDataFrameAnalytics) AllowNoMatch(allownomatch bool) *StopDataFrameAnalytics { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } // Force If true, the data frame analytics job is stopped forcefully. // API name: force -func (r *StopDataFrameAnalytics) Force(b bool) *StopDataFrameAnalytics { - r.values.Set("force", strconv.FormatBool(b)) +func (r *StopDataFrameAnalytics) Force(force bool) *StopDataFrameAnalytics { + r.values.Set("force", strconv.FormatBool(force)) return r } @@ -261,8 +264,8 @@ func (r *StopDataFrameAnalytics) Force(b bool) *StopDataFrameAnalytics { // Timeout Controls the amount of time to wait until the data frame analytics job // stops. Defaults to 20 seconds. // API name: timeout -func (r *StopDataFrameAnalytics) Timeout(v string) *StopDataFrameAnalytics { - r.values.Set("timeout", v) +func (r *StopDataFrameAnalytics) Timeout(duration string) *StopDataFrameAnalytics { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stoptrainedmodeldeployment/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stoptrainedmodeldeployment/response.go index b5a73a42d..b62953abd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stoptrainedmodeldeployment/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stoptrainedmodeldeployment/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package stoptrainedmodeldeployment // Response holds the response body struct for the package stoptrainedmodeldeployment // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/stop_trained_model_deployment/MlStopTrainedModelDeploymentResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/stop_trained_model_deployment/MlStopTrainedModelDeploymentResponse.ts#L20-L22 type Response struct { Stopped bool `json:"stopped"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stoptrainedmodeldeployment/stop_trained_model_deployment.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stoptrainedmodeldeployment/stop_trained_model_deployment.go index b19d44d15..c94fc79a9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stoptrainedmodeldeployment/stop_trained_model_deployment.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/stoptrainedmodeldeployment/stop_trained_model_deployment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Stop a trained model deployment. 
package stoptrainedmodeldeployment @@ -68,7 +68,7 @@ func NewStopTrainedModelDeploymentFunc(tp elastictransport.Interface) NewStopTra return func(modelid string) *StopTrainedModelDeployment { n := New(tp) - n.ModelId(modelid) + n._modelid(modelid) return n } @@ -181,7 +181,6 @@ func (r StopTrainedModelDeployment) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -190,6 +189,10 @@ func (r StopTrainedModelDeployment) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -223,9 +226,9 @@ func (r *StopTrainedModelDeployment) Header(key, value string) *StopTrainedModel // ModelId The unique identifier of the trained model. // API Name: modelid -func (r *StopTrainedModelDeployment) ModelId(v string) *StopTrainedModelDeployment { +func (r *StopTrainedModelDeployment) _modelid(modelid string) *StopTrainedModelDeployment { r.paramSet |= modelidMask - r.modelid = v + r.modelid = modelid return r } @@ -240,8 +243,8 @@ func (r *StopTrainedModelDeployment) ModelId(v string) *StopTrainedModelDeployme // If `false`, the request returns a 404 status code when there are no matches // or only partial matches. // API name: allow_no_match -func (r *StopTrainedModelDeployment) AllowNoMatch(b bool) *StopTrainedModelDeployment { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *StopTrainedModelDeployment) AllowNoMatch(allownomatch bool) *StopTrainedModelDeployment { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } @@ -250,8 +253,8 @@ func (r *StopTrainedModelDeployment) AllowNoMatch(b bool) *StopTrainedModelDeplo // can't use these pipelines until you // restart the model deployment. 
// API name: force -func (r *StopTrainedModelDeployment) Force(b bool) *StopTrainedModelDeployment { - r.values.Set("force", strconv.FormatBool(b)) +func (r *StopTrainedModelDeployment) Force(force bool) *StopTrainedModelDeployment { + r.values.Set("force", strconv.FormatBool(force)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedatafeed/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedatafeed/request.go index c62decf4f..95a6a9bcf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedatafeed/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedatafeed/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatedatafeed @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package updatedatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/update_datafeed/MlUpdateDatafeedRequest.ts#L31-L162 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/update_datafeed/MlUpdateDatafeedRequest.ts#L31-L162 type Request struct { // Aggregations If set, the datafeed performs aggregation searches. Support for aggregations @@ -101,7 +101,7 @@ type Request struct { // when there are multiple jobs running on the same node. QueryDelay types.Duration `json:"query_delay,omitempty"` // RuntimeMappings Specifies runtime fields for the datafeed search. 
- RuntimeMappings map[string]types.RuntimeField `json:"runtime_mappings,omitempty"` + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` // ScriptFields Specifies scripts that evaluate custom expressions and returns script fields // to the datafeed. // The detector configuration objects in a job can contain functions that use diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedatafeed/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedatafeed/response.go index 1987bd6e8..8f9fcd95b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedatafeed/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedatafeed/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatedatafeed @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatedatafeed // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/update_datafeed/MlUpdateDatafeedResponse.ts#L31-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/update_datafeed/MlUpdateDatafeedResponse.ts#L31-L49 type Response struct { Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` @@ -41,7 +41,7 @@ type Response struct { MaxEmptySearches *int `json:"max_empty_searches,omitempty"` Query types.Query `json:"query"` QueryDelay types.Duration `json:"query_delay"` - RuntimeMappings map[string]types.RuntimeField `json:"runtime_mappings,omitempty"` + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` ScriptFields 
map[string]types.ScriptField `json:"script_fields,omitempty"` ScrollSize int `json:"scroll_size"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedatafeed/update_datafeed.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedatafeed/update_datafeed.go index 2efcb6b04..96233f736 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedatafeed/update_datafeed.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedatafeed/update_datafeed.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Updates certain properties of a datafeed. package updatedatafeed @@ -35,6 +35,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -53,8 +54,9 @@ type UpdateDatafeed struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +72,7 @@ func NewUpdateDatafeedFunc(tp elastictransport.Interface) NewUpdateDatafeed { return func(datafeedid string) *UpdateDatafeed { n := New(tp) - n.DatafeedId(datafeedid) + n._datafeedid(datafeedid) return n } @@ -85,6 +87,8 @@ func New(tp elastictransport.Interface) *UpdateDatafeed { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +118,19 @@ func (r *UpdateDatafeed) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, 
deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +138,7 @@ func (r *UpdateDatafeed) HttpRequest(ctx context.Context) (*http.Request, error) } r.buf.Write(data) + } r.path.Scheme = "http" @@ -208,7 +223,6 @@ func (r UpdateDatafeed) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -217,6 +231,10 @@ func (r UpdateDatafeed) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -232,9 +250,9 @@ func (r *UpdateDatafeed) Header(key, value string) *UpdateDatafeed { // hyphens, and underscores. // It must start and end with alphanumeric characters. // API Name: datafeedid -func (r *UpdateDatafeed) DatafeedId(v string) *UpdateDatafeed { +func (r *UpdateDatafeed) _datafeedid(datafeedid string) *UpdateDatafeed { r.paramSet |= datafeedidMask - r.datafeedid = v + r.datafeedid = datafeedid return r } @@ -243,8 +261,8 @@ func (r *UpdateDatafeed) DatafeedId(v string) *UpdateDatafeed { // are ignored. This includes the // `_all` string or when no indices are specified. // API name: allow_no_indices -func (r *UpdateDatafeed) AllowNoIndices(b bool) *UpdateDatafeed { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *UpdateDatafeed) AllowNoIndices(allownoindices bool) *UpdateDatafeed { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } @@ -263,24 +281,191 @@ func (r *UpdateDatafeed) AllowNoIndices(b bool) *UpdateDatafeed { // * `open`: Match open, non-hidden indices. Also matches any non-hidden data // stream. 
// API name: expand_wildcards -func (r *UpdateDatafeed) ExpandWildcards(v string) *UpdateDatafeed { - r.values.Set("expand_wildcards", v) +func (r *UpdateDatafeed) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *UpdateDatafeed { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } // IgnoreThrottled If `true`, concrete, expanded or aliased indices are ignored when frozen. // API name: ignore_throttled -func (r *UpdateDatafeed) IgnoreThrottled(b bool) *UpdateDatafeed { - r.values.Set("ignore_throttled", strconv.FormatBool(b)) +func (r *UpdateDatafeed) IgnoreThrottled(ignorethrottled bool) *UpdateDatafeed { + r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled)) return r } // IgnoreUnavailable If `true`, unavailable indices (missing or closed) are ignored. // API name: ignore_unavailable -func (r *UpdateDatafeed) IgnoreUnavailable(b bool) *UpdateDatafeed { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *UpdateDatafeed) IgnoreUnavailable(ignoreunavailable bool) *UpdateDatafeed { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) + + return r +} + +// Aggregations If set, the datafeed performs aggregation searches. Support for aggregations +// is limited and should be used only +// with low cardinality data. +// API name: aggregations +func (r *UpdateDatafeed) Aggregations(aggregations map[string]types.Aggregations) *UpdateDatafeed { + + r.req.Aggregations = aggregations + + return r +} + +// ChunkingConfig Datafeeds might search over long time periods, for several months or years. +// This search is split into time +// chunks in order to ensure the load on Elasticsearch is managed. Chunking +// configuration controls how the size of +// these time chunks are calculated; it is an advanced configuration option. 
+// API name: chunking_config +func (r *UpdateDatafeed) ChunkingConfig(chunkingconfig *types.ChunkingConfig) *UpdateDatafeed { + + r.req.ChunkingConfig = chunkingconfig + + return r +} + +// DelayedDataCheckConfig Specifies whether the datafeed checks for missing data and the size of the +// window. The datafeed can optionally +// search over indices that have already been read in an effort to determine +// whether any data has subsequently been +// added to the index. If missing data is found, it is a good indication that +// the `query_delay` is set too low and +// the data is being indexed after the datafeed has passed that moment in time. +// This check runs only on real-time +// datafeeds. +// API name: delayed_data_check_config +func (r *UpdateDatafeed) DelayedDataCheckConfig(delayeddatacheckconfig *types.DelayedDataCheckConfig) *UpdateDatafeed { + + r.req.DelayedDataCheckConfig = delayeddatacheckconfig + + return r +} + +// Frequency The interval at which scheduled queries are made while the datafeed runs in +// real time. The default value is +// either the bucket span for short bucket spans, or, for longer bucket spans, a +// sensible fraction of the bucket +// span. When `frequency` is shorter than the bucket span, interim results for +// the last (partial) bucket are +// written then eventually overwritten by the full bucket results. If the +// datafeed uses aggregations, this value +// must be divisible by the interval of the date histogram aggregation. +// API name: frequency +func (r *UpdateDatafeed) Frequency(duration types.Duration) *UpdateDatafeed { + r.req.Frequency = duration + + return r +} + +// Indices An array of index names. Wildcards are supported. If any of the indices are +// in remote clusters, the machine +// learning nodes must have the `remote_cluster_client` role. 
+// API name: indices +func (r *UpdateDatafeed) Indices(indices ...string) *UpdateDatafeed { + r.req.Indices = indices + + return r +} + +// IndicesOptions Specifies index expansion options that are used during search. +// API name: indices_options +func (r *UpdateDatafeed) IndicesOptions(indicesoptions *types.IndicesOptions) *UpdateDatafeed { + + r.req.IndicesOptions = indicesoptions + + return r +} + +// API name: job_id +func (r *UpdateDatafeed) JobId(id string) *UpdateDatafeed { + r.req.JobId = &id + + return r +} + +// MaxEmptySearches If a real-time datafeed has never seen any data (including during any initial +// training period), it automatically +// stops and closes the associated job after this many real-time searches return +// no documents. In other words, +// it stops after `frequency` times `max_empty_searches` of real-time operation. +// If not set, a datafeed with no +// end time that sees no data remains started until it is explicitly stopped. By +// default, it is not set. +// API name: max_empty_searches +func (r *UpdateDatafeed) MaxEmptySearches(maxemptysearches int) *UpdateDatafeed { + r.req.MaxEmptySearches = &maxemptysearches + + return r +} + +// Query The Elasticsearch query domain-specific language (DSL). This value +// corresponds to the query object in an +// Elasticsearch search POST body. All the options that are supported by +// Elasticsearch can be used, as this +// object is passed verbatim to Elasticsearch. Note that if you change the +// query, the analyzed data is also +// changed. Therefore, the time required to learn might be long and the +// understandability of the results is +// unpredictable. If you want to make significant changes to the source data, it +// is recommended that you +// clone the job and datafeed and make the amendments in the clone. Let both run +// in parallel and close one +// when you are satisfied with the results of the job. 
+// API name: query +func (r *UpdateDatafeed) Query(query *types.Query) *UpdateDatafeed { + + r.req.Query = query + + return r +} + +// QueryDelay The number of seconds behind real time that data is queried. For example, if +// data from 10:04 a.m. might +// not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 +// seconds. The default +// value is randomly selected between `60s` and `120s`. This randomness improves +// the query performance +// when there are multiple jobs running on the same node. +// API name: query_delay +func (r *UpdateDatafeed) QueryDelay(duration types.Duration) *UpdateDatafeed { + r.req.QueryDelay = duration + + return r +} + +// RuntimeMappings Specifies runtime fields for the datafeed search. +// API name: runtime_mappings +func (r *UpdateDatafeed) RuntimeMappings(runtimefields types.RuntimeFields) *UpdateDatafeed { + r.req.RuntimeMappings = runtimefields + + return r +} + +// ScriptFields Specifies scripts that evaluate custom expressions and returns script fields +// to the datafeed. +// The detector configuration objects in a job can contain functions that use +// these script fields. +// API name: script_fields +func (r *UpdateDatafeed) ScriptFields(scriptfields map[string]types.ScriptField) *UpdateDatafeed { + + r.req.ScriptFields = scriptfields + + return r +} + +// ScrollSize The size parameter that is used in Elasticsearch searches when the datafeed +// does not use aggregations. +// The maximum value is the value of `index.max_result_window`. 
+// API name: scroll_size +func (r *UpdateDatafeed) ScrollSize(scrollsize int) *UpdateDatafeed { + r.req.ScrollSize = &scrollsize return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedataframeanalytics/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedataframeanalytics/request.go index aa9d21758..cfa567aee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedataframeanalytics/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedataframeanalytics/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatedataframeanalytics @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updatedataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsRequest.ts#L24-L72 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsRequest.ts#L24-L72 type Request struct { // AllowLazyStart Specifies whether this job can start when there is insufficient machine diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedataframeanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedataframeanalytics/response.go index 2308d9e96..ca6b3bfd2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedataframeanalytics/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedataframeanalytics/response.go @@ -16,7 +16,7 
@@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatedataframeanalytics @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatedataframeanalytics // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsResponse.ts#L30-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsResponse.ts#L30-L45 type Response struct { AllowLazyStart bool `json:"allow_lazy_start"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedataframeanalytics/update_data_frame_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedataframeanalytics/update_data_frame_analytics.go index fb420554f..0729bb266 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedataframeanalytics/update_data_frame_analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatedataframeanalytics/update_data_frame_analytics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Updates certain properties of a data frame analytics job. 
package updatedataframeanalytics @@ -52,8 +52,9 @@ type UpdateDataFrameAnalytics struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewUpdateDataFrameAnalyticsFunc(tp elastictransport.Interface) NewUpdateDat return func(id string) *UpdateDataFrameAnalytics { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *UpdateDataFrameAnalytics { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *UpdateDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Reque var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *UpdateDataFrameAnalytics) HttpRequest(ctx context.Context) (*http.Reque } r.buf.Write(data) + } r.path.Scheme = "http" @@ -209,7 +223,6 @@ func (r UpdateDataFrameAnalytics) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -218,6 +231,10 @@ func (r UpdateDataFrameAnalytics) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -232,9 +249,51 @@ func (r *UpdateDataFrameAnalytics) Header(key, value string) *UpdateDataFrameAna // lowercase alphanumeric characters (a-z and 0-9), hyphens, and // underscores. It must start and end with alphanumeric characters. 
// API Name: id -func (r *UpdateDataFrameAnalytics) Id(v string) *UpdateDataFrameAnalytics { +func (r *UpdateDataFrameAnalytics) _id(id string) *UpdateDataFrameAnalytics { r.paramSet |= idMask - r.id = v + r.id = id + + return r +} + +// AllowLazyStart Specifies whether this job can start when there is insufficient machine +// learning node capacity for it to be immediately assigned to a node. +// API name: allow_lazy_start +func (r *UpdateDataFrameAnalytics) AllowLazyStart(allowlazystart bool) *UpdateDataFrameAnalytics { + r.req.AllowLazyStart = &allowlazystart + + return r +} + +// Description A description of the job. +// API name: description +func (r *UpdateDataFrameAnalytics) Description(description string) *UpdateDataFrameAnalytics { + + r.req.Description = &description + + return r +} + +// MaxNumThreads The maximum number of threads to be used by the analysis. Using more +// threads may decrease the time necessary to complete the analysis at the +// cost of using more CPU. Note that the process may use additional threads +// for operational functionality other than the analysis itself. +// API name: max_num_threads +func (r *UpdateDataFrameAnalytics) MaxNumThreads(maxnumthreads int) *UpdateDataFrameAnalytics { + r.req.MaxNumThreads = &maxnumthreads + + return r +} + +// ModelMemoryLimit The approximate maximum amount of memory resources that are permitted for +// analytical processing. If your `elasticsearch.yml` file contains an +// `xpack.ml.max_model_memory_limit` setting, an error occurs when you try +// to create data frame analytics jobs that have `model_memory_limit` values +// greater than that setting. 
+// API name: model_memory_limit +func (r *UpdateDataFrameAnalytics) ModelMemoryLimit(modelmemorylimit string) *UpdateDataFrameAnalytics { + + r.req.ModelMemoryLimit = &modelmemorylimit return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatefilter/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatefilter/request.go index 8b0fe5a36..b620ed032 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatefilter/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatefilter/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatefilter @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updatefilter // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/update_filter/MlUpdateFilterRequest.ts#L23-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/update_filter/MlUpdateFilterRequest.ts#L23-L51 type Request struct { // AddItems The items to add to the filter. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatefilter/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatefilter/response.go index ca51d3e1f..6a73e9edf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatefilter/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatefilter/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatefilter // Response holds the response body struct for the package updatefilter // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/update_filter/MlUpdateFilterResponse.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/update_filter/MlUpdateFilterResponse.ts#L22-L28 type Response struct { Description string `json:"description"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatefilter/update_filter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatefilter/update_filter.go index 733e65a88..d04567bcf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatefilter/update_filter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatefilter/update_filter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Updates the description of a filter, adds items, or removes items. 
package updatefilter @@ -52,8 +52,9 @@ type UpdateFilter struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewUpdateFilterFunc(tp elastictransport.Interface) NewUpdateFilter { return func(filterid string) *UpdateFilter { n := New(tp) - n.FilterId(filterid) + n._filterid(filterid) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *UpdateFilter { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *UpdateFilter) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *UpdateFilter) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -207,7 +221,6 @@ func (r UpdateFilter) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -216,6 +229,10 @@ func (r UpdateFilter) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -228,9 +245,34 @@ func (r *UpdateFilter) Header(key, value string) *UpdateFilter { // FilterId A string that uniquely identifies a filter. // API Name: filterid -func (r *UpdateFilter) FilterId(v string) *UpdateFilter { +func (r *UpdateFilter) _filterid(filterid string) *UpdateFilter { r.paramSet |= filteridMask - r.filterid = v + r.filterid = filterid + + return r +} + +// AddItems The items to add to the filter. 
+// API name: add_items +func (r *UpdateFilter) AddItems(additems ...string) *UpdateFilter { + r.req.AddItems = additems + + return r +} + +// Description A description for the filter. +// API name: description +func (r *UpdateFilter) Description(description string) *UpdateFilter { + + r.req.Description = &description + + return r +} + +// RemoveItems The items to remove from the filter. +// API name: remove_items +func (r *UpdateFilter) RemoveItems(removeitems ...string) *UpdateFilter { + r.req.RemoveItems = removeitems return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatejob/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatejob/request.go index d72b54220..60d8ce6c2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatejob/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatejob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatejob @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package updatejob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/update_job/MlUpdateJobRequest.ts#L33-L138 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/update_job/MlUpdateJobRequest.ts#L33-L138 type Request struct { // AllowLazyOpen Advanced configuration option. 
Specifies whether this job can open when diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatejob/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatejob/response.go index ed438b064..4e9df77b2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatejob/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatejob/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatejob @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatejob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/update_job/MlUpdateJobResponse.ts#L29-L53 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/update_job/MlUpdateJobResponse.ts#L29-L53 type Response struct { AllowLazyOpen bool `json:"allow_lazy_open"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatejob/update_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatejob/update_job.go index 21d425d28..6faa8049b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatejob/update_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatejob/update_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Updates certain properties of an anomaly detection job. package updatejob @@ -52,8 +52,9 @@ type UpdateJob struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewUpdateJobFunc(tp elastictransport.Interface) NewUpdateJob { return func(jobid string) *UpdateJob { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *UpdateJob { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *UpdateJob) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *UpdateJob) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -207,7 +221,6 @@ func (r UpdateJob) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -216,6 +229,10 @@ func (r UpdateJob) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -228,9 +245,168 @@ func (r *UpdateJob) Header(key, value string) *UpdateJob { // JobId Identifier for the job. 
// API Name: jobid -func (r *UpdateJob) JobId(v string) *UpdateJob { +func (r *UpdateJob) _jobid(jobid string) *UpdateJob { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid + + return r +} + +// AllowLazyOpen Advanced configuration option. Specifies whether this job can open when +// there is insufficient machine learning node capacity for it to be +// immediately assigned to a node. If `false` and a machine learning node +// with capacity to run the job cannot immediately be found, the open +// anomaly detection jobs API returns an error. However, this is also +// subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this +// option is set to `true`, the open anomaly detection jobs API does not +// return an error and the job waits in the opening state until sufficient +// machine learning node capacity is available. +// API name: allow_lazy_open +func (r *UpdateJob) AllowLazyOpen(allowlazyopen bool) *UpdateJob { + r.req.AllowLazyOpen = &allowlazyopen + + return r +} + +// API name: analysis_limits +func (r *UpdateJob) AnalysisLimits(analysislimits *types.AnalysisMemoryLimit) *UpdateJob { + + r.req.AnalysisLimits = analysislimits + + return r +} + +// BackgroundPersistInterval Advanced configuration option. The time between each periodic persistence +// of the model. +// The default value is a randomized value between 3 to 4 hours, which +// avoids all jobs persisting at exactly the same time. The smallest allowed +// value is 1 hour. +// For very large models (several GB), persistence could take 10-20 minutes, +// so do not set the value too low. +// If the job is open when you make the update, you must stop the datafeed, +// close the job, then reopen the job and restart the datafeed for the +// changes to take effect. 
+// API name: background_persist_interval +func (r *UpdateJob) BackgroundPersistInterval(duration types.Duration) *UpdateJob { + r.req.BackgroundPersistInterval = duration + + return r +} + +// API name: categorization_filters +func (r *UpdateJob) CategorizationFilters(categorizationfilters ...string) *UpdateJob { + r.req.CategorizationFilters = categorizationfilters + + return r +} + +// CustomSettings Advanced configuration option. Contains custom meta data about the job. +// For example, it can contain custom URL information as shown in Adding +// custom URLs to machine learning results. +// API name: custom_settings +func (r *UpdateJob) CustomSettings(customsettings map[string]json.RawMessage) *UpdateJob { + + r.req.CustomSettings = customsettings + + return r +} + +// DailyModelSnapshotRetentionAfterDays Advanced configuration option, which affects the automatic removal of old +// model snapshots for this job. It specifies a period of time (in days) +// after which only the first snapshot per day is retained. This period is +// relative to the timestamp of the most recent snapshot for this job. Valid +// values range from 0 to `model_snapshot_retention_days`. For jobs created +// before version 7.8.0, the default value matches +// `model_snapshot_retention_days`. +// API name: daily_model_snapshot_retention_after_days +func (r *UpdateJob) DailyModelSnapshotRetentionAfterDays(dailymodelsnapshotretentionafterdays int64) *UpdateJob { + + r.req.DailyModelSnapshotRetentionAfterDays = &dailymodelsnapshotretentionafterdays + + return r +} + +// Description A description of the job. +// API name: description +func (r *UpdateJob) Description(description string) *UpdateJob { + + r.req.Description = &description + + return r +} + +// Detectors An array of detector update objects. +// API name: detectors +func (r *UpdateJob) Detectors(detectors ...types.Detector) *UpdateJob { + r.req.Detectors = detectors + + return r +} + +// Groups A list of job groups. 
A job can belong to no groups or many. +// API name: groups +func (r *UpdateJob) Groups(groups ...string) *UpdateJob { + r.req.Groups = groups + + return r +} + +// API name: model_plot_config +func (r *UpdateJob) ModelPlotConfig(modelplotconfig *types.ModelPlotConfig) *UpdateJob { + + r.req.ModelPlotConfig = modelplotconfig + + return r +} + +// API name: model_prune_window +func (r *UpdateJob) ModelPruneWindow(duration types.Duration) *UpdateJob { + r.req.ModelPruneWindow = duration + + return r +} + +// ModelSnapshotRetentionDays Advanced configuration option, which affects the automatic removal of old +// model snapshots for this job. It specifies the maximum period of time (in +// days) that snapshots are retained. This period is relative to the +// timestamp of the most recent snapshot for this job. +// API name: model_snapshot_retention_days +func (r *UpdateJob) ModelSnapshotRetentionDays(modelsnapshotretentiondays int64) *UpdateJob { + + r.req.ModelSnapshotRetentionDays = &modelsnapshotretentiondays + + return r +} + +// PerPartitionCategorization Settings related to how categorization interacts with partition fields. +// API name: per_partition_categorization +func (r *UpdateJob) PerPartitionCategorization(perpartitioncategorization *types.PerPartitionCategorization) *UpdateJob { + + r.req.PerPartitionCategorization = perpartitioncategorization + + return r +} + +// RenormalizationWindowDays Advanced configuration option. The period over which adjustments to the +// score are applied, as new data is seen. +// API name: renormalization_window_days +func (r *UpdateJob) RenormalizationWindowDays(renormalizationwindowdays int64) *UpdateJob { + + r.req.RenormalizationWindowDays = &renormalizationwindowdays + + return r +} + +// ResultsRetentionDays Advanced configuration option. The period of time (in days) that results +// are retained. Age is calculated relative to the timestamp of the latest +// bucket result. 
If this property has a non-null value, once per day at +// 00:30 (server time), results that are the specified number of days older +// than the latest bucket result are deleted from Elasticsearch. The default +// value is null, which means all results are retained. +// API name: results_retention_days +func (r *UpdateJob) ResultsRetentionDays(resultsretentiondays int64) *UpdateJob { + + r.req.ResultsRetentionDays = &resultsretentiondays return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatemodelsnapshot/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatemodelsnapshot/request.go index 996b5b74f..874776a86 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatemodelsnapshot/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatemodelsnapshot/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatemodelsnapshot @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updatemodelsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/update_model_snapshot/MlUpdateModelSnapshotRequest.ts#L23-L54 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/update_model_snapshot/MlUpdateModelSnapshotRequest.ts#L23-L54 type Request struct { // Description A description of the model snapshot. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatemodelsnapshot/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatemodelsnapshot/response.go index c7d6a9708..08dae8486 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatemodelsnapshot/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatemodelsnapshot/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatemodelsnapshot @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package updatemodelsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/update_model_snapshot/MlUpdateModelSnapshotResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/update_model_snapshot/MlUpdateModelSnapshotResponse.ts#L22-L27 type Response struct { Acknowledged bool `json:"acknowledged"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatemodelsnapshot/update_model_snapshot.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatemodelsnapshot/update_model_snapshot.go index 1970a6d6b..11f6f49ab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatemodelsnapshot/update_model_snapshot.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatemodelsnapshot/update_model_snapshot.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Updates certain properties of a snapshot. package updatemodelsnapshot @@ -54,8 +54,9 @@ type UpdateModelSnapshot struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -72,9 +73,9 @@ func NewUpdateModelSnapshotFunc(tp elastictransport.Interface) NewUpdateModelSna return func(jobid, snapshotid string) *UpdateModelSnapshot { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) - n.SnapshotId(snapshotid) + n._snapshotid(snapshotid) return n } @@ -89,6 +90,8 @@ func New(tp elastictransport.Interface) *UpdateModelSnapshot { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -118,9 +121,19 @@ func (r *UpdateModelSnapshot) HttpRequest(ctx context.Context) (*http.Request, e var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -128,6 +141,7 @@ func (r *UpdateModelSnapshot) HttpRequest(ctx context.Context) (*http.Request, e } r.buf.Write(data) + } r.path.Scheme = "http" @@ -217,7 +231,6 @@ func (r UpdateModelSnapshot) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -226,6 +239,10 @@ func (r UpdateModelSnapshot) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -238,18 +255,37 @@ func (r *UpdateModelSnapshot) Header(key, value string) *UpdateModelSnapshot { // JobId Identifier for the 
anomaly detection job. // API Name: jobid -func (r *UpdateModelSnapshot) JobId(v string) *UpdateModelSnapshot { +func (r *UpdateModelSnapshot) _jobid(jobid string) *UpdateModelSnapshot { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } // SnapshotId Identifier for the model snapshot. // API Name: snapshotid -func (r *UpdateModelSnapshot) SnapshotId(v string) *UpdateModelSnapshot { +func (r *UpdateModelSnapshot) _snapshotid(snapshotid string) *UpdateModelSnapshot { r.paramSet |= snapshotidMask - r.snapshotid = v + r.snapshotid = snapshotid + + return r +} + +// Description A description of the model snapshot. +// API name: description +func (r *UpdateModelSnapshot) Description(description string) *UpdateModelSnapshot { + + r.req.Description = &description + + return r +} + +// Retain If `true`, this snapshot will not be deleted during automatic cleanup of +// snapshots older than `model_snapshot_retention_days`. However, this +// snapshot will be deleted when the job is deleted. +// API name: retain +func (r *UpdateModelSnapshot) Retain(retain bool) *UpdateModelSnapshot { + r.req.Retain = &retain return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/upgradejobsnapshot/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/upgradejobsnapshot/response.go index 4b385602d..c52ba108b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/upgradejobsnapshot/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/upgradejobsnapshot/response.go @@ -16,19 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package upgradejobsnapshot // Response holds the response body struct for the package upgradejobsnapshot // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/upgrade_job_snapshot/MlUpgradeJobSnapshotResponse.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/upgrade_job_snapshot/MlUpgradeJobSnapshotResponse.ts#L22-L31 type Response struct { // Completed When true, this means the task is complete. When false, it is still running. Completed bool `json:"completed"` - // Node The ID of the assigned node for the upgrade task if it is still running. + // Node The ID of the node that the upgrade task was started on if it is still + // running. In serverless this will be the "serverless". Node string `json:"node"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/upgradejobsnapshot/upgrade_job_snapshot.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/upgradejobsnapshot/upgrade_job_snapshot.go index 30d94c59d..2f11fb0ee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/upgradejobsnapshot/upgrade_job_snapshot.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/upgradejobsnapshot/upgrade_job_snapshot.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Upgrades a given job snapshot to the current major version. 
package upgradejobsnapshot @@ -71,9 +71,9 @@ func NewUpgradeJobSnapshotFunc(tp elastictransport.Interface) NewUpgradeJobSnaps return func(jobid, snapshotid string) *UpgradeJobSnapshot { n := New(tp) - n.JobId(jobid) + n._jobid(jobid) - n.SnapshotId(snapshotid) + n._snapshotid(snapshotid) return n } @@ -183,7 +183,6 @@ func (r UpgradeJobSnapshot) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -192,6 +191,10 @@ func (r UpgradeJobSnapshot) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -225,18 +228,18 @@ func (r *UpgradeJobSnapshot) Header(key, value string) *UpgradeJobSnapshot { // JobId Identifier for the anomaly detection job. // API Name: jobid -func (r *UpgradeJobSnapshot) JobId(v string) *UpgradeJobSnapshot { +func (r *UpgradeJobSnapshot) _jobid(jobid string) *UpgradeJobSnapshot { r.paramSet |= jobidMask - r.jobid = v + r.jobid = jobid return r } // SnapshotId A numerical character string that uniquely identifies the model snapshot. // API Name: snapshotid -func (r *UpgradeJobSnapshot) SnapshotId(v string) *UpgradeJobSnapshot { +func (r *UpgradeJobSnapshot) _snapshotid(snapshotid string) *UpgradeJobSnapshot { r.paramSet |= snapshotidMask - r.snapshotid = v + r.snapshotid = snapshotid return r } @@ -244,16 +247,16 @@ func (r *UpgradeJobSnapshot) SnapshotId(v string) *UpgradeJobSnapshot { // WaitForCompletion When true, the API won’t respond until the upgrade is complete. // Otherwise, it responds as soon as the upgrade task is assigned to a node. 
// API name: wait_for_completion -func (r *UpgradeJobSnapshot) WaitForCompletion(b bool) *UpgradeJobSnapshot { - r.values.Set("wait_for_completion", strconv.FormatBool(b)) +func (r *UpgradeJobSnapshot) WaitForCompletion(waitforcompletion bool) *UpgradeJobSnapshot { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) return r } // Timeout Controls the time to wait for the request to complete. // API name: timeout -func (r *UpgradeJobSnapshot) Timeout(v string) *UpgradeJobSnapshot { - r.values.Set("timeout", v) +func (r *UpgradeJobSnapshot) Timeout(duration string) *UpgradeJobSnapshot { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validate/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validate/request.go index 103ce17e7..58f0af4cb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validate/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package validate @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package validate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/validate/MlValidateJobRequest.ts#L27-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/validate/MlValidateJobRequest.ts#L27-L44 type Request struct { AnalysisConfig *types.AnalysisConfig `json:"analysis_config,omitempty"` AnalysisLimits *types.AnalysisLimits `json:"analysis_limits,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validate/response.go index 42690ae05..2a6cd02f1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package validate // Response holds the response body struct for the package validate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/validate/MlValidateJobResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/validate/MlValidateJobResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validate/validate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validate/validate.go index 1835feb6b..6b2aecc09 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validate/validate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validate/validate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Validates an anomaly detection job. 
package validate @@ -48,8 +48,9 @@ type Validate struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +77,8 @@ func New(tp elastictransport.Interface) *Validate { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +108,19 @@ func (r *Validate) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +128,7 @@ func (r *Validate) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -196,7 +210,6 @@ func (r Validate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -205,6 +218,10 @@ func (r Validate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,3 +231,72 @@ func (r *Validate) Header(key, value string) *Validate { return r } + +// API name: analysis_config +func (r *Validate) AnalysisConfig(analysisconfig *types.AnalysisConfig) *Validate { + + r.req.AnalysisConfig = analysisconfig + + return r +} + +// API name: analysis_limits +func (r *Validate) AnalysisLimits(analysislimits *types.AnalysisLimits) *Validate { + + r.req.AnalysisLimits = analysislimits + + return r +} + +// API name: data_description +func (r *Validate) DataDescription(datadescription *types.DataDescription) *Validate { + + r.req.DataDescription = datadescription + + return r +} + +// API name: description +func (r *Validate) Description(description string) *Validate { + + 
r.req.Description = &description + + return r +} + +// API name: job_id +func (r *Validate) JobId(id string) *Validate { + r.req.JobId = &id + + return r +} + +// API name: model_plot +func (r *Validate) ModelPlot(modelplot *types.ModelPlotConfig) *Validate { + + r.req.ModelPlot = modelplot + + return r +} + +// API name: model_snapshot_id +func (r *Validate) ModelSnapshotId(id string) *Validate { + r.req.ModelSnapshotId = &id + + return r +} + +// API name: model_snapshot_retention_days +func (r *Validate) ModelSnapshotRetentionDays(modelsnapshotretentiondays int64) *Validate { + + r.req.ModelSnapshotRetentionDays = &modelsnapshotretentiondays + + return r +} + +// API name: results_index_name +func (r *Validate) ResultsIndexName(indexname string) *Validate { + r.req.ResultsIndexName = &indexname + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validatedetector/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validatedetector/request.go new file mode 100644 index 000000000..5c007c909 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validatedetector/request.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package validatedetector + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package validatedetector +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/validate_detector/MlValidateDetectorRequest.ts#L23-L31 +type Request = types.Detector diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validatedetector/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validatedetector/response.go index a5d402c9a..4d25b3f6d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validatedetector/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validatedetector/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package validatedetector // Response holds the response body struct for the package validatedetector // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/validate_detector/MlValidateDetectorResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/validate_detector/MlValidateDetectorResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validatedetector/validate_detector.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validatedetector/validate_detector.go index 4fd6d0c40..05dfe7a46 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validatedetector/validate_detector.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ml/validatedetector/validate_detector.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Validates an anomaly detection detector. package validatedetector @@ -34,6 +34,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/excludefrequent" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. 
@@ -48,8 +49,9 @@ type ValidateDetector struct { buf *gobytes.Buffer - req *types.Detector - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -90,7 +92,7 @@ func (r *ValidateDetector) Raw(raw io.Reader) *ValidateDetector { } // Request allows to set the request property with the appropriate payload. -func (r *ValidateDetector) Request(req *types.Detector) *ValidateDetector { +func (r *ValidateDetector) Request(req *Request) *ValidateDetector { r.req = req return r @@ -105,9 +107,19 @@ func (r *ValidateDetector) HttpRequest(ctx context.Context) (*http.Request, erro var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +127,7 @@ func (r *ValidateDetector) HttpRequest(ctx context.Context) (*http.Request, erro } r.buf.Write(data) + } r.path.Scheme = "http" @@ -198,7 +211,6 @@ func (r ValidateDetector) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -207,6 +219,10 @@ func (r ValidateDetector) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,3 +232,101 @@ func (r *ValidateDetector) Header(key, value string) *ValidateDetector { return r } + +// ByFieldName The field used to split the data. In particular, this property is used for +// analyzing the splits with respect to their own history. It is used for +// finding unusual values in the context of the split. +// API name: by_field_name +func (r *ValidateDetector) ByFieldName(field string) *ValidateDetector { + r.req.ByFieldName = &field + + return r +} + +// CustomRules Custom rules enable you to customize the way detectors operate. 
For example, +// a rule may dictate conditions under which results should be skipped. Kibana +// refers to custom rules as job rules. +// API name: custom_rules +func (r *ValidateDetector) CustomRules(customrules ...types.DetectionRule) *ValidateDetector { + r.req.CustomRules = customrules + + return r +} + +// DetectorDescription A description of the detector. +// API name: detector_description +func (r *ValidateDetector) DetectorDescription(detectordescription string) *ValidateDetector { + + r.req.DetectorDescription = &detectordescription + + return r +} + +// DetectorIndex A unique identifier for the detector. This identifier is based on the order +// of the detectors in the `analysis_config`, starting at zero. If you specify a +// value for this property, it is ignored. +// API name: detector_index +func (r *ValidateDetector) DetectorIndex(detectorindex int) *ValidateDetector { + r.req.DetectorIndex = &detectorindex + + return r +} + +// ExcludeFrequent If set, frequent entities are excluded from influencing the anomaly results. +// Entities can be considered frequent over time or frequent in a population. If +// you are working with both over and by fields, you can set `exclude_frequent` +// to `all` for both fields, or to `by` or `over` for those specific fields. +// API name: exclude_frequent +func (r *ValidateDetector) ExcludeFrequent(excludefrequent excludefrequent.ExcludeFrequent) *ValidateDetector { + r.req.ExcludeFrequent = &excludefrequent + + return r +} + +// FieldName The field that the detector uses in the function. If you use an event rate +// function such as count or rare, do not specify this field. The `field_name` +// cannot contain double quotes or backslashes. +// API name: field_name +func (r *ValidateDetector) FieldName(field string) *ValidateDetector { + r.req.FieldName = &field + + return r +} + +// Function The analysis function that is used. For example, `count`, `rare`, `mean`, +// `min`, `max`, or `sum`. 
+// API name: function +func (r *ValidateDetector) Function(function string) *ValidateDetector { + + r.req.Function = &function + + return r +} + +// OverFieldName The field used to split the data. In particular, this property is used for +// analyzing the splits with respect to the history of all splits. It is used +// for finding unusual values in the population of all splits. +// API name: over_field_name +func (r *ValidateDetector) OverFieldName(field string) *ValidateDetector { + r.req.OverFieldName = &field + + return r +} + +// PartitionFieldName The field used to segment the analysis. When you use this property, you have +// completely independent baselines for each value of this field. +// API name: partition_field_name +func (r *ValidateDetector) PartitionFieldName(field string) *ValidateDetector { + r.req.PartitionFieldName = &field + + return r +} + +// UseNull Defines whether a new series is used as the null series when there is no +// value for the by or partition fields. +// API name: use_null +func (r *ValidateDetector) UseNull(usenull bool) *ValidateDetector { + r.req.UseNull = &usenull + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/monitoring/bulk/bulk.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/monitoring/bulk/bulk.go new file mode 100644 index 000000000..ebef5c38c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/monitoring/bulk/bulk.go @@ -0,0 +1,282 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Used by the monitoring features to send monitoring data. +package bulk + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + type_Mask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Bulk struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + req *Request + deferred []func(request *Request) error + raw io.Reader + + paramSet int + + type_ string +} + +// NewBulk type alias for index. +type NewBulk func() *Bulk + +// NewBulkFunc returns a new instance of Bulk with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewBulkFunc(tp elastictransport.Interface) NewBulk { + return func() *Bulk { + n := New(tp) + + return n + } +} + +// Used by the monitoring features to send monitoring data. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/monitor-elasticsearch-cluster.html +func New(tp elastictransport.Interface) *Bulk { + r := &Bulk{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Bulk) Raw(raw io.Reader) *Bulk { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Bulk) Request(req *Request) *Bulk { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Bulk) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw != nil { + r.buf.ReadFrom(r.raw) + } else if r.req != nil { + + for _, elem := range *r.req { + data, err := json.Marshal(elem) + if err != nil { + return nil, err + } + r.buf.Write(data) + r.buf.Write([]byte("\n")) + } + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Bulk: %w", err) + } + + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_monitoring") + path.WriteString("/") + path.WriteString("bulk") + + method = http.MethodPost + case r.paramSet == type_Mask: + path.WriteString("/") + path.WriteString("_monitoring") + path.WriteString("/") + + path.WriteString(r.type_) + path.WriteString("/") + path.WriteString("bulk") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = 
http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+x-ndjson;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Bulk) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the Bulk query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a bulk.Response +func (r Bulk) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// Header set a key, value pair in the Bulk headers map. 
+func (r *Bulk) Header(key, value string) *Bulk { + r.headers.Set(key, value) + + return r +} + +// Type Default document type for items which don't provide one +// API Name: type_ +func (r *Bulk) Type(type_ string) *Bulk { + r.paramSet |= type_Mask + r.type_ = type_ + + return r +} + +// SystemId Identifier of the monitored system +// API name: system_id +func (r *Bulk) SystemId(systemid string) *Bulk { + r.values.Set("system_id", systemid) + + return r +} + +// API name: system_api_version +func (r *Bulk) SystemApiVersion(systemapiversion string) *Bulk { + r.values.Set("system_api_version", systemapiversion) + + return r +} + +// Interval Collection interval (e.g., '10s' or '10000ms') of the payload +// API name: interval +func (r *Bulk) Interval(duration string) *Bulk { + r.values.Set("interval", duration) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/monitoring/bulk/helpers.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/monitoring/bulk/helpers.go new file mode 100644 index 000000000..f211ae040 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/monitoring/bulk/helpers.go @@ -0,0 +1,233 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package bulk + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// CreateOp is a helper function to add a CreateOperation to the current bulk request. +// doc argument can be a []byte, json.RawMessage or a struct. +func (r *Bulk) CreateOp(op types.CreateOperation, doc interface{}) error { + operation := types.OperationContainer{Create: &op} + header, err := json.Marshal(operation) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + + if _, err := r.buf.Write(header); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + + switch v := doc.(type) { + case []byte: + if json.Valid(v) { + if _, err := r.buf.Write(v); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + } else { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: invalid json") + } + case json.RawMessage: + if json.Valid(v) { + if _, err := r.buf.Write(v); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + } else { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: invalid json") + } + default: + body, err := json.Marshal(doc) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + if _, err := r.buf.Write(body); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + } + + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.CreateOp: %w", err) + } + + return nil +} + +// IndexOp is a helper function to add an IndexOperation to the current bulk request. +// doc argument can be a []byte, json.RawMessage or a struct. 
+func (r *Bulk) IndexOp(op types.IndexOperation, doc interface{}) error { + operation := types.OperationContainer{Index: &op} + header, err := json.Marshal(operation) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + + if _, err := r.buf.Write(header); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + + switch v := doc.(type) { + case []byte: + if json.Valid(v) { + if _, err := r.buf.Write(v); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + } else { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: invalid json") + } + case json.RawMessage: + if json.Valid(v) { + if _, err := r.buf.Write(v); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + } else { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: invalid json") + } + default: + body, err := json.Marshal(doc) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + if _, err := r.buf.Write(body); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + } + + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.IndexOp: %w", err) + } + + return nil +} + +// UpdateOp is a helper function to add an UpdateOperation with and UpdateAction to the current bulk request. +// update is optional, if both doc and update.Doc are provided, update.Doc has precedence. 
+func (r *Bulk) UpdateOp(op types.UpdateOperation, doc interface{}, update *types.UpdateAction) error { + operation := types.OperationContainer{Update: &op} + header, err := json.Marshal(operation) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + + if _, err := r.buf.Write(header); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + + if update == nil { + update = types.NewUpdateAction() + } + + if len(update.Doc) == 0 { + switch v := doc.(type) { + case []byte: + if json.Valid(v) { + update.Doc = v + } else { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: invalid json") + } + case json.RawMessage: + if json.Valid(v) { + update.Doc = v + } else { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: invalid json") + } + default: + body, err := json.Marshal(doc) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + update.Doc = body + } + } + + body, err := json.Marshal(update) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + if _, err := r.buf.Write(body); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.UpdateOp: %w", err) + } + + return nil +} + +// DeleteOp is a helper function to add a DeleteOperation to the current bulk request. 
+func (r *Bulk) DeleteOp(op types.DeleteOperation) error { + operation := types.OperationContainer{Delete: &op} + header, err := json.Marshal(operation) + if err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.DeleteOp: %w", err) + } + + if _, err := r.buf.Write(header); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.DeleteOp: %w", err) + } + if _, err := r.buf.Write([]byte("\n")); err != nil { + r.buf.Reset() + return fmt.Errorf("bulk.DeleteOp: %w", err) + } + + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/monitoring/bulk/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/monitoring/bulk/request.go new file mode 100644 index 000000000..d78192628 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/monitoring/bulk/request.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package bulk + +// Request holds the request body struct for the package bulk +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/monitoring/bulk/BulkMonitoringRequest.ts#L24-L59 +type Request = []interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/monitoring/bulk/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/monitoring/bulk/response.go new file mode 100644 index 000000000..8520afcba --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/monitoring/bulk/response.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package bulk + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package bulk +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/monitoring/bulk/BulkMonitoringResponse.ts#L23-L32 + +type Response struct { + Error *types.ErrorCause `json:"error,omitempty"` + // Errors True if there is was an error + Errors bool `json:"errors"` + // Ignored Was collection disabled? + Ignored bool `json:"ignored"` + Took int64 `json:"took"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/clearrepositoriesmeteringarchive/clear_repositories_metering_archive.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/clearrepositoriesmeteringarchive/clear_repositories_metering_archive.go index 90a95a4fc..a4be52e97 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/clearrepositoriesmeteringarchive/clear_repositories_metering_archive.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/clearrepositoriesmeteringarchive/clear_repositories_metering_archive.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Removes the archived repositories metering information present in the // cluster. 
@@ -71,9 +71,9 @@ func NewClearRepositoriesMeteringArchiveFunc(tp elastictransport.Interface) NewC return func(nodeid, maxarchiveversion string) *ClearRepositoriesMeteringArchive { n := New(tp) - n.NodeId(nodeid) + n._nodeid(nodeid) - n.MaxArchiveVersion(maxarchiveversion) + n._maxarchiveversion(maxarchiveversion) return n } @@ -180,7 +180,6 @@ func (r ClearRepositoriesMeteringArchive) Do(ctx context.Context) (*Response, er } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -189,6 +188,10 @@ func (r ClearRepositoriesMeteringArchive) Do(ctx context.Context) (*Response, er return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -224,9 +227,9 @@ func (r *ClearRepositoriesMeteringArchive) Header(key, value string) *ClearRepos // All the nodes selective options are explained // [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). // API Name: nodeid -func (r *ClearRepositoriesMeteringArchive) NodeId(v string) *ClearRepositoriesMeteringArchive { +func (r *ClearRepositoriesMeteringArchive) _nodeid(nodeid string) *ClearRepositoriesMeteringArchive { r.paramSet |= nodeidMask - r.nodeid = v + r.nodeid = nodeid return r } @@ -235,9 +238,9 @@ func (r *ClearRepositoriesMeteringArchive) NodeId(v string) *ClearRepositoriesMe // [archive_version](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html#get-repositories-metering-api-response-body) // to be cleared from the archive. 
// API Name: maxarchiveversion -func (r *ClearRepositoriesMeteringArchive) MaxArchiveVersion(v string) *ClearRepositoriesMeteringArchive { +func (r *ClearRepositoriesMeteringArchive) _maxarchiveversion(maxarchiveversion string) *ClearRepositoriesMeteringArchive { r.paramSet |= maxarchiveversionMask - r.maxarchiveversion = v + r.maxarchiveversion = maxarchiveversion return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/clearrepositoriesmeteringarchive/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/clearrepositoriesmeteringarchive/response.go index 9a1d37b01..4e3d79c3c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/clearrepositoriesmeteringarchive/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/clearrepositoriesmeteringarchive/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package clearrepositoriesmeteringarchive @@ -26,13 +26,16 @@ import ( // Response holds the response body struct for the package clearrepositoriesmeteringarchive // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveResponse.ts#L36-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/clear_repositories_metering_archive/ClearRepositoriesMeteringArchiveResponse.ts#L36-L38 type Response struct { // ClusterName Name of the cluster. Based on the [Cluster name // setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
ClusterName string `json:"cluster_name"` + // NodeStats Contains statistics about the number of nodes selected by the request’s node + // filters. + NodeStats *types.NodeStatistics `json:"_nodes,omitempty"` // Nodes Contains repositories metering information for the nodes selected by the // request. Nodes map[string]types.RepositoryMeteringInformation `json:"nodes"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/getrepositoriesmeteringinfo/get_repositories_metering_info.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/getrepositoriesmeteringinfo/get_repositories_metering_info.go index ca95120ef..54c5a340d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/getrepositoriesmeteringinfo/get_repositories_metering_info.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/getrepositoriesmeteringinfo/get_repositories_metering_info.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns cluster repositories metering information. 
package getrepositoriesmeteringinfo @@ -67,7 +67,7 @@ func NewGetRepositoriesMeteringInfoFunc(tp elastictransport.Interface) NewGetRep return func(nodeid string) *GetRepositoriesMeteringInfo { n := New(tp) - n.NodeId(nodeid) + n._nodeid(nodeid) return n } @@ -170,7 +170,6 @@ func (r GetRepositoriesMeteringInfo) Do(ctx context.Context) (*Response, error) } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r GetRepositoriesMeteringInfo) Do(ctx context.Context) (*Response, error) return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +217,9 @@ func (r *GetRepositoriesMeteringInfo) Header(key, value string) *GetRepositories // All the nodes selective options are explained // [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). // API Name: nodeid -func (r *GetRepositoriesMeteringInfo) NodeId(v string) *GetRepositoriesMeteringInfo { +func (r *GetRepositoriesMeteringInfo) _nodeid(nodeid string) *GetRepositoriesMeteringInfo { r.paramSet |= nodeidMask - r.nodeid = v + r.nodeid = nodeid return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/getrepositoriesmeteringinfo/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/getrepositoriesmeteringinfo/response.go index 2a3111025..008ae4204 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/getrepositoriesmeteringinfo/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/getrepositoriesmeteringinfo/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getrepositoriesmeteringinfo @@ -26,13 +26,16 @@ import ( // Response holds the response body struct for the package getrepositoriesmeteringinfo // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/get_repositories_metering_info/GetRepositoriesMeteringInfoResponse.ts#L36-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/get_repositories_metering_info/GetRepositoriesMeteringInfoResponse.ts#L36-L38 type Response struct { // ClusterName Name of the cluster. Based on the [Cluster name // setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). ClusterName string `json:"cluster_name"` + // NodeStats Contains statistics about the number of nodes selected by the request’s node + // filters. + NodeStats *types.NodeStatistics `json:"_nodes,omitempty"` // Nodes Contains repositories metering information for the nodes selected by the // request. Nodes map[string]types.RepositoryMeteringInformation `json:"nodes"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/hotthreads/hot_threads.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/hotthreads/hot_threads.go index 803600ad3..552005a08 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/hotthreads/hot_threads.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/hotthreads/hot_threads.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about hot threads on each node in the cluster. package hotthreads @@ -36,7 +36,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/threadtype" ) @@ -178,7 +177,6 @@ func (r HotThreads) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -187,6 +185,10 @@ func (r HotThreads) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -220,9 +222,9 @@ func (r *HotThreads) Header(key, value string) *HotThreads { // NodeId List of node IDs or names used to limit returned information. // API Name: nodeid -func (r *HotThreads) NodeId(v string) *HotThreads { +func (r *HotThreads) NodeId(nodeid string) *HotThreads { r.paramSet |= nodeidMask - r.nodeid = v + r.nodeid = nodeid return r } @@ -230,24 +232,24 @@ func (r *HotThreads) NodeId(v string) *HotThreads { // IgnoreIdleThreads If true, known idle threads (e.g. waiting in a socket select, or to get // a task from an empty queue) are filtered out. // API name: ignore_idle_threads -func (r *HotThreads) IgnoreIdleThreads(b bool) *HotThreads { - r.values.Set("ignore_idle_threads", strconv.FormatBool(b)) +func (r *HotThreads) IgnoreIdleThreads(ignoreidlethreads bool) *HotThreads { + r.values.Set("ignore_idle_threads", strconv.FormatBool(ignoreidlethreads)) return r } // Interval The interval to do the second sampling of threads. 
// API name: interval -func (r *HotThreads) Interval(v string) *HotThreads { - r.values.Set("interval", v) +func (r *HotThreads) Interval(duration string) *HotThreads { + r.values.Set("interval", duration) return r } // Snapshots Number of samples of thread stacktrace. // API name: snapshots -func (r *HotThreads) Snapshots(v string) *HotThreads { - r.values.Set("snapshots", v) +func (r *HotThreads) Snapshots(snapshots string) *HotThreads { + r.values.Set("snapshots", snapshots) return r } @@ -256,16 +258,16 @@ func (r *HotThreads) Snapshots(v string) *HotThreads { // is received before the timeout expires, the request fails and // returns an error. // API name: master_timeout -func (r *HotThreads) MasterTimeout(v string) *HotThreads { - r.values.Set("master_timeout", v) +func (r *HotThreads) MasterTimeout(duration string) *HotThreads { + r.values.Set("master_timeout", duration) return r } // Threads Specifies the number of hot threads to provide information for. // API name: threads -func (r *HotThreads) Threads(v string) *HotThreads { - r.values.Set("threads", v) +func (r *HotThreads) Threads(threads string) *HotThreads { + r.values.Set("threads", threads) return r } @@ -273,24 +275,24 @@ func (r *HotThreads) Threads(v string) *HotThreads { // Timeout Period to wait for a response. If no response is received // before the timeout expires, the request fails and returns an error. // API name: timeout -func (r *HotThreads) Timeout(v string) *HotThreads { - r.values.Set("timeout", v) +func (r *HotThreads) Timeout(duration string) *HotThreads { + r.values.Set("timeout", duration) return r } // Type The type to sample. 
// API name: type -func (r *HotThreads) Type(enum threadtype.ThreadType) *HotThreads { - r.values.Set("type", enum.String()) +func (r *HotThreads) Type(type_ threadtype.ThreadType) *HotThreads { + r.values.Set("type", type_.String()) return r } // Sort The sort order for 'cpu' type (default: total) // API name: sort -func (r *HotThreads) Sort(enum threadtype.ThreadType) *HotThreads { - r.values.Set("sort", enum.String()) +func (r *HotThreads) Sort(sort threadtype.ThreadType) *HotThreads { + r.values.Set("sort", sort.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/hotthreads/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/hotthreads/response.go index 20251f17f..ab3029fd7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/hotthreads/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/hotthreads/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package hotthreads @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package hotthreads // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/hot_threads/NodesHotThreadsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/hot_threads/NodesHotThreadsResponse.ts#L22-L24 type Response struct { HotThreads []types.HotThread `json:"hot_threads"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/info/info.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/info/info.go index 306e6566f..e2f70d482 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/info/info.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/info/info.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about nodes in the cluster. package info @@ -194,7 +194,6 @@ func (r Info) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -203,6 +202,10 @@ func (r Info) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -236,9 +239,9 @@ func (r *Info) Header(key, value string) *Info { // NodeId Comma-separated list of node IDs or names used to limit returned information. 
// API Name: nodeid -func (r *Info) NodeId(v string) *Info { +func (r *Info) NodeId(nodeid string) *Info { r.paramSet |= nodeidMask - r.nodeid = v + r.nodeid = nodeid return r } @@ -246,17 +249,17 @@ func (r *Info) NodeId(v string) *Info { // Metric Limits the information returned to the specific metrics. Supports a // comma-separated list, such as http,ingest. // API Name: metric -func (r *Info) Metric(v string) *Info { +func (r *Info) Metric(metric string) *Info { r.paramSet |= metricMask - r.metric = v + r.metric = metric return r } // FlatSettings If true, returns settings in flat format. // API name: flat_settings -func (r *Info) FlatSettings(b bool) *Info { - r.values.Set("flat_settings", strconv.FormatBool(b)) +func (r *Info) FlatSettings(flatsettings bool) *Info { + r.values.Set("flat_settings", strconv.FormatBool(flatsettings)) return r } @@ -264,8 +267,8 @@ func (r *Info) FlatSettings(b bool) *Info { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *Info) MasterTimeout(v string) *Info { - r.values.Set("master_timeout", v) +func (r *Info) MasterTimeout(duration string) *Info { + r.values.Set("master_timeout", duration) return r } @@ -273,8 +276,8 @@ func (r *Info) MasterTimeout(v string) *Info { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. 
// API name: timeout -func (r *Info) Timeout(v string) *Info { - r.values.Set("timeout", v) +func (r *Info) Timeout(duration string) *Info { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/info/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/info/response.go index a93aeb154..45916dd05 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/info/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/info/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package info @@ -26,11 +26,14 @@ import ( // Response holds the response body struct for the package info // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/NodesInfoResponse.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/NodesInfoResponse.ts#L30-L32 type Response struct { - ClusterName string `json:"cluster_name"` - Nodes map[string]types.NodeInfo `json:"nodes"` + ClusterName string `json:"cluster_name"` + // NodeStats Contains statistics about the number of nodes selected by the request’s node + // filters. 
+ NodeStats *types.NodeStatistics `json:"_nodes,omitempty"` + Nodes map[string]types.NodeInfo `json:"nodes"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/reloadsecuresettings/reload_secure_settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/reloadsecuresettings/reload_secure_settings.go index 4d8788126..b3671fd7e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/reloadsecuresettings/reload_secure_settings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/reloadsecuresettings/reload_secure_settings.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Reloads secure settings. package reloadsecuresettings @@ -52,8 +52,9 @@ type ReloadSecureSettings struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -82,6 +83,8 @@ func New(tp elastictransport.Interface) *ReloadSecureSettings { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -111,9 +114,19 @@ func (r *ReloadSecureSettings) HttpRequest(ctx context.Context) (*http.Request, var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -121,6 +134,7 @@ func (r *ReloadSecureSettings) HttpRequest(ctx context.Context) (*http.Request, } r.buf.Write(data) + } r.path.Scheme = "http" @@ -210,7 +224,6 @@ func (r ReloadSecureSettings) Do(ctx 
context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -219,6 +232,10 @@ func (r ReloadSecureSettings) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -229,20 +246,29 @@ func (r *ReloadSecureSettings) Header(key, value string) *ReloadSecureSettings { return r } -// NodeId A comma-separated list of node IDs to span the reload/reinit call. Should -// stay empty because reloading usually involves all cluster nodes. +// NodeId The names of particular nodes in the cluster to target. // API Name: nodeid -func (r *ReloadSecureSettings) NodeId(v string) *ReloadSecureSettings { +func (r *ReloadSecureSettings) NodeId(nodeid string) *ReloadSecureSettings { r.paramSet |= nodeidMask - r.nodeid = v + r.nodeid = nodeid return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *ReloadSecureSettings) Timeout(v string) *ReloadSecureSettings { - r.values.Set("timeout", v) +func (r *ReloadSecureSettings) Timeout(duration string) *ReloadSecureSettings { + r.values.Set("timeout", duration) + + return r +} + +// SecureSettingsPassword The password for the Elasticsearch keystore. 
+// API name: secure_settings_password +func (r *ReloadSecureSettings) SecureSettingsPassword(password string) *ReloadSecureSettings { + r.req.SecureSettingsPassword = &password return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/reloadsecuresettings/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/reloadsecuresettings/request.go index ebee8ff76..c8ded8871 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/reloadsecuresettings/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/reloadsecuresettings/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package reloadsecuresettings @@ -27,8 +27,10 @@ import ( // Request holds the request body struct for the package reloadsecuresettings // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/reload_secure_settings/ReloadSecureSettingsRequest.ts#L24-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/reload_secure_settings/ReloadSecureSettingsRequest.ts#L24-L50 type Request struct { + + // SecureSettingsPassword The password for the Elasticsearch keystore. 
SecureSettingsPassword *string `json:"secure_settings_password,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/reloadsecuresettings/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/reloadsecuresettings/response.go index dd8b83934..7c8c166ad 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/reloadsecuresettings/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/reloadsecuresettings/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package reloadsecuresettings @@ -26,11 +26,14 @@ import ( // Response holds the response body struct for the package reloadsecuresettings // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/reload_secure_settings/ReloadSecureSettingsResponse.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/reload_secure_settings/ReloadSecureSettingsResponse.ts#L30-L32 type Response struct { - ClusterName string `json:"cluster_name"` - Nodes map[string]types.NodeReloadResult `json:"nodes"` + ClusterName string `json:"cluster_name"` + // NodeStats Contains statistics about the number of nodes selected by the request’s node + // filters. 
+ NodeStats *types.NodeStatistics `json:"_nodes,omitempty"` + Nodes map[string]types.NodeReloadResult `json:"nodes"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/stats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/stats/response.go index d77bf7348..2ffc589cc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/stats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package stats @@ -26,11 +26,14 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/stats/NodesStatsResponse.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/stats/NodesStatsResponse.ts#L30-L32 type Response struct { - ClusterName *string `json:"cluster_name,omitempty"` - Nodes map[string]types.Stats `json:"nodes"` + ClusterName *string `json:"cluster_name,omitempty"` + // NodeStats Contains statistics about the number of nodes selected by the request’s node + // filters. 
+ NodeStats *types.NodeStatistics `json:"_nodes,omitempty"` + Nodes map[string]types.Stats `json:"nodes"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/stats/stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/stats/stats.go index 083d35f3f..059c46c34 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/stats/stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/stats/stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns statistical information about nodes in the cluster. package stats @@ -36,7 +36,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/level" ) @@ -236,7 +235,6 @@ func (r Stats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -245,6 +243,10 @@ func (r Stats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -278,18 +280,18 @@ func (r *Stats) Header(key, value string) *Stats { // NodeId Comma-separated list of node IDs or names used to limit returned information. 
// API Name: nodeid -func (r *Stats) NodeId(v string) *Stats { +func (r *Stats) NodeId(nodeid string) *Stats { r.paramSet |= nodeidMask - r.nodeid = v + r.nodeid = nodeid return r } // Metric Limit the information returned to the specified metrics // API Name: metric -func (r *Stats) Metric(v string) *Stats { +func (r *Stats) Metric(metric string) *Stats { r.paramSet |= metricMask - r.metric = v + r.metric = metric return r } @@ -297,9 +299,9 @@ func (r *Stats) Metric(v string) *Stats { // IndexMetric Limit the information returned for indices metric to the specific index // metrics. It can be used only if indices (or all) metric is specified. // API Name: indexmetric -func (r *Stats) IndexMetric(v string) *Stats { +func (r *Stats) IndexMetric(indexmetric string) *Stats { r.paramSet |= indexmetricMask - r.indexmetric = v + r.indexmetric = indexmetric return r } @@ -307,8 +309,8 @@ func (r *Stats) IndexMetric(v string) *Stats { // CompletionFields Comma-separated list or wildcard expressions of fields to include in // fielddata and suggest statistics. // API name: completion_fields -func (r *Stats) CompletionFields(v string) *Stats { - r.values.Set("completion_fields", v) +func (r *Stats) CompletionFields(fields ...string) *Stats { + r.values.Set("completion_fields", strings.Join(fields, ",")) return r } @@ -316,8 +318,8 @@ func (r *Stats) CompletionFields(v string) *Stats { // FielddataFields Comma-separated list or wildcard expressions of fields to include in // fielddata statistics. // API name: fielddata_fields -func (r *Stats) FielddataFields(v string) *Stats { - r.values.Set("fielddata_fields", v) +func (r *Stats) FielddataFields(fields ...string) *Stats { + r.values.Set("fielddata_fields", strings.Join(fields, ",")) return r } @@ -325,16 +327,16 @@ func (r *Stats) FielddataFields(v string) *Stats { // Fields Comma-separated list or wildcard expressions of fields to include in the // statistics. 
// API name: fields -func (r *Stats) Fields(v string) *Stats { - r.values.Set("fields", v) +func (r *Stats) Fields(fields ...string) *Stats { + r.values.Set("fields", strings.Join(fields, ",")) return r } // Groups Comma-separated list of search groups to include in the search statistics. // API name: groups -func (r *Stats) Groups(b bool) *Stats { - r.values.Set("groups", strconv.FormatBool(b)) +func (r *Stats) Groups(groups bool) *Stats { + r.values.Set("groups", strconv.FormatBool(groups)) return r } @@ -342,8 +344,8 @@ func (r *Stats) Groups(b bool) *Stats { // IncludeSegmentFileSizes If true, the call reports the aggregated disk usage of each one of the Lucene // index files (only applies if segment stats are requested). // API name: include_segment_file_sizes -func (r *Stats) IncludeSegmentFileSizes(b bool) *Stats { - r.values.Set("include_segment_file_sizes", strconv.FormatBool(b)) +func (r *Stats) IncludeSegmentFileSizes(includesegmentfilesizes bool) *Stats { + r.values.Set("include_segment_file_sizes", strconv.FormatBool(includesegmentfilesizes)) return r } @@ -351,8 +353,8 @@ func (r *Stats) IncludeSegmentFileSizes(b bool) *Stats { // Level Indicates whether statistics are aggregated at the cluster, index, or shard // level. // API name: level -func (r *Stats) Level(enum level.Level) *Stats { - r.values.Set("level", enum.String()) +func (r *Stats) Level(level level.Level) *Stats { + r.values.Set("level", level.String()) return r } @@ -360,8 +362,8 @@ func (r *Stats) Level(enum level.Level) *Stats { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. 
// API name: master_timeout -func (r *Stats) MasterTimeout(v string) *Stats { - r.values.Set("master_timeout", v) +func (r *Stats) MasterTimeout(duration string) *Stats { + r.values.Set("master_timeout", duration) return r } @@ -369,25 +371,29 @@ func (r *Stats) MasterTimeout(v string) *Stats { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. // API name: timeout -func (r *Stats) Timeout(v string) *Stats { - r.values.Set("timeout", v) +func (r *Stats) Timeout(duration string) *Stats { + r.values.Set("timeout", duration) return r } // Types A comma-separated list of document types for the indexing index metric. // API name: types -func (r *Stats) Types(v string) *Stats { - r.values.Set("types", v) +func (r *Stats) Types(types ...string) *Stats { + tmp := []string{} + for _, item := range types { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("types", strings.Join(tmp, ",")) return r } -// IncludeUnloadedSegments If set to true segment stats will include stats for segments that are not -// currently loaded into memory +// IncludeUnloadedSegments If `true`, the response includes information from segments that are not +// loaded into memory. 
// API name: include_unloaded_segments -func (r *Stats) IncludeUnloadedSegments(b bool) *Stats { - r.values.Set("include_unloaded_segments", strconv.FormatBool(b)) +func (r *Stats) IncludeUnloadedSegments(includeunloadedsegments bool) *Stats { + r.values.Set("include_unloaded_segments", strconv.FormatBool(includeunloadedsegments)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/usage/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/usage/response.go index 1f20b0820..86fb49dcd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/usage/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/usage/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package usage @@ -26,11 +26,14 @@ import ( // Response holds the response body struct for the package usage // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/usage/NodesUsageResponse.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/usage/NodesUsageResponse.ts#L30-L32 type Response struct { - ClusterName string `json:"cluster_name"` - Nodes map[string]types.NodeUsage `json:"nodes"` + ClusterName string `json:"cluster_name"` + // NodeStats Contains statistics about the number of nodes selected by the request’s node + // filters. 
+ NodeStats *types.NodeStatistics `json:"_nodes,omitempty"` + Nodes map[string]types.NodeUsage `json:"nodes"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/usage/usage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/usage/usage.go index 340c364ff..1c1e5981f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/usage/usage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/nodes/usage/usage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns low-level information about REST actions usage on nodes. package usage @@ -201,7 +201,6 @@ func (r Usage) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -210,6 +209,10 @@ func (r Usage) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -245,26 +248,29 @@ func (r *Usage) Header(key, value string) *Usage { // information; use `_local` to return information from the node you're // connecting to, leave empty to get information from all nodes // API Name: nodeid -func (r *Usage) NodeId(v string) *Usage { +func (r *Usage) NodeId(nodeid string) *Usage { r.paramSet |= nodeidMask - r.nodeid = v + r.nodeid = nodeid return r } -// Metric Limit the information returned to the specified metrics +// Metric Limits the information returned to the specific metrics. +// A comma-separated list of the following options: `_all`, `rest_actions`. 
// API Name: metric -func (r *Usage) Metric(v string) *Usage { +func (r *Usage) Metric(metric string) *Usage { r.paramSet |= metricMask - r.metric = v + r.metric = metric return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *Usage) Timeout(v string) *Usage { - r.values.Set("timeout", v) +func (r *Usage) Timeout(duration string) *Usage { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/delete/delete.go new file mode 100644 index 000000000..189bfad3e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/delete/delete.go @@ -0,0 +1,221 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Deletes a query ruleset. 
+package delete + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + rulesetidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Delete struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + rulesetid string +} + +// NewDelete type alias for index. +type NewDelete func(rulesetid string) *Delete + +// NewDeleteFunc returns a new instance of Delete with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteFunc(tp elastictransport.Interface) NewDelete { + return func(rulesetid string) *Delete { + n := New(tp) + + n._rulesetid(rulesetid) + + return n + } +} + +// Deletes a query ruleset. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-query-ruleset.html +func New(tp elastictransport.Interface) *Delete { + r := &Delete{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Delete) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == rulesetidMask: + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + + path.WriteString(r.rulesetid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Delete) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the Delete query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a delete.Response +func (r Delete) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Delete) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the Delete headers map. 
+func (r *Delete) Header(key, value string) *Delete { + r.headers.Set(key, value) + + return r +} + +// RulesetId The unique identifier of the query ruleset to delete +// API Name: rulesetid +func (r *Delete) _rulesetid(rulesetid string) *Delete { + r.paramSet |= rulesetidMask + r.rulesetid = rulesetid + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/deletepipeline/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/delete/response.go similarity index 69% rename from vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/deletepipeline/response.go rename to vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/delete/response.go index 74d63f6d6..f33df7ea2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/logstash/deletepipeline/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/delete/response.go @@ -16,15 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 -package deletepipeline +package delete -// Response holds the response body struct for the package deletepipeline +// Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/logstash/delete_pipeline/LogstashDeletePipelineResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/query_ruleset/delete/QueryRulesetDeleteResponse.ts#L22-L24 type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. 
+ Acknowledged bool `json:"acknowledged"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/get/get.go new file mode 100644 index 000000000..52489ecf0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/get/get.go @@ -0,0 +1,221 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Returns the details about a query ruleset. +package get + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + rulesetidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. 
+var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Get struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + rulesetid string +} + +// NewGet type alias for index. +type NewGet func(rulesetid string) *Get + +// NewGetFunc returns a new instance of Get with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetFunc(tp elastictransport.Interface) NewGet { + return func(rulesetid string) *Get { + n := New(tp) + + n._rulesetid(rulesetid) + + return n + } +} + +// Returns the details about a query ruleset. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/get-query-ruleset.html +func New(tp elastictransport.Interface) *Get { + r := &Get{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Get) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == rulesetidMask: + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + + path.WriteString(r.rulesetid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Get) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the Get query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a get.Response +func (r Get) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Get) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the Get headers map. 
+func (r *Get) Header(key, value string) *Get { + r.headers.Set(key, value) + + return r +} + +// RulesetId The unique identifier of the query ruleset +// API Name: rulesetid +func (r *Get) _rulesetid(rulesetid string) *Get { + r.paramSet |= rulesetidMask + r.rulesetid = rulesetid + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/get/response.go new file mode 100644 index 000000000..ee1b45c07 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/get/response.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package get + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package get +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/query_ruleset/get/QueryRulesetGetResponse.ts#L22-L24 + +type Response struct { + + // Rules Rules associated with the query ruleset + Rules []types.QueryRule `json:"rules"` + // RulesetId Query Ruleset unique identifier + RulesetId string `json:"ruleset_id"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/list/list.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/list/list.go new file mode 100644 index 000000000..66d1af65c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/list/list.go @@ -0,0 +1,218 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Lists query rulesets. +package list + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type List struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int +} + +// NewList type alias for index. +type NewList func() *List + +// NewListFunc returns a new instance of List with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewListFunc(tp elastictransport.Interface) NewList { + return func() *List { + n := New(tp) + + return n + } +} + +// Lists query rulesets. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/list-query-rulesets.html +func New(tp elastictransport.Interface) *List { + r := &List{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *List) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_query_rules") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r List) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the List query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a list.Response +func (r List) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} 
+ +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r List) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the List headers map. +func (r *List) Header(key, value string) *List { + r.headers.Set(key, value) + + return r +} + +// From Starting offset (default: 0) +// API name: from +func (r *List) From(from int) *List { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size specifies a max number of results to get +// API name: size +func (r *List) Size(size int) *List { + r.values.Set("size", strconv.Itoa(size)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/list/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/list/response.go new file mode 100644 index 000000000..101daa647 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/list/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package list + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package list +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/query_ruleset/list/QueryRulesetListResponse.ts#L23-L28 + +type Response struct { + Count int64 `json:"count"` + Results []types.QueryRulesetListItem `json:"results"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/put/put.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/put/put.go new file mode 100644 index 000000000..f43c67487 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/put/put.go @@ -0,0 +1,256 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Creates or updates a query ruleset. +package put + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + rulesetidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Put struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + req *Request + deferred []func(request *Request) error + raw io.Reader + + paramSet int + + rulesetid string +} + +// NewPut type alias for index. +type NewPut func(rulesetid string) *Put + +// NewPutFunc returns a new instance of Put with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutFunc(tp elastictransport.Interface) NewPut { + return func(rulesetid string) *Put { + n := New(tp) + + n._rulesetid(rulesetid) + + return n + } +} + +// Creates or updates a query ruleset. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-ruleset.html +func New(tp elastictransport.Interface) *Put { + r := &Put{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + + req: NewRequest(), + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Put) Raw(raw io.Reader) *Put { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Put) Request(req *Request) *Put { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Put) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw != nil { + r.buf.ReadFrom(r.raw) + } else if r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Put: %w", err) + } + + r.buf.Write(data) + + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == rulesetidMask: + path.WriteString("/") + path.WriteString("_query_rules") + path.WriteString("/") + + path.WriteString(r.rulesetid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", 
"application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Put) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the Put query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a put.Response +func (r Put) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// Header set a key, value pair in the Put headers map. 
+func (r *Put) Header(key, value string) *Put { + r.headers.Set(key, value) + + return r +} + +// RulesetId The unique identifier of the query ruleset to be created or updated +// API Name: rulesetid +func (r *Put) _rulesetid(rulesetid string) *Put { + r.paramSet |= rulesetidMask + r.rulesetid = rulesetid + + return r +} + +// API name: rules +func (r *Put) Rules(rules ...types.QueryRule) *Put { + r.req.Rules = rules + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/put/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/put/request.go new file mode 100644 index 000000000..66d422928 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/put/request.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package put + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package put +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/query_ruleset/put/QueryRulesetPutRequest.ts#L23-L43 +type Request struct { + Rules []types.QueryRule `json:"rules"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Put request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/exists/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/put/response.go similarity index 67% rename from vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/exists/response.go rename to vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/put/response.go index 4a8fb83dd..4313c3dc6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/core/exists/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/put/response.go @@ -16,15 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 -package exists +package put -// Response holds the response body struct for the package exists +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package put // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/exists/DocumentExistsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/query_ruleset/put/QueryRulesetPutResponse.ts#L22-L26 type Response struct { + Result result.Result `json:"result"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/deletejob/delete_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/deletejob/delete_job.go index b0d1877a0..0182a99a1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/deletejob/delete_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/deletejob/delete_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an existing rollup job. 
package deletejob @@ -67,7 +67,7 @@ func NewDeleteJobFunc(tp elastictransport.Interface) NewDeleteJob { return func(id string) *DeleteJob { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -170,7 +170,6 @@ func (r DeleteJob) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r DeleteJob) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -210,11 +213,11 @@ func (r *DeleteJob) Header(key, value string) *DeleteJob { return r } -// Id The ID of the job to delete +// Id Identifier for the job. // API Name: id -func (r *DeleteJob) Id(v string) *DeleteJob { +func (r *DeleteJob) _id(id string) *DeleteJob { r.paramSet |= idMask - r.id = v + r.id = id return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/deletejob/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/deletejob/response.go index dff14e777..13689d509 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/deletejob/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/deletejob/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletejob @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package deletejob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/delete_job/DeleteRollupJobResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/delete_job/DeleteRollupJobResponse.ts#L22-L27 type Response struct { Acknowledged bool `json:"acknowledged"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getjobs/get_jobs.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getjobs/get_jobs.go index 97640a381..db0e9fc4f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getjobs/get_jobs.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getjobs/get_jobs.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves the configuration, stats, and status of rollup jobs. 
package getjobs @@ -112,7 +112,7 @@ func (r *GetJobs) HttpRequest(ctx context.Context) (*http.Request, error) { path.WriteString("_rollup") path.WriteString("/") path.WriteString("job") - path.WriteString("/") + method = http.MethodGet } @@ -175,7 +175,6 @@ func (r GetJobs) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -184,6 +183,10 @@ func (r GetJobs) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -215,12 +218,12 @@ func (r *GetJobs) Header(key, value string) *GetJobs { return r } -// Id The ID of the job(s) to fetch. Accepts glob patterns, or left blank for all -// jobs +// Id Identifier for the rollup job. +// If it is `_all` or omitted, the API returns all rollup jobs. // API Name: id -func (r *GetJobs) Id(v string) *GetJobs { +func (r *GetJobs) Id(id string) *GetJobs { r.paramSet |= idMask - r.id = v + r.id = id return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getjobs/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getjobs/response.go index d0cb15dcf..2e8951515 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getjobs/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getjobs/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getjobs @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getjobs // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/get_jobs/GetRollupJobResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/get_jobs/GetRollupJobResponse.ts#L22-L24 type Response struct { Jobs []types.RollupJob `json:"jobs"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupcaps/get_rollup_caps.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupcaps/get_rollup_caps.go index 120a972f1..da499213e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupcaps/get_rollup_caps.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupcaps/get_rollup_caps.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns the capabilities of any rollup jobs that have been configured for a // specific index or index pattern. 
@@ -114,7 +114,7 @@ func (r *GetRollupCaps) HttpRequest(ctx context.Context) (*http.Request, error) path.WriteString("_rollup") path.WriteString("/") path.WriteString("data") - path.WriteString("/") + method = http.MethodGet } @@ -177,7 +177,6 @@ func (r GetRollupCaps) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -186,6 +185,10 @@ func (r GetRollupCaps) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -217,12 +220,12 @@ func (r *GetRollupCaps) Header(key, value string) *GetRollupCaps { return r } -// Id The ID of the index to check rollup capabilities on, or left blank for all -// jobs +// Id Index, indices or index-pattern to return rollup capabilities for. +// `_all` may be used to fetch rollup capabilities from all jobs. // API Name: id -func (r *GetRollupCaps) Id(v string) *GetRollupCaps { +func (r *GetRollupCaps) Id(id string) *GetRollupCaps { r.paramSet |= idMask - r.id = v + r.id = id return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupcaps/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupcaps/response.go index 180f00f85..2c20cd377 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupcaps/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupcaps/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getrollupcaps @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getrollupcaps // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/get_rollup_caps/GetRollupCapabilitiesResponse.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/get_rollup_caps/GetRollupCapabilitiesResponse.ts#L24-L26 type Response map[string]types.RollupCapabilities diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupindexcaps/get_rollup_index_caps.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupindexcaps/get_rollup_index_caps.go index bee3ea7e3..a53825dcc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupindexcaps/get_rollup_index_caps.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupindexcaps/get_rollup_index_caps.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns the rollup capabilities of all jobs inside of a rollup index (e.g. // the index where rollup data is stored). 
@@ -68,7 +68,7 @@ func NewGetRollupIndexCapsFunc(tp elastictransport.Interface) NewGetRollupIndexC return func(index string) *GetRollupIndexCaps { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -172,7 +172,6 @@ func (r GetRollupIndexCaps) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r GetRollupIndexCaps) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,11 +215,12 @@ func (r *GetRollupIndexCaps) Header(key, value string) *GetRollupIndexCaps { return r } -// Index The rollup index or index pattern to obtain rollup capabilities from. +// Index Data stream or index to check for rollup capabilities. +// Wildcard (`*`) expressions are supported. // API Name: index -func (r *GetRollupIndexCaps) Index(v string) *GetRollupIndexCaps { +func (r *GetRollupIndexCaps) _index(index string) *GetRollupIndexCaps { r.paramSet |= indexMask - r.index = v + r.index = index return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupindexcaps/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupindexcaps/response.go index 1282085ee..d590673fe 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupindexcaps/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupindexcaps/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getrollupindexcaps @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getrollupindexcaps // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/get_rollup_index_caps/GetRollupIndexCapabilitiesResponse.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/get_rollup_index_caps/GetRollupIndexCapabilitiesResponse.ts#L24-L26 type Response map[string]types.IndexCapabilities diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/putjob/put_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/putjob/put_job.go index eae78940f..f1b67c1e4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/putjob/put_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/putjob/put_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates a rollup job. 
package putjob @@ -52,8 +52,9 @@ type PutJob struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewPutJobFunc(tp elastictransport.Interface) NewPutJob { return func(id string) *PutJob { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *PutJob { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *PutJob) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *PutJob) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -205,7 +219,6 @@ func (r PutJob) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -214,6 +227,10 @@ func (r PutJob) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -234,9 +251,106 @@ func (r *PutJob) Header(key, value string) *PutJob { // job with the same ID // since that could lead to problems with mismatched job configurations. // API Name: id -func (r *PutJob) Id(v string) *PutJob { +func (r *PutJob) _id(id string) *PutJob { r.paramSet |= idMask - r.id = v + r.id = id + + return r +} + +// Cron A cron string which defines the intervals when the rollup job should be +// executed. When the interval +// triggers, the indexer attempts to rollup the data in the index pattern. 
The +// cron pattern is unrelated +// to the time interval of the data being rolled up. For example, you may wish +// to create hourly rollups +// of your document but to only run the indexer on a daily basis at midnight, as +// defined by the cron. The +// cron pattern is defined just like a Watcher cron schedule. +// API name: cron +func (r *PutJob) Cron(cron string) *PutJob { + + r.req.Cron = cron + + return r +} + +// Groups Defines the grouping fields and aggregations that are defined for this rollup +// job. These fields will then be +// available later for aggregating into buckets. These aggs and fields can be +// used in any combination. Think of +// the groups configuration as defining a set of tools that can later be used in +// aggregations to partition the +// data. Unlike raw data, we have to think ahead to which fields and +// aggregations might be used. Rollups provide +// enough flexibility that you simply need to determine which fields are needed, +// not in what order they are needed. +// API name: groups +func (r *PutJob) Groups(groups *types.Groupings) *PutJob { + + r.req.Groups = *groups + + return r +} + +// API name: headers +func (r *PutJob) Headers(httpheaders types.HttpHeaders) *PutJob { + r.req.Headers = httpheaders + + return r +} + +// IndexPattern The index or index pattern to roll up. Supports wildcard-style patterns +// (`logstash-*`). The job attempts to +// rollup the entire index or index-pattern. +// API name: index_pattern +func (r *PutJob) IndexPattern(indexpattern string) *PutJob { + + r.req.IndexPattern = indexpattern + + return r +} + +// Metrics Defines the metrics to collect for each grouping tuple. By default, only the +// doc_counts are collected for each +// group. To make rollup useful, you will often add metrics like averages, mins, +// maxes, etc. Metrics are defined +// on a per-field basis and for each field you configure which metric should be +// collected. 
+// API name: metrics +func (r *PutJob) Metrics(metrics ...types.FieldMetric) *PutJob { + r.req.Metrics = metrics + + return r +} + +// PageSize The number of bucket results that are processed on each iteration of the +// rollup indexer. A larger value tends +// to execute faster, but requires more memory during processing. This value has +// no effect on how the data is +// rolled up; it is merely used for tweaking the speed or memory cost of the +// indexer. +// API name: page_size +func (r *PutJob) PageSize(pagesize int) *PutJob { + r.req.PageSize = pagesize + + return r +} + +// RollupIndex The index that contains the rollup results. The index can be shared with +// other rollup jobs. The data is stored so that it doesn’t interfere with +// unrelated jobs. +// API name: rollup_index +func (r *PutJob) RollupIndex(indexname string) *PutJob { + r.req.RollupIndex = indexname + + return r +} + +// Timeout Time to wait for the request to complete. +// API name: timeout +func (r *PutJob) Timeout(duration types.Duration) *PutJob { + r.req.Timeout = duration return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/putjob/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/putjob/request.go index 97c35b2fd..59bf823f5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/putjob/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/putjob/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putjob @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package putjob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/put_job/CreateRollupJobRequest.ts#L27-L89 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/put_job/CreateRollupJobRequest.ts#L27-L89 type Request struct { // Cron A cron string which defines the intervals when the rollup job should be @@ -52,8 +52,8 @@ type Request struct { // aggregations might be used. Rollups provide // enough flexibility that you simply need to determine which fields are needed, // not in what order they are needed. - Groups types.Groupings `json:"groups"` - Headers map[string][]string `json:"headers,omitempty"` + Groups types.Groupings `json:"groups"` + Headers types.HttpHeaders `json:"headers,omitempty"` // IndexPattern The index or index pattern to roll up. Supports wildcard-style patterns // (`logstash-*`). The job attempts to // rollup the entire index or index-pattern. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/putjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/putjob/response.go index 477a445f7..83ad393a7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/putjob/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/putjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putjob // Response holds the response body struct for the package putjob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/put_job/CreateRollupJobResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/put_job/CreateRollupJobResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/rollupsearch/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/rollupsearch/request.go index 46f6322b8..627e5d455 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/rollupsearch/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/rollupsearch/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package rollupsearch @@ -29,11 +29,14 @@ import ( // Request holds the request body struct for the package rollupsearch // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/rollup_search/RollupSearchRequest.ts#L27-L47 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/rollup_search/RollupSearchRequest.ts#L27-L57 type Request struct { + + // Aggregations Specifies aggregations. 
Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` - Query *types.Query `json:"query,omitempty"` - // Size Must be zero if set, as rollups work on pre-aggregated data + // Query Specifies a DSL query. + Query *types.Query `json:"query,omitempty"` + // Size Must be zero if set, as rollups work on pre-aggregated data. Size *int `json:"size,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/rollupsearch/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/rollupsearch/response.go index 16a18dedd..d352e284d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/rollupsearch/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/rollupsearch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package rollupsearch @@ -25,6 +25,7 @@ import ( "encoding/json" "errors" "io" + "strconv" "strings" "github.com/elastic/go-elasticsearch/v8/typedapi/types" @@ -32,7 +33,7 @@ import ( // Response holds the response body struct for the package rollupsearch // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/rollup_search/RollupSearchResponse.ts#L27-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/rollup_search/RollupSearchResponse.ts#L27-L36 type Response struct { Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` @@ -66,6 +67,10 @@ func (s *Response) UnmarshalJSON(data []byte) error { switch t { case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + 
for dec.More() { tt, err := dec.Token() if err != nil { @@ -78,415 +83,494 @@ func (s *Response) UnmarshalJSON(data []byte) error { if strings.Contains(value, "#") { elems := strings.Split(value, "#") if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } switch elems[0] { + case "cardinality": o := types.NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "hdr_percentiles": o := types.NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "hdr_percentile_ranks": o := types.NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "tdigest_percentiles": o := types.NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "tdigest_percentile_ranks": o := types.NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "percentiles_bucket": o := types.NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "median_absolute_deviation": o := types.NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "min": o := types.NewMinAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "max": o := types.NewMaxAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return 
err } s.Aggregations[elems[1]] = o + case "sum": o := types.NewSumAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "avg": o := types.NewAvgAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "weighted_avg": o := types.NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "value_count": o := types.NewValueCountAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "simple_value": o := types.NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "derivative": o := types.NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "bucket_metric_value": o := types.NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "stats": o := types.NewStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "stats_bucket": o := types.NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "extended_stats": o := types.NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "extended_stats_bucket": o := types.NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err 
} s.Aggregations[elems[1]] = o + case "geo_bounds": o := types.NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_centroid": o := types.NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "histogram": o := types.NewHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "date_histogram": o := types.NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "auto_date_histogram": o := types.NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "variable_width_histogram": o := types.NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sterms": o := types.NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "lterms": o := types.NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "dterms": o := types.NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umterms": o := types.NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "lrareterms": o := types.NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); 
err != nil { return err } s.Aggregations[elems[1]] = o + case "srareterms": o := types.NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umrareterms": o := types.NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "multi_terms": o := types.NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "missing": o := types.NewMissingAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "nested": o := types.NewNestedAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "reverse_nested": o := types.NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "global": o := types.NewGlobalAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "filter": o := types.NewFilterAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "children": o := types.NewChildrenAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "parent": o := types.NewParentAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sampler": o := types.NewSamplerAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } 
s.Aggregations[elems[1]] = o + case "unmapped_sampler": o := types.NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geohash_grid": o := types.NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geotile_grid": o := types.NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geohex_grid": o := types.NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "range": o := types.NewRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "date_range": o := types.NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_distance": o := types.NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "ip_range": o := types.NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "ip_prefix": o := types.NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "filters": o := types.NewFiltersAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "adjacency_matrix": o := types.NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } 
s.Aggregations[elems[1]] = o + case "siglterms": o := types.NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sigsterms": o := types.NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umsigterms": o := types.NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "composite": o := types.NewCompositeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := types.NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "scripted_metric": o := types.NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "top_hits": o := types.NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "inference": o := types.NewInferenceAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "string_stats": o := types.NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "box_plot": o := types.NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "top_metrics": o := types.NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } 
s.Aggregations[elems[1]] = o + case "t_test": o := types.NewTTestAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "rate": o := types.NewRateAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "simple_long_value": o := types.NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "matrix_stats": o := types.NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_line": o := types.NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + default: o := make(map[string]interface{}, 0) if err := dec.Decode(&o); err != nil { @@ -518,18 +602,46 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "terminated_early": - if err := dec.Decode(&s.TerminatedEarly); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TerminatedEarly = &value + case bool: + s.TerminatedEarly = &v } case "timed_out": - if err := dec.Decode(&s.TimedOut); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimedOut = value + case bool: + s.TimedOut = v } case "took": - if err := dec.Decode(&s.Took); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Took = value + case float64: + f := int64(v) + s.Took = f } } 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/rollupsearch/rollup_search.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/rollupsearch/rollup_search.go index 18d435c00..60c35f2d4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/rollupsearch/rollup_search.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/rollupsearch/rollup_search.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Enables searching rolled-up data using the standard query DSL. package rollupsearch @@ -53,8 +53,9 @@ type RollupSearch struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +71,7 @@ func NewRollupSearchFunc(tp elastictransport.Interface) NewRollupSearch { return func(index string) *RollupSearch { n := New(tp) - n.Index(index) + n._index(index) return n } @@ -85,6 +86,8 @@ func New(tp elastictransport.Interface) *RollupSearch { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +117,19 @@ func (r *RollupSearch) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +137,7 @@ func (r *RollupSearch) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -206,7 +220,6 @@ func (r RollupSearch) Do(ctx context.Context) 
(*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -215,6 +228,10 @@ func (r RollupSearch) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -225,12 +242,11 @@ func (r *RollupSearch) Header(key, value string) *RollupSearch { return r } -// Index The indices or index-pattern(s) (containing rollup or regular data) that -// should be searched +// Index Enables searching rolled-up data using the standard Query DSL. // API Name: index -func (r *RollupSearch) Index(v string) *RollupSearch { +func (r *RollupSearch) _index(index string) *RollupSearch { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -238,8 +254,8 @@ func (r *RollupSearch) Index(v string) *RollupSearch { // RestTotalHitsAsInt Indicates whether hits.total should be rendered as an integer or an object in // the rest search response // API name: rest_total_hits_as_int -func (r *RollupSearch) RestTotalHitsAsInt(b bool) *RollupSearch { - r.values.Set("rest_total_hits_as_int", strconv.FormatBool(b)) +func (r *RollupSearch) RestTotalHitsAsInt(resttotalhitsasint bool) *RollupSearch { + r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint)) return r } @@ -247,8 +263,34 @@ func (r *RollupSearch) RestTotalHitsAsInt(b bool) *RollupSearch { // TypedKeys Specify whether aggregation and suggester names should be prefixed by their // respective types in the response // API name: typed_keys -func (r *RollupSearch) TypedKeys(b bool) *RollupSearch { - r.values.Set("typed_keys", strconv.FormatBool(b)) +func (r *RollupSearch) TypedKeys(typedkeys bool) *RollupSearch { + r.values.Set("typed_keys", strconv.FormatBool(typedkeys)) + + return r +} + +// Aggregations Specifies aggregations. 
+// API name: aggregations +func (r *RollupSearch) Aggregations(aggregations map[string]types.Aggregations) *RollupSearch { + + r.req.Aggregations = aggregations + + return r +} + +// Query Specifies a DSL query. +// API name: query +func (r *RollupSearch) Query(query *types.Query) *RollupSearch { + + r.req.Query = query + + return r +} + +// Size Must be zero if set, as rollups work on pre-aggregated data. +// API name: size +func (r *RollupSearch) Size(size int) *RollupSearch { + r.req.Size = &size return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/startjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/startjob/response.go index 53b20cf72..2d5f46dac 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/startjob/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/startjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package startjob // Response holds the response body struct for the package startjob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/start_job/StartRollupJobResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/start_job/StartRollupJobResponse.ts#L20-L22 type Response struct { Started bool `json:"started"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/startjob/start_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/startjob/start_job.go index a75ee5bd3..24c79af64 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/startjob/start_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/startjob/start_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Starts an existing, stopped rollup job. 
package startjob @@ -67,7 +67,7 @@ func NewStartJobFunc(tp elastictransport.Interface) NewStartJob { return func(id string) *StartJob { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -172,7 +172,6 @@ func (r StartJob) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r StartJob) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,11 +215,11 @@ func (r *StartJob) Header(key, value string) *StartJob { return r } -// Id The ID of the job to start +// Id Identifier for the rollup job. // API Name: id -func (r *StartJob) Id(v string) *StartJob { +func (r *StartJob) _id(id string) *StartJob { r.paramSet |= idMask - r.id = v + r.id = id return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/stopjob/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/stopjob/response.go index 575c46766..de277add7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/stopjob/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/stopjob/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package stopjob // Response holds the response body struct for the package stopjob // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/stop_job/StopRollupJobResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/stop_job/StopRollupJobResponse.ts#L20-L22 type Response struct { Stopped bool `json:"stopped"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/stopjob/stop_job.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/stopjob/stop_job.go index a97267b76..dded2b557 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/stopjob/stop_job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/rollup/stopjob/stop_job.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Stops an existing, started rollup job. 
package stopjob @@ -68,7 +68,7 @@ func NewStopJobFunc(tp elastictransport.Interface) NewStopJob { return func(id string) *StopJob { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -173,7 +173,6 @@ func (r StopJob) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -182,6 +181,10 @@ func (r StopJob) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -213,29 +216,32 @@ func (r *StopJob) Header(key, value string) *StopJob { return r } -// Id The ID of the job to stop +// Id Identifier for the rollup job. // API Name: id -func (r *StopJob) Id(v string) *StopJob { +func (r *StopJob) _id(id string) *StopJob { r.paramSet |= idMask - r.id = v + r.id = id return r } -// Timeout Block for (at maximum) the specified duration while waiting for the job to -// stop. Defaults to 30s. +// Timeout If `wait_for_completion` is `true`, the API blocks for (at maximum) the +// specified duration while waiting for the job to stop. +// If more than `timeout` time has passed, the API throws a timeout exception. // API name: timeout -func (r *StopJob) Timeout(v string) *StopJob { - r.values.Set("timeout", v) +func (r *StopJob) Timeout(duration string) *StopJob { + r.values.Set("timeout", duration) return r } -// WaitForCompletion True if the API should block until the job has fully stopped, false if should -// be executed async. Defaults to false. +// WaitForCompletion If set to `true`, causes the API to block until the indexer state completely +// stops. +// If set to `false`, the API returns immediately and the indexer is stopped +// asynchronously in the background. 
// API name: wait_for_completion -func (r *StopJob) WaitForCompletion(b bool) *StopJob { - r.values.Set("wait_for_completion", strconv.FormatBool(b)) +func (r *StopJob) WaitForCompletion(waitforcompletion bool) *StopJob { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/cachestats/cache_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/cachestats/cache_stats.go index 0187c82b5..475fff254 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/cachestats/cache_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/cachestats/cache_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieve node-level cache statistics about searchable snapshots. 
package cachestats @@ -179,7 +179,6 @@ func (r CacheStats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -188,6 +187,10 @@ func (r CacheStats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -223,16 +226,16 @@ func (r *CacheStats) Header(key, value string) *CacheStats { // information; use `_local` to return information from the node you're // connecting to, leave empty to get information from all nodes // API Name: nodeid -func (r *CacheStats) NodeId(v string) *CacheStats { +func (r *CacheStats) NodeId(nodeid string) *CacheStats { r.paramSet |= nodeidMask - r.nodeid = v + r.nodeid = nodeid return r } // API name: master_timeout -func (r *CacheStats) MasterTimeout(v string) *CacheStats { - r.values.Set("master_timeout", v) +func (r *CacheStats) MasterTimeout(duration string) *CacheStats { + r.values.Set("master_timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/cachestats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/cachestats/response.go index 843bc714f..2f6abd148 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/cachestats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/cachestats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package cachestats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package cachestats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/searchable_snapshots/cache_stats/Response.ts#L24-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/searchable_snapshots/cache_stats/Response.ts#L24-L28 type Response struct { Nodes map[string]types.Node `json:"nodes"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/clearcache/clear_cache.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/clearcache/clear_cache.go index 99f41c705..dbd50e8a1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/clearcache/clear_cache.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/clearcache/clear_cache.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Clear the cache of searchable snapshots. 
package clearcache @@ -36,6 +36,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) const ( @@ -180,7 +181,6 @@ func (r ClearCache) Do(ctx context.Context) (Response, error) { } return *response, nil - } errorResponse := types.NewElasticsearchError() @@ -189,6 +189,10 @@ func (r ClearCache) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -222,9 +226,9 @@ func (r *ClearCache) Header(key, value string) *ClearCache { // Index A comma-separated list of index names // API Name: index -func (r *ClearCache) Index(v string) *ClearCache { +func (r *ClearCache) Index(index string) *ClearCache { r.paramSet |= indexMask - r.index = v + r.index = index return r } @@ -232,8 +236,12 @@ func (r *ClearCache) Index(v string) *ClearCache { // ExpandWildcards Whether to expand wildcard expression to concrete indices that are open, // closed or both. // API name: expand_wildcards -func (r *ClearCache) ExpandWildcards(v string) *ClearCache { - r.values.Set("expand_wildcards", v) +func (r *ClearCache) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *ClearCache { + tmp := []string{} + for _, item := range expandwildcards { + tmp = append(tmp, item.String()) + } + r.values.Set("expand_wildcards", strings.Join(tmp, ",")) return r } @@ -241,8 +249,8 @@ func (r *ClearCache) ExpandWildcards(v string) *ClearCache { // AllowNoIndices Whether to ignore if a wildcard indices expression resolves into no concrete // indices. 
(This includes `_all` string or when no indices have been specified) // API name: allow_no_indices -func (r *ClearCache) AllowNoIndices(b bool) *ClearCache { - r.values.Set("allow_no_indices", strconv.FormatBool(b)) +func (r *ClearCache) AllowNoIndices(allownoindices bool) *ClearCache { + r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices)) return r } @@ -250,22 +258,22 @@ func (r *ClearCache) AllowNoIndices(b bool) *ClearCache { // IgnoreUnavailable Whether specified concrete indices should be ignored when unavailable // (missing or closed) // API name: ignore_unavailable -func (r *ClearCache) IgnoreUnavailable(b bool) *ClearCache { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *ClearCache) IgnoreUnavailable(ignoreunavailable bool) *ClearCache { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } // API name: pretty -func (r *ClearCache) Pretty(b bool) *ClearCache { - r.values.Set("pretty", strconv.FormatBool(b)) +func (r *ClearCache) Pretty(pretty bool) *ClearCache { + r.values.Set("pretty", strconv.FormatBool(pretty)) return r } // API name: human -func (r *ClearCache) Human(b bool) *ClearCache { - r.values.Set("human", strconv.FormatBool(b)) +func (r *ClearCache) Human(human bool) *ClearCache { + r.values.Set("human", strconv.FormatBool(human)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/clearcache/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/clearcache/response.go index 251c8be2b..2206f7605 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/clearcache/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/clearcache/response.go @@ -16,14 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package clearcache -import "encoding/json" +import ( + "encoding/json" +) // Response holds the response body struct for the package clearcache // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/searchable_snapshots/clear_cache/SearchableSnapshotsClearCacheResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/searchable_snapshots/clear_cache/SearchableSnapshotsClearCacheResponse.ts#L22-L24 -type Response json.RawMessage +type Response = json.RawMessage + +func NewResponse() *Response { + return new(Response) +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/mount/mount.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/mount/mount.go index 19e5c764e..635ae57cc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/mount/mount.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/mount/mount.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Mount a snapshot as a searchable index. 
package mount @@ -55,8 +55,9 @@ type Mount struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -73,9 +74,9 @@ func NewMountFunc(tp elastictransport.Interface) NewMount { return func(repository, snapshot string) *Mount { n := New(tp) - n.Repository(repository) + n._repository(repository) - n.Snapshot(snapshot) + n._snapshot(snapshot) return n } @@ -90,6 +91,8 @@ func New(tp elastictransport.Interface) *Mount { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -119,9 +122,19 @@ func (r *Mount) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -129,6 +142,7 @@ func (r *Mount) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -214,7 +228,6 @@ func (r Mount) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -223,6 +236,10 @@ func (r Mount) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -235,34 +252,34 @@ func (r *Mount) Header(key, value string) *Mount { // Repository The name of the repository containing the snapshot of the index to mount // API Name: repository -func (r *Mount) Repository(v string) *Mount { +func (r *Mount) _repository(repository string) *Mount { r.paramSet |= repositoryMask - r.repository = v + r.repository = repository return r } // Snapshot The name of the snapshot of the index to mount // API Name: snapshot -func (r *Mount) Snapshot(v string) 
*Mount { +func (r *Mount) _snapshot(snapshot string) *Mount { r.paramSet |= snapshotMask - r.snapshot = v + r.snapshot = snapshot return r } // MasterTimeout Explicit operation timeout for connection to master node // API name: master_timeout -func (r *Mount) MasterTimeout(v string) *Mount { - r.values.Set("master_timeout", v) +func (r *Mount) MasterTimeout(duration string) *Mount { + r.values.Set("master_timeout", duration) return r } // WaitForCompletion Should this request wait until the operation has completed before returning // API name: wait_for_completion -func (r *Mount) WaitForCompletion(b bool) *Mount { - r.values.Set("wait_for_completion", strconv.FormatBool(b)) +func (r *Mount) WaitForCompletion(waitforcompletion bool) *Mount { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) return r } @@ -270,8 +287,37 @@ func (r *Mount) WaitForCompletion(b bool) *Mount { // Storage Selects the kind of local storage used to accelerate searches. Experimental, // and defaults to `full_copy` // API name: storage -func (r *Mount) Storage(v string) *Mount { - r.values.Set("storage", v) +func (r *Mount) Storage(storage string) *Mount { + r.values.Set("storage", storage) + + return r +} + +// API name: ignore_index_settings +func (r *Mount) IgnoreIndexSettings(ignoreindexsettings ...string) *Mount { + r.req.IgnoreIndexSettings = ignoreindexsettings + + return r +} + +// API name: index +func (r *Mount) Index(indexname string) *Mount { + r.req.Index = indexname + + return r +} + +// API name: index_settings +func (r *Mount) IndexSettings(indexsettings map[string]json.RawMessage) *Mount { + + r.req.IndexSettings = indexsettings + + return r +} + +// API name: renamed_index +func (r *Mount) RenamedIndex(indexname string) *Mount { + r.req.RenamedIndex = &indexname return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/mount/request.go 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/mount/request.go index 310e82447..f908e00e6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/mount/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/mount/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package mount @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package mount // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/searchable_snapshots/mount/SearchableSnapshotsMountRequest.ts#L26-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/searchable_snapshots/mount/SearchableSnapshotsMountRequest.ts#L26-L49 type Request struct { IgnoreIndexSettings []string `json:"ignore_index_settings,omitempty"` Index string `json:"index"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/mount/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/mount/response.go index e39a1fd7f..033d5c6db 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/mount/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/mount/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package mount @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package mount // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/searchable_snapshots/mount/SearchableSnapshotsMountResponse.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/searchable_snapshots/mount/SearchableSnapshotsMountResponse.ts#L22-L26 type Response struct { Snapshot types.MountedSnapshot `json:"snapshot"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/stats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/stats/response.go index c8de5a02a..a59623e44 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/stats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/stats/response.go @@ -16,15 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package stats -import "encoding/json" +import ( + "encoding/json" +) // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/searchable_snapshots/stats/SearchableSnapshotsStatsResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/searchable_snapshots/stats/SearchableSnapshotsStatsResponse.ts#L22-L27 type Response struct { Stats json.RawMessage `json:"stats,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/stats/stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/stats/stats.go index 7757e42d0..988663943 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/stats/stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/stats/stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieve shard-level statistics about searchable snapshots. 
package stats @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/statslevel" ) @@ -177,7 +176,6 @@ func (r Stats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -186,6 +184,10 @@ func (r Stats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -219,17 +221,17 @@ func (r *Stats) Header(key, value string) *Stats { // Index A comma-separated list of index names // API Name: index -func (r *Stats) Index(v string) *Stats { +func (r *Stats) Index(index string) *Stats { r.paramSet |= indexMask - r.index = v + r.index = index return r } // Level Return stats aggregated at cluster, index or shard level // API name: level -func (r *Stats) Level(enum statslevel.StatsLevel) *Stats { - r.values.Set("level", enum.String()) +func (r *Stats) Level(level statslevel.StatsLevel) *Stats { + r.values.Set("level", level.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/delete/delete.go new file mode 100644 index 000000000..1746b8369 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/delete/delete.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Deletes a search application. +package delete + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Delete struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + name string +} + +// NewDelete type alias for index. +type NewDelete func(name string) *Delete + +// NewDeleteFunc returns a new instance of Delete with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteFunc(tp elastictransport.Interface) NewDelete { + return func(name string) *Delete { + n := New(tp) + + n._name(name) + + return n + } +} + +// Deletes a search application. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/put-search-application.html +func New(tp elastictransport.Interface) *Delete { + r := &Delete{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Delete) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + + path.WriteString(r.name) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Delete) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the Delete query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a delete.Response +func (r Delete) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Delete) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the Delete headers map. 
+func (r *Delete) Header(key, value string) *Delete { + r.headers.Set(key, value) + + return r +} + +// Name The name of the search application to delete +// API Name: name +func (r *Delete) _name(name string) *Delete { + r.paramSet |= nameMask + r.name = name + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/deletevotingconfigexclusions/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/delete/response.go similarity index 69% rename from vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/deletevotingconfigexclusions/response.go rename to vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/delete/response.go index fc76317fe..3c16af289 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/deletevotingconfigexclusions/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/delete/response.go @@ -16,15 +16,19 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 -package deletevotingconfigexclusions +package delete -// Response holds the response body struct for the package deletevotingconfigexclusions +// Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/delete_voting_config_exclusions/ClusterDeleteVotingConfigExclusionsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/search_application/delete/SearchApplicationsDeleteResponse.ts#L22-L24 type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/deletebehavioralanalytics/delete_behavioral_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/deletebehavioralanalytics/delete_behavioral_analytics.go new file mode 100644 index 000000000..a69cd2605 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/deletebehavioralanalytics/delete_behavioral_analytics.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Delete a behavioral analytics collection. +package deletebehavioralanalytics + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteBehavioralAnalytics struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + name string +} + +// NewDeleteBehavioralAnalytics type alias for index. +type NewDeleteBehavioralAnalytics func(name string) *DeleteBehavioralAnalytics + +// NewDeleteBehavioralAnalyticsFunc returns a new instance of DeleteBehavioralAnalytics with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteBehavioralAnalyticsFunc(tp elastictransport.Interface) NewDeleteBehavioralAnalytics { + return func(name string) *DeleteBehavioralAnalytics { + n := New(tp) + + n._name(name) + + return n + } +} + +// Delete a behavioral analytics collection. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-analytics-collection.html +func New(tp elastictransport.Interface) *DeleteBehavioralAnalytics { + r := &DeleteBehavioralAnalytics{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteBehavioralAnalytics) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + path.WriteString(r.name) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteBehavioralAnalytics) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the DeleteBehavioralAnalytics query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletebehavioralanalytics.Response +func (r DeleteBehavioralAnalytics) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteBehavioralAnalytics) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the DeleteBehavioralAnalytics headers map. 
+func (r *DeleteBehavioralAnalytics) Header(key, value string) *DeleteBehavioralAnalytics { + r.headers.Set(key, value) + + return r +} + +// Name The name of the analytics collection to be deleted +// API Name: name +func (r *DeleteBehavioralAnalytics) _name(name string) *DeleteBehavioralAnalytics { + r.paramSet |= nameMask + r.name = name + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/deletebehavioralanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/deletebehavioralanalytics/response.go new file mode 100644 index 000000000..3636fa60b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/deletebehavioralanalytics/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package deletebehavioralanalytics + +// Response holds the response body struct for the package deletebehavioralanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/search_application/delete_behavioral_analytics/BehavioralAnalyticsDeleteResponse.ts#L22-L24 + +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/get/get.go new file mode 100644 index 000000000..52737f12f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/get/get.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Returns the details about a search application. +package get + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Get struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + name string +} + +// NewGet type alias for index. +type NewGet func(name string) *Get + +// NewGetFunc returns a new instance of Get with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetFunc(tp elastictransport.Interface) NewGet { + return func(name string) *Get { + n := New(tp) + + n._name(name) + + return n + } +} + +// Returns the details about a search application. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/get-search-application.html +func New(tp elastictransport.Interface) *Get { + r := &Get{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *Get) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Get) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the Get query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a get.Response +func (r Get) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r Get) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the Get headers map. 
+func (r *Get) Header(key, value string) *Get { + r.headers.Set(key, value) + + return r +} + +// Name The name of the search application +// API Name: name +func (r *Get) _name(name string) *Get { + r.paramSet |= nameMask + r.name = name + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/get/response.go new file mode 100644 index 000000000..daa3c4a88 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/get/response.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package get + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package get +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/search_application/get/SearchApplicationsGetResponse.ts#L22-L24 + +type Response struct { + + // AnalyticsCollectionName Analytics collection associated to the Search Application. + AnalyticsCollectionName *string `json:"analytics_collection_name,omitempty"` + // Indices Indices that are part of the Search Application. + Indices []string `json:"indices"` + // Name Search Application name. + Name string `json:"name"` + // Template Search template to use on search operations. + Template *types.SearchApplicationTemplate `json:"template,omitempty"` + // UpdatedAtMillis Last time the Search Application was updated. + UpdatedAtMillis int64 `json:"updated_at_millis"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/getbehavioralanalytics/get_behavioral_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/getbehavioralanalytics/get_behavioral_analytics.go new file mode 100644 index 000000000..5b5ced995 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/getbehavioralanalytics/get_behavioral_analytics.go @@ -0,0 +1,228 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Returns the existing behavioral analytics collections. +package getbehavioralanalytics + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetBehavioralAnalytics struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + name string +} + +// NewGetBehavioralAnalytics type alias for index. +type NewGetBehavioralAnalytics func() *GetBehavioralAnalytics + +// NewGetBehavioralAnalyticsFunc returns a new instance of GetBehavioralAnalytics with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewGetBehavioralAnalyticsFunc(tp elastictransport.Interface) NewGetBehavioralAnalytics { + return func() *GetBehavioralAnalytics { + n := New(tp) + + return n + } +} + +// Returns the existing behavioral analytics collections. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/list-analytics-collection.html +func New(tp elastictransport.Interface) *GetBehavioralAnalytics { + r := &GetBehavioralAnalytics{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetBehavioralAnalytics) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + + method = http.MethodGet + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + path.WriteString(r.name) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetBehavioralAnalytics) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the GetBehavioralAnalytics query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getbehavioralanalytics.Response +func (r GetBehavioralAnalytics) Do(ctx context.Context) (Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(&response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetBehavioralAnalytics) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the GetBehavioralAnalytics headers map. 
+func (r *GetBehavioralAnalytics) Header(key, value string) *GetBehavioralAnalytics { + r.headers.Set(key, value) + + return r +} + +// Name A list of analytics collections to limit the returned information +// API Name: name +func (r *GetBehavioralAnalytics) Name(name string) *GetBehavioralAnalytics { + r.paramSet |= nameMask + r.name = name + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/getbehavioralanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/getbehavioralanalytics/response.go new file mode 100644 index 000000000..606bb010c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/getbehavioralanalytics/response.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package getbehavioralanalytics + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package getbehavioralanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/search_application/get_behavioral_analytics/BehavioralAnalyticsGetResponse.ts#L24-L26 + +type Response map[string]types.AnalyticsCollection + +// NewResponse returns a Response +func NewResponse() Response { + r := make(Response, 0) + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/list/list.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/list/list.go new file mode 100644 index 000000000..049b28bc2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/list/list.go @@ -0,0 +1,228 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Returns the existing search applications. +package list + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type List struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int +} + +// NewList type alias for index. +type NewList func() *List + +// NewListFunc returns a new instance of List with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewListFunc(tp elastictransport.Interface) NewList { + return func() *List { + n := New(tp) + + return n + } +} + +// Returns the existing search applications. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/list-search-applications.html +func New(tp elastictransport.Interface) *List { + r := &List{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *List) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r List) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the List query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a list.Response +func (r List) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r List) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the List headers map. +func (r *List) Header(key, value string) *List { + r.headers.Set(key, value) + + return r +} + +// Q Query in the Lucene query string syntax. +// API name: q +func (r *List) Q(q string) *List { + r.values.Set("q", q) + + return r +} + +// From Starting offset. 
+// API name: from +func (r *List) From(from int) *List { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size Specifies a max number of results to get. +// API name: size +func (r *List) Size(size int) *List { + r.values.Set("size", strconv.Itoa(size)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/list/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/list/response.go new file mode 100644 index 000000000..9ec95723c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/list/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package list + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package list +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/search_application/list/SearchApplicationsListResponse.ts#L24-L29 + +type Response struct { + Count int64 `json:"count"` + Results []types.SearchApplicationListItem `json:"results"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/put/put.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/put/put.go new file mode 100644 index 000000000..e735655de --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/put/put.go @@ -0,0 +1,300 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Creates or updates a search application. +package put + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Put struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + req *Request + deferred []func(request *Request) error + raw io.Reader + + paramSet int + + name string +} + +// NewPut type alias for index. +type NewPut func(name string) *Put + +// NewPutFunc returns a new instance of Put with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutFunc(tp elastictransport.Interface) NewPut { + return func(name string) *Put { + n := New(tp) + + n._name(name) + + return n + } +} + +// Creates or updates a search application. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/put-search-application.html +func New(tp elastictransport.Interface) *Put { + r := &Put{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Put) Raw(raw io.Reader) *Put { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
+func (r *Put) Request(req *Request) *Put { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Put) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw != nil { + r.buf.ReadFrom(r.raw) + } else if r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Put: %w", err) + } + + r.buf.Write(data) + + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + + path.WriteString(r.name) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r Put) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the Put query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a put.Response +func (r Put) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// Header set a key, value pair in the Put headers map. +func (r *Put) Header(key, value string) *Put { + r.headers.Set(key, value) + + return r +} + +// Name The name of the search application to be created or updated. +// API Name: name +func (r *Put) _name(name string) *Put { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Create If `true`, this request cannot replace or update existing Search +// Applications. +// API name: create +func (r *Put) Create(create bool) *Put { + r.values.Set("create", strconv.FormatBool(create)) + + return r +} + +// AnalyticsCollectionName Analytics collection associated to the Search Application. +// API name: analytics_collection_name +func (r *Put) AnalyticsCollectionName(name string) *Put { + r.req.AnalyticsCollectionName = &name + + return r +} + +// Indices Indices that are part of the Search Application. 
+// API name: indices +func (r *Put) Indices(indices ...string) *Put { + r.req.Indices = indices + + return r +} + +// Name Search Application name. +// API name: name +func (r *Put) Name(name string) *Put { + r.req.Name = name + + return r +} + +// Template Search template to use on search operations. +// API name: template +func (r *Put) Template(template *types.SearchApplicationTemplate) *Put { + + r.req.Template = template + + return r +} + +// UpdatedAtMillis Last time the Search Application was updated. +// API name: updated_at_millis +func (r *Put) UpdatedAtMillis(epochtimeunitmillis int64) *Put { + r.req.UpdatedAtMillis = epochtimeunitmillis + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/put/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/put/request.go new file mode 100644 index 000000000..ec3d5ee7d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/put/request.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package put + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package put +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/search_application/put/SearchApplicationsPutRequest.ts#L23-L48 +type Request = types.SearchApplication diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/postvotingconfigexclusions/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/put/response.go similarity index 67% rename from vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/postvotingconfigexclusions/response.go rename to vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/put/response.go index b062c3a98..eaf65ea14 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/cluster/postvotingconfigexclusions/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/put/response.go @@ -16,15 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 -package postvotingconfigexclusions +package put -// Response holds the response body struct for the package postvotingconfigexclusions +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package put // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/post_voting_config_exclusions/ClusterPostVotingConfigExclusionsResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/search_application/put/SearchApplicationsPutResponse.ts#L22-L26 type Response struct { + Result result.Result `json:"result"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/putbehavioralanalytics/put_behavioral_analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/putbehavioralanalytics/put_behavioral_analytics.go new file mode 100644 index 000000000..a77f9beac --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/putbehavioralanalytics/put_behavioral_analytics.go @@ -0,0 +1,223 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Creates a behavioral analytics collection. +package putbehavioralanalytics + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutBehavioralAnalytics struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + name string +} + +// NewPutBehavioralAnalytics type alias for index. +type NewPutBehavioralAnalytics func(name string) *PutBehavioralAnalytics + +// NewPutBehavioralAnalyticsFunc returns a new instance of PutBehavioralAnalytics with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewPutBehavioralAnalyticsFunc(tp elastictransport.Interface) NewPutBehavioralAnalytics { + return func(name string) *PutBehavioralAnalytics { + n := New(tp) + + n._name(name) + + return n + } +} + +// Creates a behavioral analytics collection. 
+// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/put-analytics-collection.html +func New(tp elastictransport.Interface) *PutBehavioralAnalytics { + r := &PutBehavioralAnalytics{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutBehavioralAnalytics) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("analytics") + path.WriteString("/") + + path.WriteString(r.name) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutBehavioralAnalytics) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the PutBehavioralAnalytics query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putbehavioralanalytics.Response +func (r PutBehavioralAnalytics) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r PutBehavioralAnalytics) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the PutBehavioralAnalytics headers map. +func (r *PutBehavioralAnalytics) Header(key, value string) *PutBehavioralAnalytics { + r.headers.Set(key, value) + + return r +} + +// Name The name of the analytics collection to be created or updated. 
+// API Name: name +func (r *PutBehavioralAnalytics) _name(name string) *PutBehavioralAnalytics { + r.paramSet |= nameMask + r.name = name + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/putbehavioralanalytics/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/putbehavioralanalytics/response.go new file mode 100644 index 000000000..e214aba06 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/putbehavioralanalytics/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package putbehavioralanalytics + +// Response holds the response body struct for the package putbehavioralanalytics +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/search_application/put_behavioral_analytics/BehavioralAnalyticsPutResponse.ts#L24-L26 + +type Response struct { + + // Acknowledged For a successful response, this value is always true. 
On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` + // Name The name of the analytics collection created or updated + Name string `json:"name"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/search/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/search/request.go new file mode 100644 index 000000000..eac5a25d2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/search/request.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package search + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package search +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/search_application/search/SearchApplicationsSearchRequest.ts#L24-L43 +type Request struct { + + // Params Query parameters specific to this request, which will override any defaults + // specified in the template. + Params map[string]json.RawMessage `json:"params,omitempty"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{ + Params: make(map[string]json.RawMessage, 0), + } + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Search request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/search/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/search/response.go new file mode 100644 index 000000000..052cee94e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/search/response.go @@ -0,0 +1,782 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package search + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "strings" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package search +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/search_application/search/SearchApplicationsSearchResponse.ts#L23-L25 + +type Response struct { + Aggregations map[string]types.Aggregate `json:"aggregations,omitempty"` + Clusters_ *types.ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + Hits types.HitsMetadata `json:"hits"` + MaxScore *types.Float64 `json:"max_score,omitempty"` + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *types.Profile `json:"profile,omitempty"` + ScrollId_ *string `json:"_scroll_id,omitempty"` + Shards_ types.ShardStatistics `json:"_shards"` + Suggest map[string][]types.Suggest `json:"suggest,omitempty"` + TerminatedEarly *bool `json:"terminated_early,omitempty"` + TimedOut bool `json:"timed_out"` + Took int64 `json:"took"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + Aggregations: make(map[string]types.Aggregate, 0), + Fields: make(map[string]json.RawMessage, 0), + Suggest: 
make(map[string][]types.Suggest, 0), + } + return r +} + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := types.NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := types.NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := types.NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := types.NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := types.NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := types.NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := types.NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := types.NewMinAggregate() + if err := 
dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := types.NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := types.NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := types.NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := types.NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := types.NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := types.NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := types.NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := types.NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := types.NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := types.NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := types.NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := types.NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := types.NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := types.NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := types.NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := types.NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := types.NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := types.NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := types.NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := types.NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := types.NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := types.NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := types.NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := types.NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := types.NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := types.NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err 
+ } + s.Aggregations[elems[1]] = o + + case "missing": + o := types.NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := types.NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := types.NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := types.NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := types.NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := types.NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := types.NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := types.NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := types.NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := types.NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := types.NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := types.NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := types.NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := 
types.NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := types.NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := types.NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := types.NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := types.NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := types.NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := types.NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := types.NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := types.NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := types.NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := types.NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := types.NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := types.NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := 
types.NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := types.NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := types.NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := types.NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := types.NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := types.NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := types.NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := types.NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := types.NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } + } + + case "_clusters": + if err := dec.Decode(&s.Clusters_); err != nil { + return err + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "hits": + if err := dec.Decode(&s.Hits); err != nil { 
+ return err + } + + case "max_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := types.Float64(value) + s.MaxScore = &f + case float64: + f := types.Float64(v) + s.MaxScore = &f + } + + case "num_reduce_phases": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumReducePhases = &value + case float64: + f := int64(v) + s.NumReducePhases = &f + } + + case "pit_id": + if err := dec.Decode(&s.PitId); err != nil { + return err + } + + case "profile": + if err := dec.Decode(&s.Profile); err != nil { + return err + } + + case "_scroll_id": + if err := dec.Decode(&s.ScrollId_); err != nil { + return err + } + + case "_shards": + if err := dec.Decode(&s.Shards_); err != nil { + return err + } + + case "suggest": + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Suggest == nil { + s.Suggest = make(map[string][]types.Suggest, 0) + } + switch elems[0] { + + case "completion": + o := types.NewCompletionSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "phrase": + o := types.NewPhraseSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "term": + o := types.NewTermSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + } + } else { + return errors.New("cannot decode JSON for field Suggest") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[value] = append(s.Suggest[value], o) + } + } + } + + case "terminated_early": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TerminatedEarly = &value + case bool: + s.TerminatedEarly = &v + } + + case "timed_out": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimedOut = value + case bool: + s.TimedOut = v + } + + case "took": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Took = value + case float64: + f := int64(v) + s.Took = f + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/search/search.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/search/search.go new file mode 100644 index 000000000..544c696c6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/search/search.go @@ -0,0 +1,263 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Perform a search against a search application +package search + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + nameMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type Search struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + req *Request + deferred []func(request *Request) error + raw io.Reader + + paramSet int + + name string +} + +// NewSearch type alias for index. +type NewSearch func(name string) *Search + +// NewSearchFunc returns a new instance of Search with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewSearchFunc(tp elastictransport.Interface) NewSearch { + return func(name string) *Search { + n := New(tp) + + n._name(name) + + return n + } +} + +// Perform a search against a search application +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-search.html +func New(tp elastictransport.Interface) *Search { + r := &Search{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + + req: NewRequest(), + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *Search) Raw(raw io.Reader) *Search { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *Search) Request(req *Request) *Search { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *Search) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw != nil { + r.buf.ReadFrom(r.raw) + } else if r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for Search: %w", err) + } + + r.buf.Write(data) + + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == nameMask: + path.WriteString("/") + path.WriteString("_application") + path.WriteString("/") + path.WriteString("search_application") + path.WriteString("/") + + path.WriteString(r.name) + path.WriteString("/") + path.WriteString("_search") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + 
} + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r Search) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the Search query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a search.Response +func (r Search) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// Header set a key, value pair in the Search headers map. +func (r *Search) Header(key, value string) *Search { + r.headers.Set(key, value) + + return r +} + +// Name The name of the search application to be searched. 
+// API Name: name +func (r *Search) _name(name string) *Search { + r.paramSet |= nameMask + r.name = name + + return r +} + +// Params Query parameters specific to this request, which will override any defaults +// specified in the template. +// API name: params +func (r *Search) Params(params map[string]json.RawMessage) *Search { + + r.req.Params = params + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/activateuserprofile/activate_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/activateuserprofile/activate_user_profile.go index 98ffb68dc..4cccb4049 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/activateuserprofile/activate_user_profile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/activateuserprofile/activate_user_profile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates or updates the user profile on behalf of another user. package activateuserprofile @@ -34,6 +34,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/granttype" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. 
@@ -48,8 +49,9 @@ type ActivateUserProfile struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +78,8 @@ func New(tp elastictransport.Interface) *ActivateUserProfile { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +109,19 @@ func (r *ActivateUserProfile) HttpRequest(ctx context.Context) (*http.Request, e var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +129,7 @@ func (r *ActivateUserProfile) HttpRequest(ctx context.Context) (*http.Request, e } r.buf.Write(data) + } r.path.Scheme = "http" @@ -196,7 +211,6 @@ func (r ActivateUserProfile) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -205,6 +219,10 @@ func (r ActivateUserProfile) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,3 +232,34 @@ func (r *ActivateUserProfile) Header(key, value string) *ActivateUserProfile { return r } + +// API name: access_token +func (r *ActivateUserProfile) AccessToken(accesstoken string) *ActivateUserProfile { + + r.req.AccessToken = &accesstoken + + return r +} + +// API name: grant_type +func (r *ActivateUserProfile) GrantType(granttype granttype.GrantType) *ActivateUserProfile { + r.req.GrantType = granttype + + return r +} + +// API name: password +func (r *ActivateUserProfile) Password(password string) *ActivateUserProfile { + + r.req.Password = &password + + return r +} + +// API name: username +func (r *ActivateUserProfile) Username(username string) 
*ActivateUserProfile { + + r.req.Username = &username + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/activateuserprofile/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/activateuserprofile/request.go index 8a523788e..f54673ca3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/activateuserprofile/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/activateuserprofile/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package activateuserprofile @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package activateuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/activate_user_profile/Request.ts#L23-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/activate_user_profile/Request.ts#L23-L37 type Request struct { AccessToken *string `json:"access_token,omitempty"` GrantType granttype.GrantType `json:"grant_type"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/activateuserprofile/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/activateuserprofile/response.go index 7f7c72925..41afecc63 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/activateuserprofile/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/activateuserprofile/response.go @@ -16,25 +16,35 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package activateuserprofile import ( + "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Response holds the response body struct for the package activateuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/activate_user_profile/Response.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/activate_user_profile/Response.ts#L22-L24 type Response struct { + Data map[string]json.RawMessage `json:"data"` Doc_ types.UserProfileHitMetadata `json:"_doc"` + Enabled *bool `json:"enabled,omitempty"` + Labels map[string]json.RawMessage `json:"labels"` LastSynchronized int64 `json:"last_synchronized"` + Uid string `json:"uid"` + User types.UserProfileUser `json:"user"` } // NewResponse returns a Response func NewResponse() *Response { - r := &Response{} + r := &Response{ + Data: make(map[string]json.RawMessage, 0), + Labels: make(map[string]json.RawMessage, 0), + } return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/authenticate/authenticate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/authenticate/authenticate.go index f46164f7c..69e2fc869 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/authenticate/authenticate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/authenticate/authenticate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Enables authentication as a user and retrieve information about the // authenticated user. @@ -161,7 +161,6 @@ func (r Authenticate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -170,6 +169,10 @@ func (r Authenticate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/authenticate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/authenticate/response.go index 1d467afc9..676eede8a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/authenticate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/authenticate/response.go @@ -16,32 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package authenticate import ( - "encoding/json" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Response holds the response body struct for the package authenticate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/authenticate/SecurityAuthenticateResponse.ts#L25-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/authenticate/SecurityAuthenticateResponse.ts#L25-L43 type Response struct { - ApiKey *types.ApiKey `json:"api_key,omitempty"` - AuthenticationRealm types.RealmInfo `json:"authentication_realm"` - AuthenticationType string `json:"authentication_type"` - Email string `json:"email,omitempty"` - Enabled bool `json:"enabled"` - FullName string `json:"full_name,omitempty"` - LookupRealm types.RealmInfo `json:"lookup_realm"` - Metadata map[string]json.RawMessage `json:"metadata"` - Roles []string `json:"roles"` - Token *types.AuthenticateToken `json:"token,omitempty"` - Username string `json:"username"` + ApiKey *types.ApiKey `json:"api_key,omitempty"` + AuthenticationRealm types.RealmInfo `json:"authentication_realm"` + AuthenticationType string `json:"authentication_type"` + Email string `json:"email,omitempty"` + Enabled bool `json:"enabled"` + FullName string `json:"full_name,omitempty"` + LookupRealm types.RealmInfo `json:"lookup_realm"` + Metadata types.Metadata `json:"metadata"` + Roles []string `json:"roles"` + Token *types.AuthenticateToken `json:"token,omitempty"` + Username string `json:"username"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/bulkupdateapikeys/bulk_update_api_keys.go 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/bulkupdateapikeys/bulk_update_api_keys.go index cd566875b..24224ed95 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/bulkupdateapikeys/bulk_update_api_keys.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/bulkupdateapikeys/bulk_update_api_keys.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Updates the attributes of multiple existing API keys. package bulkupdateapikeys @@ -147,6 +147,11 @@ func (r BulkUpdateApiKeys) Perform(ctx context.Context) (*http.Response, error) return res, nil } +// Do runs the request through the transport, handle the response and returns a bulkupdateapikeys.Response +func (r BulkUpdateApiKeys) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + // IsSuccess allows to run a query with a context and retrieve the result as a boolean. // This only exists for endpoints without a request payload and allows for quick control flow. func (r BulkUpdateApiKeys) IsSuccess(ctx context.Context) (bool, error) { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/changepassword/change_password.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/changepassword/change_password.go index 810e46cfe..6006386bd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/changepassword/change_password.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/changepassword/change_password.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Changes the passwords of users in the native realm and built-in users. package changepassword @@ -34,7 +34,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -54,8 +53,9 @@ type ChangePassword struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -84,6 +84,8 @@ func New(tp elastictransport.Interface) *ChangePassword { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +115,19 @@ func (r *ChangePassword) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +135,7 @@ func (r *ChangePassword) HttpRequest(ctx context.Context) (*http.Request, error) } r.buf.Write(data) + } r.path.Scheme = "http" @@ -216,7 +229,6 @@ func (r ChangePassword) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -225,6 +237,10 @@ func (r ChangePassword) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -238,9 +254,9 @@ func (r *ChangePassword) Header(key, value string) *ChangePassword { // Username The user whose password you want to change. 
If you do not specify this // parameter, the password is changed for the current user. // API Name: username -func (r *ChangePassword) Username(v string) *ChangePassword { +func (r *ChangePassword) Username(username string) *ChangePassword { r.paramSet |= usernameMask - r.username = v + r.username = username return r } @@ -249,8 +265,29 @@ func (r *ChangePassword) Username(v string) *ChangePassword { // operation visible to search, if `wait_for` then wait for a refresh to make // this operation visible to search, if `false` then do nothing with refreshes. // API name: refresh -func (r *ChangePassword) Refresh(enum refresh.Refresh) *ChangePassword { - r.values.Set("refresh", enum.String()) +func (r *ChangePassword) Refresh(refresh refresh.Refresh) *ChangePassword { + r.values.Set("refresh", refresh.String()) + + return r +} + +// Password The new password value. Passwords must be at least 6 characters long. +// API name: password +func (r *ChangePassword) Password(password string) *ChangePassword { + r.req.Password = &password + + return r +} + +// PasswordHash A hash of the new password value. This must be produced using the same +// hashing algorithm as has been configured for password storage. For more +// details, +// see the explanation of the `xpack.security.authc.password_hashing.algorithm` +// setting. +// API name: password_hash +func (r *ChangePassword) PasswordHash(passwordhash string) *ChangePassword { + + r.req.PasswordHash = &passwordhash return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/changepassword/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/changepassword/request.go index da7e7f3e7..98c282671 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/changepassword/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/changepassword/request.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package changepassword @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package changepassword // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/change_password/SecurityChangePasswordRequest.ts#L23-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/change_password/SecurityChangePasswordRequest.ts#L23-L51 type Request struct { // Password The new password value. Passwords must be at least 6 characters long. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/changepassword/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/changepassword/response.go index f737f3080..4dbe059b5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/changepassword/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/changepassword/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package changepassword // Response holds the response body struct for the package changepassword // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/change_password/SecurityChangePasswordResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/change_password/SecurityChangePasswordResponse.ts#L20-L22 type Response struct { } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearapikeycache/clear_api_key_cache.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearapikeycache/clear_api_key_cache.go index f29cda972..fb15a7137 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearapikeycache/clear_api_key_cache.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearapikeycache/clear_api_key_cache.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Clear a subset or all entries from the API key cache. 
package clearapikeycache @@ -67,7 +67,7 @@ func NewClearApiKeyCacheFunc(tp elastictransport.Interface) NewClearApiKeyCache return func(ids string) *ClearApiKeyCache { n := New(tp) - n.Ids(ids) + n._ids(ids) return n } @@ -172,7 +172,6 @@ func (r ClearApiKeyCache) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r ClearApiKeyCache) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,11 +215,13 @@ func (r *ClearApiKeyCache) Header(key, value string) *ClearApiKeyCache { return r } -// Ids A comma-separated list of IDs of API keys to clear from the cache +// Ids Comma-separated list of API key IDs to evict from the API key cache. +// To evict all API keys, use `*`. +// Does not support other wildcard patterns. // API Name: ids -func (r *ClearApiKeyCache) Ids(v string) *ClearApiKeyCache { +func (r *ClearApiKeyCache) _ids(ids string) *ClearApiKeyCache { r.paramSet |= idsMask - r.ids = v + r.ids = ids return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearapikeycache/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearapikeycache/response.go index ef7b03291..254f33cd9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearapikeycache/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearapikeycache/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package clearapikeycache @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearapikeycache // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/clear_api_key_cache/SecurityClearApiKeyCacheResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/clear_api_key_cache/SecurityClearApiKeyCacheResponse.ts#L25-L32 type Response struct { ClusterName string `json:"cluster_name"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedprivileges/clear_cached_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedprivileges/clear_cached_privileges.go index 719fc489e..4593af2fc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedprivileges/clear_cached_privileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedprivileges/clear_cached_privileges.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Evicts application privileges from the native application privileges cache. 
package clearcachedprivileges @@ -67,7 +67,7 @@ func NewClearCachedPrivilegesFunc(tp elastictransport.Interface) NewClearCachedP return func(application string) *ClearCachedPrivileges { n := New(tp) - n.Application(application) + n._application(application) return n } @@ -172,7 +172,6 @@ func (r ClearCachedPrivileges) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r ClearCachedPrivileges) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +217,9 @@ func (r *ClearCachedPrivileges) Header(key, value string) *ClearCachedPrivileges // Application A comma-separated list of application names // API Name: application -func (r *ClearCachedPrivileges) Application(v string) *ClearCachedPrivileges { +func (r *ClearCachedPrivileges) _application(application string) *ClearCachedPrivileges { r.paramSet |= applicationMask - r.application = v + r.application = application return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedprivileges/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedprivileges/response.go index f64c855e5..f1bc4cf84 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedprivileges/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedprivileges/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package clearcachedprivileges @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearcachedprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/clear_cached_privileges/SecurityClearCachedPrivilegesResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/clear_cached_privileges/SecurityClearCachedPrivilegesResponse.ts#L25-L32 type Response struct { ClusterName string `json:"cluster_name"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedrealms/clear_cached_realms.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedrealms/clear_cached_realms.go index 71135d1a5..f47b4dd8d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedrealms/clear_cached_realms.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedrealms/clear_cached_realms.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Evicts users from the user cache. Can completely clear the cache or evict // specific users. 
@@ -68,7 +68,7 @@ func NewClearCachedRealmsFunc(tp elastictransport.Interface) NewClearCachedRealm return func(realms string) *ClearCachedRealms { n := New(tp) - n.Realms(realms) + n._realms(realms) return n } @@ -174,7 +174,6 @@ func (r ClearCachedRealms) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -183,6 +182,10 @@ func (r ClearCachedRealms) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,17 +219,21 @@ func (r *ClearCachedRealms) Header(key, value string) *ClearCachedRealms { // Realms Comma-separated list of realms to clear // API Name: realms -func (r *ClearCachedRealms) Realms(v string) *ClearCachedRealms { +func (r *ClearCachedRealms) _realms(realms string) *ClearCachedRealms { r.paramSet |= realmsMask - r.realms = v + r.realms = realms return r } // Usernames Comma-separated list of usernames to clear from the cache // API name: usernames -func (r *ClearCachedRealms) Usernames(v string) *ClearCachedRealms { - r.values.Set("usernames", v) +func (r *ClearCachedRealms) Usernames(usernames ...string) *ClearCachedRealms { + tmp := []string{} + for _, item := range usernames { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("usernames", strings.Join(tmp, ",")) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedrealms/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedrealms/response.go index c1ee8541f..cc8785da5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedrealms/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedrealms/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package clearcachedrealms @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearcachedrealms // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/clear_cached_realms/SecurityClearCachedRealmsResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/clear_cached_realms/SecurityClearCachedRealmsResponse.ts#L25-L32 type Response struct { ClusterName string `json:"cluster_name"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedroles/clear_cached_roles.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedroles/clear_cached_roles.go index 21109bd12..3412a81aa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedroles/clear_cached_roles.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedroles/clear_cached_roles.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Evicts roles from the native role cache. 
package clearcachedroles @@ -67,7 +67,7 @@ func NewClearCachedRolesFunc(tp elastictransport.Interface) NewClearCachedRoles return func(name string) *ClearCachedRoles { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -172,7 +172,6 @@ func (r ClearCachedRoles) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r ClearCachedRoles) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +217,9 @@ func (r *ClearCachedRoles) Header(key, value string) *ClearCachedRoles { // Name Role name // API Name: name -func (r *ClearCachedRoles) Name(v string) *ClearCachedRoles { +func (r *ClearCachedRoles) _name(name string) *ClearCachedRoles { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedroles/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedroles/response.go index 26c0e678b..11bd863d5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedroles/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedroles/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package clearcachedroles @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearcachedroles // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/clear_cached_roles/ClearCachedRolesResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/clear_cached_roles/ClearCachedRolesResponse.ts#L25-L32 type Response struct { ClusterName string `json:"cluster_name"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedservicetokens/clear_cached_service_tokens.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedservicetokens/clear_cached_service_tokens.go index 4d2d8391a..0a72ac758 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedservicetokens/clear_cached_service_tokens.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedservicetokens/clear_cached_service_tokens.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Evicts tokens from the service account token caches. 
package clearcachedservicetokens @@ -73,11 +73,11 @@ func NewClearCachedServiceTokensFunc(tp elastictransport.Interface) NewClearCach return func(namespace, service, name string) *ClearCachedServiceTokens { n := New(tp) - n.Namespace(namespace) + n._namespace(namespace) - n.Service(service) + n._service(service) - n.Name(name) + n._name(name) return n } @@ -192,7 +192,6 @@ func (r ClearCachedServiceTokens) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -201,6 +200,10 @@ func (r ClearCachedServiceTokens) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -234,27 +237,27 @@ func (r *ClearCachedServiceTokens) Header(key, value string) *ClearCachedService // Namespace An identifier for the namespace // API Name: namespace -func (r *ClearCachedServiceTokens) Namespace(v string) *ClearCachedServiceTokens { +func (r *ClearCachedServiceTokens) _namespace(namespace string) *ClearCachedServiceTokens { r.paramSet |= namespaceMask - r.namespace = v + r.namespace = namespace return r } // Service An identifier for the service name // API Name: service -func (r *ClearCachedServiceTokens) Service(v string) *ClearCachedServiceTokens { +func (r *ClearCachedServiceTokens) _service(service string) *ClearCachedServiceTokens { r.paramSet |= serviceMask - r.service = v + r.service = service return r } // Name A comma-separated list of service token names // API Name: name -func (r *ClearCachedServiceTokens) Name(v string) *ClearCachedServiceTokens { +func (r *ClearCachedServiceTokens) _name(name string) *ClearCachedServiceTokens { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedservicetokens/response.go 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedservicetokens/response.go index ec20eb65d..0b7fb1bdb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedservicetokens/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedservicetokens/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package clearcachedservicetokens @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package clearcachedservicetokens // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/clear_cached_service_tokens/ClearCachedServiceTokensResponse.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/clear_cached_service_tokens/ClearCachedServiceTokensResponse.ts#L25-L32 type Response struct { ClusterName string `json:"cluster_name"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createapikey/create_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createapikey/create_api_key.go index 72b79c113..82eb91688 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createapikey/create_api_key.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createapikey/create_api_key.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates an API key for access without requiring basic authentication. package createapikey @@ -34,7 +34,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -50,8 +49,9 @@ type CreateApiKey struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -78,6 +78,8 @@ func New(tp elastictransport.Interface) *CreateApiKey { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -107,9 +109,19 @@ func (r *CreateApiKey) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -117,6 +129,7 @@ func (r *CreateApiKey) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -196,7 +209,6 @@ func (r CreateApiKey) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -205,6 +217,10 @@ func (r CreateApiKey) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -219,8 +235,50 @@ func (r *CreateApiKey) Header(key, value string) *CreateApiKey { // operation visible to search, if `wait_for` then wait for a refresh to make // this operation visible to 
search, if `false` then do nothing with refreshes. // API name: refresh -func (r *CreateApiKey) Refresh(enum refresh.Refresh) *CreateApiKey { - r.values.Set("refresh", enum.String()) +func (r *CreateApiKey) Refresh(refresh refresh.Refresh) *CreateApiKey { + r.values.Set("refresh", refresh.String()) + + return r +} + +// Expiration Expiration time for the API key. By default, API keys never expire. +// API name: expiration +func (r *CreateApiKey) Expiration(duration types.Duration) *CreateApiKey { + r.req.Expiration = duration + + return r +} + +// Metadata Arbitrary metadata that you want to associate with the API key. It supports +// nested data structure. Within the metadata object, keys beginning with `_` +// are reserved for system usage. +// API name: metadata +func (r *CreateApiKey) Metadata(metadata types.Metadata) *CreateApiKey { + r.req.Metadata = metadata + + return r +} + +// Name Specifies the name for this API key. +// API name: name +func (r *CreateApiKey) Name(name string) *CreateApiKey { + r.req.Name = &name + + return r +} + +// RoleDescriptors An array of role descriptors for this API key. This parameter is optional. +// When it is not specified or is an empty array, then the API key will have a +// point in time snapshot of permissions of the authenticated user. If you +// supply role descriptors then the resultant permissions would be an +// intersection of API keys permissions and authenticated user’s permissions +// thereby limiting the access scope for API keys. The structure of role +// descriptor is the same as the request for create role API. For more details, +// see create or update roles API. 
+// API name: role_descriptors +func (r *CreateApiKey) RoleDescriptors(roledescriptors map[string]types.RoleDescriptor) *CreateApiKey { + + r.req.RoleDescriptors = roledescriptors return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createapikey/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createapikey/request.go index 342cc06b5..3c5bc0b36 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createapikey/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createapikey/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package createapikey @@ -29,15 +29,15 @@ import ( // Request holds the request body struct for the package createapikey // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/create_api_key/SecurityCreateApiKeyRequest.ts#L26-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/create_api_key/SecurityCreateApiKeyRequest.ts#L26-L58 type Request struct { // Expiration Expiration time for the API key. By default, API keys never expire. Expiration types.Duration `json:"expiration,omitempty"` // Metadata Arbitrary metadata that you want to associate with the API key. It supports - // nested data structure. Within the metadata object, keys beginning with _ are - // reserved for system usage. - Metadata map[string]json.RawMessage `json:"metadata,omitempty"` + // nested data structure. Within the metadata object, keys beginning with `_` + // are reserved for system usage. 
+ Metadata types.Metadata `json:"metadata,omitempty"` // Name Specifies the name for this API key. Name *string `json:"name,omitempty"` // RoleDescriptors An array of role descriptors for this API key. This parameter is optional. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createapikey/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createapikey/response.go index 63827244a..5a0a633c9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createapikey/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createapikey/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package createapikey // Response holds the response body struct for the package createapikey // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/create_api_key/SecurityCreateApiKeyResponse.ts#L23-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/create_api_key/SecurityCreateApiKeyResponse.ts#L23-L50 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createcrossclusterapikey/create_cross_cluster_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createcrossclusterapikey/create_cross_cluster_api_key.go new file mode 100644 index 000000000..6ad984079 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createcrossclusterapikey/create_cross_cluster_api_key.go @@ -0,0 +1,181 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Creates a cross-cluster API key for API key based remote cluster access. +package createcrossclusterapikey + +import ( + gobytes "bytes" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type CreateCrossClusterApiKey struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int +} + +// NewCreateCrossClusterApiKey type alias for index. +type NewCreateCrossClusterApiKey func() *CreateCrossClusterApiKey + +// NewCreateCrossClusterApiKeyFunc returns a new instance of CreateCrossClusterApiKey with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewCreateCrossClusterApiKeyFunc(tp elastictransport.Interface) NewCreateCrossClusterApiKey { + return func() *CreateCrossClusterApiKey { + n := New(tp) + + return n + } +} + +// Creates a cross-cluster API key for API key based remote cluster access. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-cross-cluster-api-key.html +func New(tp elastictransport.Interface) *CreateCrossClusterApiKey { + r := &CreateCrossClusterApiKey{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *CreateCrossClusterApiKey) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_security") + path.WriteString("/") + path.WriteString("cross_cluster") + path.WriteString("/") + path.WriteString("api_key") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r CreateCrossClusterApiKey) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the CreateCrossClusterApiKey query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a createcrossclusterapikey.Response +func (r CreateCrossClusterApiKey) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r CreateCrossClusterApiKey) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the CreateCrossClusterApiKey headers map. +func (r *CreateCrossClusterApiKey) Header(key, value string) *CreateCrossClusterApiKey { + r.headers.Set(key, value) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createservicetoken/create_service_token.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createservicetoken/create_service_token.go index e477584a6..db3b60c55 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createservicetoken/create_service_token.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createservicetoken/create_service_token.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates a service account token for access without requiring basic // authentication. @@ -36,7 +36,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -76,9 +75,9 @@ func NewCreateServiceTokenFunc(tp elastictransport.Interface) NewCreateServiceTo return func(namespace, service string) *CreateServiceToken { n := New(tp) - n.Namespace(namespace) + n._namespace(namespace) - n.Service(service) + n._service(service) return n } @@ -209,7 +208,6 @@ func (r CreateServiceToken) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -218,6 +216,10 @@ func (r CreateServiceToken) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -251,27 +253,27 @@ func (r *CreateServiceToken) Header(key, value string) *CreateServiceToken { // Namespace An identifier for the namespace // API Name: namespace -func (r *CreateServiceToken) Namespace(v string) *CreateServiceToken { +func (r *CreateServiceToken) _namespace(namespace string) *CreateServiceToken { r.paramSet |= namespaceMask - r.namespace = v + r.namespace = namespace return r } // Service An identifier for the service name // API Name: service -func (r *CreateServiceToken) Service(v string) *CreateServiceToken { +func (r *CreateServiceToken) _service(service string) *CreateServiceToken { r.paramSet |= serviceMask - r.service = v + r.service = service return r } // Name An identifier for the token name // API Name: name -func (r *CreateServiceToken) Name(v string) *CreateServiceToken { 
+func (r *CreateServiceToken) Name(name string) *CreateServiceToken { r.paramSet |= nameMask - r.name = v + r.name = name return r } @@ -280,8 +282,8 @@ func (r *CreateServiceToken) Name(v string) *CreateServiceToken { // search, if `wait_for` (the default) then wait for a refresh to make this // operation visible to search, if `false` then do nothing with refreshes. // API name: refresh -func (r *CreateServiceToken) Refresh(enum refresh.Refresh) *CreateServiceToken { - r.values.Set("refresh", enum.String()) +func (r *CreateServiceToken) Refresh(refresh refresh.Refresh) *CreateServiceToken { + r.values.Set("refresh", refresh.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createservicetoken/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createservicetoken/response.go index 349156486..d60cae722 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createservicetoken/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/createservicetoken/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package createservicetoken @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package createservicetoken // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/create_service_token/CreateServiceTokenResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/create_service_token/CreateServiceTokenResponse.ts#L22-L27 type Response struct { Created bool `json:"created"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteprivileges/delete_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteprivileges/delete_privileges.go index 505499af5..a69b248a5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteprivileges/delete_privileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteprivileges/delete_privileges.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Removes application privileges. 
package deleteprivileges @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -72,9 +71,9 @@ func NewDeletePrivilegesFunc(tp elastictransport.Interface) NewDeletePrivileges return func(application, name string) *DeletePrivileges { n := New(tp) - n.Application(application) + n._application(application) - n.Name(name) + n._name(name) return n } @@ -180,7 +179,6 @@ func (r DeletePrivileges) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -189,6 +187,10 @@ func (r DeletePrivileges) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -222,18 +224,18 @@ func (r *DeletePrivileges) Header(key, value string) *DeletePrivileges { // Application Application name // API Name: application -func (r *DeletePrivileges) Application(v string) *DeletePrivileges { +func (r *DeletePrivileges) _application(application string) *DeletePrivileges { r.paramSet |= applicationMask - r.application = v + r.application = application return r } // Name Privilege name // API Name: name -func (r *DeletePrivileges) Name(v string) *DeletePrivileges { +func (r *DeletePrivileges) _name(name string) *DeletePrivileges { r.paramSet |= nameMask - r.name = v + r.name = name return r } @@ -242,8 +244,8 @@ func (r *DeletePrivileges) Name(v string) *DeletePrivileges { // operation visible to search, if `wait_for` then wait for a refresh to make // this operation visible to search, if `false` then do nothing with refreshes. 
// API name: refresh -func (r *DeletePrivileges) Refresh(enum refresh.Refresh) *DeletePrivileges { - r.values.Set("refresh", enum.String()) +func (r *DeletePrivileges) Refresh(refresh refresh.Refresh) *DeletePrivileges { + r.values.Set("refresh", refresh.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteprivileges/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteprivileges/response.go index 1fdd621d3..0a6a0b78a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteprivileges/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteprivileges/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deleteprivileges @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package deleteprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/delete_privileges/SecurityDeletePrivilegesResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/delete_privileges/SecurityDeletePrivilegesResponse.ts#L23-L25 type Response map[string]map[string]types.FoundStatus diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterole/delete_role.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterole/delete_role.go index 981589b7f..45adbc993 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterole/delete_role.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterole/delete_role.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Removes roles in the native realm. package deleterole @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -69,7 +68,7 @@ func NewDeleteRoleFunc(tp elastictransport.Interface) NewDeleteRole { return func(name string) *DeleteRole { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -172,7 +171,6 @@ func (r DeleteRole) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +179,10 @@ func (r DeleteRole) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +216,9 @@ func (r *DeleteRole) Header(key, value string) *DeleteRole { // Name Role name // API Name: name -func (r *DeleteRole) Name(v string) *DeleteRole { +func (r *DeleteRole) _name(name string) *DeleteRole { r.paramSet |= nameMask - r.name = v + r.name = name return r } @@ -225,8 +227,8 @@ func (r *DeleteRole) Name(v string) *DeleteRole { // operation visible to search, if `wait_for` then wait for a refresh to make // this operation visible to search, if `false` then do nothing with refreshes. 
// API name: refresh -func (r *DeleteRole) Refresh(enum refresh.Refresh) *DeleteRole { - r.values.Set("refresh", enum.String()) +func (r *DeleteRole) Refresh(refresh refresh.Refresh) *DeleteRole { + r.values.Set("refresh", refresh.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterole/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterole/response.go index 9306cdb55..d7a32cfe4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterole/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterole/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deleterole // Response holds the response body struct for the package deleterole // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/delete_role/SecurityDeleteRoleResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/delete_role/SecurityDeleteRoleResponse.ts#L20-L22 type Response struct { Found bool `json:"found"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterolemapping/delete_role_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterolemapping/delete_role_mapping.go index 2089fbfef..525e94026 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterolemapping/delete_role_mapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterolemapping/delete_role_mapping.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Removes role mappings. package deleterolemapping @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -69,7 +68,7 @@ func NewDeleteRoleMappingFunc(tp elastictransport.Interface) NewDeleteRoleMappin return func(name string) *DeleteRoleMapping { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -172,7 +171,6 @@ func (r DeleteRoleMapping) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +179,10 @@ func (r DeleteRoleMapping) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +216,9 @@ func (r *DeleteRoleMapping) Header(key, value string) *DeleteRoleMapping { // Name Role-mapping name // API Name: name -func (r *DeleteRoleMapping) Name(v string) *DeleteRoleMapping { +func (r *DeleteRoleMapping) _name(name string) *DeleteRoleMapping { r.paramSet |= nameMask - r.name = v + r.name = name return r } @@ -225,8 +227,8 @@ func (r *DeleteRoleMapping) Name(v string) *DeleteRoleMapping { // operation visible to search, if `wait_for` then wait for a refresh to make // this operation visible to search, if `false` then do nothing with refreshes. 
// API name: refresh -func (r *DeleteRoleMapping) Refresh(enum refresh.Refresh) *DeleteRoleMapping { - r.values.Set("refresh", enum.String()) +func (r *DeleteRoleMapping) Refresh(refresh refresh.Refresh) *DeleteRoleMapping { + r.values.Set("refresh", refresh.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterolemapping/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterolemapping/response.go index eb88470c6..dc3bebb9c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterolemapping/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterolemapping/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deleterolemapping // Response holds the response body struct for the package deleterolemapping // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/delete_role_mapping/SecurityDeleteRoleMappingResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/delete_role_mapping/SecurityDeleteRoleMappingResponse.ts#L20-L22 type Response struct { Found bool `json:"found"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteservicetoken/delete_service_token.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteservicetoken/delete_service_token.go index 98f5393a0..23183afcc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteservicetoken/delete_service_token.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteservicetoken/delete_service_token.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes a service account token. package deleteservicetoken @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -75,11 +74,11 @@ func NewDeleteServiceTokenFunc(tp elastictransport.Interface) NewDeleteServiceTo return func(namespace, service, name string) *DeleteServiceToken { n := New(tp) - n.Namespace(namespace) + n._namespace(namespace) - n.Service(service) + n._service(service) - n.Name(name) + n._name(name) return n } @@ -192,7 +191,6 @@ func (r DeleteServiceToken) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -201,6 +199,10 @@ func (r DeleteServiceToken) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -234,27 +236,27 @@ func (r *DeleteServiceToken) Header(key, value string) *DeleteServiceToken { // Namespace An identifier for the namespace // API Name: namespace -func (r *DeleteServiceToken) Namespace(v string) *DeleteServiceToken { +func (r *DeleteServiceToken) _namespace(namespace string) *DeleteServiceToken { r.paramSet |= namespaceMask - r.namespace = v + r.namespace = namespace return r } // Service An identifier for the service name // API Name: service -func (r *DeleteServiceToken) Service(v string) *DeleteServiceToken { +func (r *DeleteServiceToken) _service(service 
string) *DeleteServiceToken { r.paramSet |= serviceMask - r.service = v + r.service = service return r } // Name An identifier for the token name // API Name: name -func (r *DeleteServiceToken) Name(v string) *DeleteServiceToken { +func (r *DeleteServiceToken) _name(name string) *DeleteServiceToken { r.paramSet |= nameMask - r.name = v + r.name = name return r } @@ -263,8 +265,8 @@ func (r *DeleteServiceToken) Name(v string) *DeleteServiceToken { // search, if `wait_for` (the default) then wait for a refresh to make this // operation visible to search, if `false` then do nothing with refreshes. // API name: refresh -func (r *DeleteServiceToken) Refresh(enum refresh.Refresh) *DeleteServiceToken { - r.values.Set("refresh", enum.String()) +func (r *DeleteServiceToken) Refresh(refresh refresh.Refresh) *DeleteServiceToken { + r.values.Set("refresh", refresh.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteservicetoken/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteservicetoken/response.go index 7ca466f67..93f8ea295 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteservicetoken/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteservicetoken/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deleteservicetoken // Response holds the response body struct for the package deleteservicetoken // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/delete_service_token/DeleteServiceTokenResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/delete_service_token/DeleteServiceTokenResponse.ts#L20-L22 type Response struct { Found bool `json:"found"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteuser/delete_user.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteuser/delete_user.go index b884ce2a1..fd25b10f7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteuser/delete_user.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteuser/delete_user.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes users from the native realm. 
package deleteuser @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -69,7 +68,7 @@ func NewDeleteUserFunc(tp elastictransport.Interface) NewDeleteUser { return func(username string) *DeleteUser { n := New(tp) - n.Username(username) + n._username(username) return n } @@ -172,7 +171,6 @@ func (r DeleteUser) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +179,10 @@ func (r DeleteUser) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +216,9 @@ func (r *DeleteUser) Header(key, value string) *DeleteUser { // Username username // API Name: username -func (r *DeleteUser) Username(v string) *DeleteUser { +func (r *DeleteUser) _username(username string) *DeleteUser { r.paramSet |= usernameMask - r.username = v + r.username = username return r } @@ -225,8 +227,8 @@ func (r *DeleteUser) Username(v string) *DeleteUser { // operation visible to search, if `wait_for` then wait for a refresh to make // this operation visible to search, if `false` then do nothing with refreshes. 
// API name: refresh -func (r *DeleteUser) Refresh(enum refresh.Refresh) *DeleteUser { - r.values.Set("refresh", enum.String()) +func (r *DeleteUser) Refresh(refresh refresh.Refresh) *DeleteUser { + r.values.Set("refresh", refresh.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteuser/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteuser/response.go index d979d6808..f040a915d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteuser/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteuser/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deleteuser // Response holds the response body struct for the package deleteuser // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/delete_user/SecurityDeleteUserResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/delete_user/SecurityDeleteUserResponse.ts#L20-L22 type Response struct { Found bool `json:"found"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/disableuser/disable_user.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/disableuser/disable_user.go index 600f9944e..71bc650dc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/disableuser/disable_user.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/disableuser/disable_user.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Disables users in the native realm. package disableuser @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -69,7 +68,7 @@ func NewDisableUserFunc(tp elastictransport.Interface) NewDisableUser { return func(username string) *DisableUser { n := New(tp) - n.Username(username) + n._username(username) return n } @@ -174,7 +173,6 @@ func (r DisableUser) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -183,6 +181,10 @@ func (r DisableUser) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,9 +218,9 @@ func (r *DisableUser) Header(key, value string) *DisableUser { // Username The username of the user to disable // API Name: username -func (r *DisableUser) Username(v string) *DisableUser { +func (r *DisableUser) _username(username string) *DisableUser { r.paramSet |= usernameMask - r.username = v + r.username = username return r } @@ -227,8 +229,8 @@ func (r *DisableUser) Username(v string) *DisableUser { // operation visible to search, if `wait_for` then wait for a refresh to make // this operation visible to search, if `false` then do nothing with refreshes. 
// API name: refresh -func (r *DisableUser) Refresh(enum refresh.Refresh) *DisableUser { - r.values.Set("refresh", enum.String()) +func (r *DisableUser) Refresh(refresh refresh.Refresh) *DisableUser { + r.values.Set("refresh", refresh.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/disableuser/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/disableuser/response.go index 6ad3e422b..22e564284 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/disableuser/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/disableuser/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package disableuser // Response holds the response body struct for the package disableuser // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/disable_user/SecurityDisableUserResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/disable_user/SecurityDisableUserResponse.ts#L20-L22 type Response struct { } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/disableuserprofile/disable_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/disableuserprofile/disable_user_profile.go index 1ce8234ae..b129e79b1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/disableuserprofile/disable_user_profile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/disableuserprofile/disable_user_profile.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Disables a user profile so it's not visible in user profile searches. package disableuserprofile @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -69,7 +68,7 @@ func NewDisableUserProfileFunc(tp elastictransport.Interface) NewDisableUserProf return func(uid string) *DisableUserProfile { n := New(tp) - n.Uid(uid) + n._uid(uid) return n } @@ -174,7 +173,6 @@ func (r DisableUserProfile) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -183,6 +181,10 @@ func (r DisableUserProfile) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,9 +218,9 @@ func (r *DisableUserProfile) Header(key, value string) *DisableUserProfile { // Uid Unique identifier for the user profile. // API Name: uid -func (r *DisableUserProfile) Uid(v string) *DisableUserProfile { +func (r *DisableUserProfile) _uid(uid string) *DisableUserProfile { r.paramSet |= uidMask - r.uid = v + r.uid = uid return r } @@ -228,8 +230,8 @@ func (r *DisableUserProfile) Uid(v string) *DisableUserProfile { // operation // visible to search, if 'false' do nothing with refreshes. 
// API name: refresh -func (r *DisableUserProfile) Refresh(enum refresh.Refresh) *DisableUserProfile { - r.values.Set("refresh", enum.String()) +func (r *DisableUserProfile) Refresh(refresh refresh.Refresh) *DisableUserProfile { + r.values.Set("refresh", refresh.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/disableuserprofile/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/disableuserprofile/response.go index 86aeda8ca..bee2b1475 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/disableuserprofile/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/disableuserprofile/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package disableuserprofile // Response holds the response body struct for the package disableuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/disable_user_profile/Response.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/disable_user_profile/Response.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enableuser/enable_user.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enableuser/enable_user.go index 4b6b2ff7b..92173e99e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enableuser/enable_user.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enableuser/enable_user.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Enables users in the native realm. package enableuser @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -69,7 +68,7 @@ func NewEnableUserFunc(tp elastictransport.Interface) NewEnableUser { return func(username string) *EnableUser { n := New(tp) - n.Username(username) + n._username(username) return n } @@ -174,7 +173,6 @@ func (r EnableUser) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -183,6 +181,10 @@ func (r EnableUser) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,9 +218,9 @@ func (r *EnableUser) Header(key, value string) *EnableUser { // Username The username of the user to enable // API Name: username -func (r *EnableUser) Username(v string) *EnableUser { +func (r *EnableUser) _username(username string) *EnableUser { r.paramSet |= usernameMask - r.username = v + r.username = username return r } @@ -227,8 +229,8 @@ func (r *EnableUser) Username(v string) *EnableUser { // operation visible to search, if `wait_for` then wait for a refresh to make // this operation visible to search, if `false` then do nothing with refreshes. 
// API name: refresh -func (r *EnableUser) Refresh(enum refresh.Refresh) *EnableUser { - r.values.Set("refresh", enum.String()) +func (r *EnableUser) Refresh(refresh refresh.Refresh) *EnableUser { + r.values.Set("refresh", refresh.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enableuser/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enableuser/response.go index b2080160c..1570f2403 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enableuser/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enableuser/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package enableuser // Response holds the response body struct for the package enableuser // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/enable_user/SecurityEnableUserResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/enable_user/SecurityEnableUserResponse.ts#L20-L22 type Response struct { } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enableuserprofile/enable_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enableuserprofile/enable_user_profile.go index 7550353d7..c97d9d7a4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enableuserprofile/enable_user_profile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enableuserprofile/enable_user_profile.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Enables a user profile so it's visible in user profile searches. package enableuserprofile @@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -69,7 +68,7 @@ func NewEnableUserProfileFunc(tp elastictransport.Interface) NewEnableUserProfil return func(uid string) *EnableUserProfile { n := New(tp) - n.Uid(uid) + n._uid(uid) return n } @@ -174,7 +173,6 @@ func (r EnableUserProfile) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -183,6 +181,10 @@ func (r EnableUserProfile) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,9 +218,9 @@ func (r *EnableUserProfile) Header(key, value string) *EnableUserProfile { // Uid Unique identifier for the user profile. // API Name: uid -func (r *EnableUserProfile) Uid(v string) *EnableUserProfile { +func (r *EnableUserProfile) _uid(uid string) *EnableUserProfile { r.paramSet |= uidMask - r.uid = v + r.uid = uid return r } @@ -228,8 +230,8 @@ func (r *EnableUserProfile) Uid(v string) *EnableUserProfile { // operation // visible to search, if 'false' do nothing with refreshes. 
// API name: refresh -func (r *EnableUserProfile) Refresh(enum refresh.Refresh) *EnableUserProfile { - r.values.Set("refresh", enum.String()) +func (r *EnableUserProfile) Refresh(refresh refresh.Refresh) *EnableUserProfile { + r.values.Set("refresh", refresh.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enableuserprofile/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enableuserprofile/response.go index c070a5d86..c9cf4a6c8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enableuserprofile/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enableuserprofile/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package enableuserprofile // Response holds the response body struct for the package enableuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/enable_user_profile/Response.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/enable_user_profile/Response.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enrollkibana/enroll_kibana.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enrollkibana/enroll_kibana.go index ce6f4f1a8..52b1b09bd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enrollkibana/enroll_kibana.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enrollkibana/enroll_kibana.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Allows a kibana instance to configure itself to communicate with a secured // elasticsearch cluster. @@ -169,7 +169,6 @@ func (r EnrollKibana) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -178,6 +177,10 @@ func (r EnrollKibana) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enrollkibana/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enrollkibana/response.go index 1b150443f..db0249046 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enrollkibana/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enrollkibana/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package enrollkibana @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package enrollkibana // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/enroll_kibana/Response.ts#L20-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/enroll_kibana/Response.ts#L20-L25 type Response struct { HttpCa string `json:"http_ca"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enrollnode/enroll_node.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enrollnode/enroll_node.go index 7774672b3..3995237c9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enrollnode/enroll_node.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enrollnode/enroll_node.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Allows a new node to enroll to an existing cluster with security enabled. 
package enrollnode @@ -167,7 +167,6 @@ func (r EnrollNode) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -176,6 +175,10 @@ func (r EnrollNode) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enrollnode/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enrollnode/response.go index 5389e49f9..f66d4e759 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enrollnode/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/enrollnode/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package enrollnode // Response holds the response body struct for the package enrollnode // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/enroll_node/Response.ts#L20-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/enroll_node/Response.ts#L20-L29 type Response struct { HttpCaCert string `json:"http_ca_cert"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getapikey/get_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getapikey/get_api_key.go index e08a4d8e2..7f1399029 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getapikey/get_api_key.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getapikey/get_api_key.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves information for one or more API keys. package getapikey @@ -160,7 +160,6 @@ func (r GetApiKey) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -169,6 +168,10 @@ func (r GetApiKey) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -200,42 +203,53 @@ func (r *GetApiKey) Header(key, value string) *GetApiKey { return r } -// Id API key id of the API key to be retrieved +// Id An API key id. +// This parameter cannot be used with any of `name`, `realm_name` or `username`. // API name: id -func (r *GetApiKey) Id(v string) *GetApiKey { - r.values.Set("id", v) +func (r *GetApiKey) Id(id string) *GetApiKey { + r.values.Set("id", id) return r } -// Name API key name of the API key to be retrieved +// Name An API key name. +// This parameter cannot be used with any of `id`, `realm_name` or `username`. +// It supports prefix search with wildcard. // API name: name -func (r *GetApiKey) Name(v string) *GetApiKey { - r.values.Set("name", v) +func (r *GetApiKey) Name(name string) *GetApiKey { + r.values.Set("name", name) return r } -// Owner flag to query API keys owned by the currently authenticated user +// Owner A boolean flag that can be used to query API keys owned by the currently +// authenticated user. 
+// The `realm_name` or `username` parameters cannot be specified when this +// parameter is set to `true` as they are assumed to be the currently +// authenticated ones. // API name: owner -func (r *GetApiKey) Owner(b bool) *GetApiKey { - r.values.Set("owner", strconv.FormatBool(b)) +func (r *GetApiKey) Owner(owner bool) *GetApiKey { + r.values.Set("owner", strconv.FormatBool(owner)) return r } -// RealmName realm name of the user who created this API key to be retrieved +// RealmName The name of an authentication realm. +// This parameter cannot be used with either `id` or `name` or when `owner` flag +// is set to `true`. // API name: realm_name -func (r *GetApiKey) RealmName(v string) *GetApiKey { - r.values.Set("realm_name", v) +func (r *GetApiKey) RealmName(name string) *GetApiKey { + r.values.Set("realm_name", name) return r } -// Username user name of the user who created this API key to be retrieved +// Username The username of a user. +// This parameter cannot be used with either `id` or `name` or when `owner` flag +// is set to `true`. // API name: username -func (r *GetApiKey) Username(v string) *GetApiKey { - r.values.Set("username", v) +func (r *GetApiKey) Username(username string) *GetApiKey { + r.values.Set("username", username) return r } @@ -245,8 +259,8 @@ func (r *GetApiKey) Username(v string) *GetApiKey { // permission is the intersection of its assigned role // descriptors and the owner user's role descriptors. 
// API name: with_limited_by -func (r *GetApiKey) WithLimitedBy(b bool) *GetApiKey { - r.values.Set("with_limited_by", strconv.FormatBool(b)) +func (r *GetApiKey) WithLimitedBy(withlimitedby bool) *GetApiKey { + r.values.Set("with_limited_by", strconv.FormatBool(withlimitedby)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getapikey/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getapikey/response.go index 503d5e2a8..4752a914d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getapikey/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getapikey/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getapikey @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getapikey // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_api_key/SecurityGetApiKeyResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_api_key/SecurityGetApiKeyResponse.ts#L22-L24 type Response struct { ApiKeys []types.ApiKey `json:"api_keys"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getbuiltinprivileges/get_builtin_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getbuiltinprivileges/get_builtin_privileges.go index a30a95306..f8eda355e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getbuiltinprivileges/get_builtin_privileges.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getbuiltinprivileges/get_builtin_privileges.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves the list of cluster privileges and index privileges that are // available in this version of Elasticsearch. @@ -163,7 +163,6 @@ func (r GetBuiltinPrivileges) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -172,6 +171,10 @@ func (r GetBuiltinPrivileges) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getbuiltinprivileges/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getbuiltinprivileges/response.go index 55143853d..caa7e557c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getbuiltinprivileges/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getbuiltinprivileges/response.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getbuiltinprivileges +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // Response holds the response body struct for the package getbuiltinprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_builtin_privileges/SecurityGetBuiltinPrivilegesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_builtin_privileges/SecurityGetBuiltinPrivilegesResponse.ts#L22-L24 type Response struct { Cluster []string `json:"cluster"` @@ -34,3 +41,43 @@ func NewResponse() *Response { r := &Response{} return r } + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cluster": + if err := dec.Decode(&s.Cluster); err != nil { + return err + } + + case "index": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Index = append(s.Index, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Index); err != nil { + return err + } + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getprivileges/get_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getprivileges/get_privileges.go index 2d5a5a998..bb8be6be6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getprivileges/get_privileges.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getprivileges/get_privileges.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves application privileges. package getprivileges @@ -191,7 +191,6 @@ func (r GetPrivileges) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -200,6 +199,10 @@ func (r GetPrivileges) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -233,18 +236,18 @@ func (r *GetPrivileges) Header(key, value string) *GetPrivileges { // Application Application name // API Name: application -func (r *GetPrivileges) Application(v string) *GetPrivileges { +func (r *GetPrivileges) Application(application string) *GetPrivileges { r.paramSet |= applicationMask - r.application = v + r.application = application return r } // Name Privilege name // API Name: name -func (r *GetPrivileges) Name(v string) *GetPrivileges { +func (r *GetPrivileges) Name(name string) *GetPrivileges { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getprivileges/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getprivileges/response.go index 0818228bd..56e871fd0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getprivileges/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getprivileges/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getprivileges @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_privileges/SecurityGetPrivilegesResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_privileges/SecurityGetPrivilegesResponse.ts#L23-L25 type Response map[string]map[string]types.PrivilegesActions diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getrole/get_role.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getrole/get_role.go index a26989deb..ba5a1276e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getrole/get_role.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getrole/get_role.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves roles in the native realm. package getrole @@ -175,7 +175,6 @@ func (r GetRole) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -184,6 +183,10 @@ func (r GetRole) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -219,9 +222,9 @@ func (r *GetRole) Header(key, value string) *GetRole { // list. 
If you do not specify this parameter, the API returns information about // all roles. // API Name: name -func (r *GetRole) Name(v string) *GetRole { +func (r *GetRole) Name(name string) *GetRole { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getrole/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getrole/response.go index bed601977..79c574ffb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getrole/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getrole/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getrole @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getrole // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_role/SecurityGetRoleResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_role/SecurityGetRoleResponse.ts#L23-L25 type Response map[string]types.Role diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getrolemapping/get_role_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getrolemapping/get_role_mapping.go index 89bca4c60..0ba15c193 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getrolemapping/get_role_mapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getrolemapping/get_role_mapping.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves role mappings. package getrolemapping @@ -175,7 +175,6 @@ func (r GetRoleMapping) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -184,6 +183,10 @@ func (r GetRoleMapping) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -221,9 +224,9 @@ func (r *GetRoleMapping) Header(key, value string) *GetRoleMapping { // names as a comma-separated list. If you do not specify this parameter, the // API returns information about all role mappings. // API Name: name -func (r *GetRoleMapping) Name(v string) *GetRoleMapping { +func (r *GetRoleMapping) Name(name string) *GetRoleMapping { r.paramSet |= nameMask - r.name = v + r.name = name return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getrolemapping/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getrolemapping/response.go index 44afe0c26..b163e203d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getrolemapping/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getrolemapping/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getrolemapping @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getrolemapping // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_role_mapping/SecurityGetRoleMappingResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_role_mapping/SecurityGetRoleMappingResponse.ts#L23-L25 type Response map[string]types.SecurityRoleMapping diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getserviceaccounts/get_service_accounts.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getserviceaccounts/get_service_accounts.go index 88d6468e8..1b7eb1e7f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getserviceaccounts/get_service_accounts.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getserviceaccounts/get_service_accounts.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves information about service accounts. 
package getserviceaccounts @@ -191,7 +191,6 @@ func (r GetServiceAccounts) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -200,6 +199,10 @@ func (r GetServiceAccounts) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -235,9 +238,9 @@ func (r *GetServiceAccounts) Header(key, value string) *GetServiceAccounts { // service accounts. If you omit this parameter, you must also omit the // `service` parameter. // API Name: namespace -func (r *GetServiceAccounts) Namespace(v string) *GetServiceAccounts { +func (r *GetServiceAccounts) Namespace(namespace string) *GetServiceAccounts { r.paramSet |= namespaceMask - r.namespace = v + r.namespace = namespace return r } @@ -245,9 +248,9 @@ func (r *GetServiceAccounts) Namespace(v string) *GetServiceAccounts { // Service Name of the service name. Omit this parameter to retrieve information about // all service accounts that belong to the specified `namespace`. // API Name: service -func (r *GetServiceAccounts) Service(v string) *GetServiceAccounts { +func (r *GetServiceAccounts) Service(service string) *GetServiceAccounts { r.paramSet |= serviceMask - r.service = v + r.service = service return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getserviceaccounts/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getserviceaccounts/response.go index dc28c9385..c81a7e878 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getserviceaccounts/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getserviceaccounts/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getserviceaccounts @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getserviceaccounts // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_service_accounts/GetServiceAccountsResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_service_accounts/GetServiceAccountsResponse.ts#L23-L25 type Response map[string]types.RoleDescriptorWrapper diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getservicecredentials/get_service_credentials.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getservicecredentials/get_service_credentials.go index 9d071781e..e4ce7245f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getservicecredentials/get_service_credentials.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getservicecredentials/get_service_credentials.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves information of all service credentials for a service account. 
package getservicecredentials @@ -70,9 +70,9 @@ func NewGetServiceCredentialsFunc(tp elastictransport.Interface) NewGetServiceCr return func(namespace, service string) *GetServiceCredentials { n := New(tp) - n.Namespace(namespace) + n._namespace(namespace) - n.Service(service) + n._service(service) return n } @@ -180,7 +180,6 @@ func (r GetServiceCredentials) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -189,6 +188,10 @@ func (r GetServiceCredentials) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -222,18 +225,18 @@ func (r *GetServiceCredentials) Header(key, value string) *GetServiceCredentials // Namespace Name of the namespace. // API Name: namespace -func (r *GetServiceCredentials) Namespace(v string) *GetServiceCredentials { +func (r *GetServiceCredentials) _namespace(namespace string) *GetServiceCredentials { r.paramSet |= namespaceMask - r.namespace = v + r.namespace = namespace return r } // Service Name of the service name. // API Name: service -func (r *GetServiceCredentials) Service(v string) *GetServiceCredentials { +func (r *GetServiceCredentials) _service(service string) *GetServiceCredentials { r.paramSet |= serviceMask - r.service = v + r.service = service return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getservicecredentials/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getservicecredentials/response.go index 410a4aa98..c243883fa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getservicecredentials/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getservicecredentials/response.go @@ -16,32 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getservicecredentials import ( - "encoding/json" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Response holds the response body struct for the package getservicecredentials // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_service_credentials/GetServiceCredentialsResponse.ts#L25-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_service_credentials/GetServiceCredentialsResponse.ts#L25-L33 type Response struct { Count int `json:"count"` // NodesCredentials Contains service account credentials collected from all nodes of the cluster - NodesCredentials types.NodesCredentials `json:"nodes_credentials"` - ServiceAccount string `json:"service_account"` - Tokens map[string]map[string]json.RawMessage `json:"tokens"` + NodesCredentials types.NodesCredentials `json:"nodes_credentials"` + ServiceAccount string `json:"service_account"` + Tokens map[string]types.Metadata `json:"tokens"` } // NewResponse returns a Response func NewResponse() *Response { r := &Response{ - Tokens: make(map[string]map[string]json.RawMessage, 0), + Tokens: make(map[string]types.Metadata, 0), } return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/gettoken/get_token.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/gettoken/get_token.go index 4efec78ad..f05d6180b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/gettoken/get_token.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/gettoken/get_token.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates a bearer token for access without requiring basic authentication. package gettoken @@ -34,6 +34,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/accesstokengranttype" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. @@ -48,8 +49,9 @@ type GetToken struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +78,8 @@ func New(tp elastictransport.Interface) *GetToken { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +109,19 @@ func (r *GetToken) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +129,7 @@ func (r *GetToken) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -196,7 +211,6 @@ func (r GetToken) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -205,6 +219,10 @@ func (r GetToken) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,3 +232,48 @@ func (r *GetToken) 
Header(key, value string) *GetToken { return r } + +// API name: grant_type +func (r *GetToken) GrantType(granttype accesstokengranttype.AccessTokenGrantType) *GetToken { + r.req.GrantType = &granttype + + return r +} + +// API name: kerberos_ticket +func (r *GetToken) KerberosTicket(kerberosticket string) *GetToken { + + r.req.KerberosTicket = &kerberosticket + + return r +} + +// API name: password +func (r *GetToken) Password(password string) *GetToken { + r.req.Password = &password + + return r +} + +// API name: refresh_token +func (r *GetToken) RefreshToken(refreshtoken string) *GetToken { + + r.req.RefreshToken = &refreshtoken + + return r +} + +// API name: scope +func (r *GetToken) Scope(scope string) *GetToken { + + r.req.Scope = &scope + + return r +} + +// API name: username +func (r *GetToken) Username(username string) *GetToken { + r.req.Username = &username + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/gettoken/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/gettoken/request.go index 2ab90259b..e672396ed 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/gettoken/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/gettoken/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package gettoken @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package gettoken // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_token/GetUserAccessTokenRequest.ts#L25-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_token/GetUserAccessTokenRequest.ts#L25-L39 type Request struct { GrantType *accesstokengranttype.AccessTokenGrantType `json:"grant_type,omitempty"` KerberosTicket *string `json:"kerberos_ticket,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/gettoken/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/gettoken/response.go index 9ded79814..ec1244382 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/gettoken/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/gettoken/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package gettoken @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettoken // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_token/GetUserAccessTokenResponse.ts#L23-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_token/GetUserAccessTokenResponse.ts#L23-L33 type Response struct { AccessToken string `json:"access_token"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuser/get_user.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuser/get_user.go index 188ae6879..bea3dc06f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuser/get_user.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuser/get_user.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves information about users in the native realm and built-in users. 
package getuser @@ -176,7 +176,6 @@ func (r GetUser) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -185,6 +184,10 @@ func (r GetUser) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -220,17 +223,17 @@ func (r *GetUser) Header(key, value string) *GetUser { // comma-separated list. If you omit this parameter, the API retrieves // information about all users. // API Name: username -func (r *GetUser) Username(v ...string) *GetUser { +func (r *GetUser) Username(usernames ...string) *GetUser { r.paramSet |= usernameMask - r.username = strings.Join(v, ",") + r.username = strings.Join(usernames, ",") return r } // WithProfileUid If true will return the User Profile ID for a user, if any. // API name: with_profile_uid -func (r *GetUser) WithProfileUid(b bool) *GetUser { - r.values.Set("with_profile_uid", strconv.FormatBool(b)) +func (r *GetUser) WithProfileUid(withprofileuid bool) *GetUser { + r.values.Set("with_profile_uid", strconv.FormatBool(withprofileuid)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuser/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuser/response.go index a23e58b11..1fe247835 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuser/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuser/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getuser @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getuser // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_user/SecurityGetUserResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_user/SecurityGetUserResponse.ts#L23-L25 type Response map[string]types.User diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuserprivileges/get_user_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuserprivileges/get_user_privileges.go index 813e83908..5a61a205e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuserprivileges/get_user_privileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuserprivileges/get_user_privileges.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves security privileges for the logged in user. 
package getuserprivileges @@ -161,7 +161,6 @@ func (r GetUserPrivileges) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -170,6 +169,10 @@ func (r GetUserPrivileges) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -205,8 +208,8 @@ func (r *GetUserPrivileges) Header(key, value string) *GetUserPrivileges { // with exactly one application. If you do not specify this parameter, the API // returns information about all privileges for all applications. // API name: application -func (r *GetUserPrivileges) Application(v string) *GetUserPrivileges { - r.values.Set("application", v) +func (r *GetUserPrivileges) Application(name string) *GetUserPrivileges { + r.values.Set("application", name) return r } @@ -214,15 +217,15 @@ func (r *GetUserPrivileges) Application(v string) *GetUserPrivileges { // Priviledge The name of the privilege. If you do not specify this parameter, the API // returns information about all privileges for the requested application. 
// API name: priviledge -func (r *GetUserPrivileges) Priviledge(v string) *GetUserPrivileges { - r.values.Set("priviledge", v) +func (r *GetUserPrivileges) Priviledge(name string) *GetUserPrivileges { + r.values.Set("priviledge", name) return r } // API name: username -func (r *GetUserPrivileges) Username(v string) *GetUserPrivileges { - r.values.Set("username", v) +func (r *GetUserPrivileges) Username(username string) *GetUserPrivileges { + r.values.Set("username", username) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuserprivileges/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuserprivileges/response.go index a0923fae1..bd7e56e73 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuserprivileges/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuserprivileges/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getuserprivileges @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getuserprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_user_privileges/SecurityGetUserPrivilegesResponse.ts#L27-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_user_privileges/SecurityGetUserPrivilegesResponse.ts#L27-L35 type Response struct { Applications []types.ApplicationPrivileges `json:"applications"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuserprofile/get_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuserprofile/get_user_profile.go index 9446f0509..58e9591e7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuserprofile/get_user_profile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuserprofile/get_user_profile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves user profiles for the given unique ID(s). 
package getuserprofile @@ -67,7 +67,7 @@ func NewGetUserProfileFunc(tp elastictransport.Interface) NewGetUserProfile { return func(uid string) *GetUserProfile { n := New(tp) - n.Uid(uid) + n._uid(uid) return n } @@ -170,7 +170,6 @@ func (r GetUserProfile) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r GetUserProfile) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,9 +215,9 @@ func (r *GetUserProfile) Header(key, value string) *GetUserProfile { // Uid A unique identifier for the user profile. // API Name: uid -func (r *GetUserProfile) Uid(v ...string) *GetUserProfile { +func (r *GetUserProfile) _uid(uids ...string) *GetUserProfile { r.paramSet |= uidMask - r.uid = strings.Join(v, ",") + r.uid = strings.Join(uids, ",") return r } @@ -224,8 +227,12 @@ func (r *GetUserProfile) Uid(v ...string) *GetUserProfile { // use `data=` to retrieve content nested under the specified ``. // By default returns no `data` content. // API name: data -func (r *GetUserProfile) Data(v string) *GetUserProfile { - r.values.Set("data", v) +func (r *GetUserProfile) Data(data ...string) *GetUserProfile { + tmp := []string{} + for _, item := range data { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("data", strings.Join(tmp, ",")) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuserprofile/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuserprofile/response.go index 9a2456ddb..7e4983ab6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuserprofile/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/getuserprofile/response.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getuserprofile @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_user_profile/Response.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_user_profile/Response.ts#L23-L28 type Response struct { Errors *types.GetUserProfileErrors `json:"errors,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/grantapikey/grant_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/grantapikey/grant_api_key.go index c558e93bb..ede5812d3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/grantapikey/grant_api_key.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/grantapikey/grant_api_key.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates an API key on behalf of another user. package grantapikey @@ -34,6 +34,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/apikeygranttype" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. 
@@ -48,8 +49,9 @@ type GrantApiKey struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +78,8 @@ func New(tp elastictransport.Interface) *GrantApiKey { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +109,19 @@ func (r *GrantApiKey) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +129,7 @@ func (r *GrantApiKey) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -196,7 +211,6 @@ func (r GrantApiKey) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -205,6 +219,10 @@ func (r GrantApiKey) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,3 +232,59 @@ func (r *GrantApiKey) Header(key, value string) *GrantApiKey { return r } + +// AccessToken The user’s access token. +// If you specify the `access_token` grant type, this parameter is required. +// It is not valid with other grant types. +// API name: access_token +func (r *GrantApiKey) AccessToken(accesstoken string) *GrantApiKey { + + r.req.AccessToken = &accesstoken + + return r +} + +// ApiKey Defines the API key. +// API name: api_key +func (r *GrantApiKey) ApiKey(apikey *types.GrantApiKey) *GrantApiKey { + + r.req.ApiKey = *apikey + + return r +} + +// GrantType The type of grant. Supported grant types are: `access_token`, `password`. 
+// API name: grant_type +func (r *GrantApiKey) GrantType(granttype apikeygranttype.ApiKeyGrantType) *GrantApiKey { + r.req.GrantType = granttype + + return r +} + +// Password The user’s password. If you specify the `password` grant type, this parameter +// is required. +// It is not valid with other grant types. +// API name: password +func (r *GrantApiKey) Password(password string) *GrantApiKey { + r.req.Password = &password + + return r +} + +// RunAs The name of the user to be impersonated. +// API name: run_as +func (r *GrantApiKey) RunAs(username string) *GrantApiKey { + r.req.RunAs = &username + + return r +} + +// Username The user name that identifies the user. +// If you specify the `password` grant type, this parameter is required. +// It is not valid with other grant types. +// API name: username +func (r *GrantApiKey) Username(username string) *GrantApiKey { + r.req.Username = &username + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/grantapikey/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/grantapikey/request.go index 293992830..af9de3994 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/grantapikey/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/grantapikey/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package grantapikey @@ -30,14 +30,27 @@ import ( // Request holds the request body struct for the package grantapikey // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/grant_api_key/SecurityGrantApiKeyRequest.ts#L24-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/grant_api_key/SecurityGrantApiKeyRequest.ts#L24-L75 type Request struct { - AccessToken *string `json:"access_token,omitempty"` - ApiKey types.GrantApiKey `json:"api_key"` - GrantType apikeygranttype.ApiKeyGrantType `json:"grant_type"` - Password *string `json:"password,omitempty"` - RunAs *string `json:"run_as,omitempty"` - Username *string `json:"username,omitempty"` + + // AccessToken The user’s access token. + // If you specify the `access_token` grant type, this parameter is required. + // It is not valid with other grant types. + AccessToken *string `json:"access_token,omitempty"` + // ApiKey Defines the API key. + ApiKey types.GrantApiKey `json:"api_key"` + // GrantType The type of grant. Supported grant types are: `access_token`, `password`. + GrantType apikeygranttype.ApiKeyGrantType `json:"grant_type"` + // Password The user’s password. If you specify the `password` grant type, this parameter + // is required. + // It is not valid with other grant types. + Password *string `json:"password,omitempty"` + // RunAs The name of the user to be impersonated. + RunAs *string `json:"run_as,omitempty"` + // Username The user name that identifies the user. + // If you specify the `password` grant type, this parameter is required. + // It is not valid with other grant types. 
+ Username *string `json:"username,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/grantapikey/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/grantapikey/response.go index b7de1453d..56db799b2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/grantapikey/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/grantapikey/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package grantapikey // Response holds the response body struct for the package grantapikey // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/grant_api_key/SecurityGrantApiKeyResponse.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/grant_api_key/SecurityGrantApiKeyResponse.ts#L23-L31 type Response struct { ApiKey string `json:"api_key"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivileges/has_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivileges/has_privileges.go index 720f64a4d..45dd9d086 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivileges/has_privileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivileges/has_privileges.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Determines whether the specified user has a specified list of privileges. package hasprivileges @@ -34,6 +34,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterprivilege" ) const ( @@ -52,8 +53,9 @@ type HasPrivileges struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -82,6 +84,8 @@ func New(tp elastictransport.Interface) *HasPrivileges { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -111,9 +115,19 @@ func (r *HasPrivileges) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -121,6 +135,7 @@ func (r *HasPrivileges) HttpRequest(ctx context.Context) (*http.Request, error) } r.buf.Write(data) + } r.path.Scheme = "http" @@ -214,7 +229,6 @@ func (r HasPrivileges) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -223,6 +237,10 @@ func (r HasPrivileges) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -235,9 +253,31 @@ func (r *HasPrivileges) Header(key, value string) *HasPrivileges { // User Username // API Name: user -func (r *HasPrivileges) User(v string) 
*HasPrivileges { +func (r *HasPrivileges) User(user string) *HasPrivileges { r.paramSet |= userMask - r.user = v + r.user = user + + return r +} + +// API name: application +func (r *HasPrivileges) Application(applications ...types.ApplicationPrivilegesCheck) *HasPrivileges { + r.req.Application = applications + + return r +} + +// Cluster A list of the cluster privileges that you want to check. +// API name: cluster +func (r *HasPrivileges) Cluster(clusters ...clusterprivilege.ClusterPrivilege) *HasPrivileges { + r.req.Cluster = clusters + + return r +} + +// API name: index +func (r *HasPrivileges) Index(indices ...types.IndexPrivilegesCheck) *HasPrivileges { + r.req.Index = indices return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivileges/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivileges/request.go index bebef3564..72db810e7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivileges/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivileges/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package hasprivileges @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package hasprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/has_privileges/SecurityHasPrivilegesRequest.ts#L25-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/has_privileges/SecurityHasPrivilegesRequest.ts#L25-L42 type Request struct { Application []types.ApplicationPrivilegesCheck `json:"application,omitempty"` // Cluster A list of the cluster privileges that you want to check. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivileges/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivileges/response.go index 7d877c949..df982c603 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivileges/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivileges/response.go @@ -16,27 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package hasprivileges +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + // Response holds the response body struct for the package hasprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/has_privileges/SecurityHasPrivilegesResponse.ts#L24-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/has_privileges/SecurityHasPrivilegesResponse.ts#L24-L32 type Response struct { - Application map[string]map[string]map[string]bool `json:"application"` - Cluster map[string]bool `json:"cluster"` - HasAllRequested bool `json:"has_all_requested"` - Index map[string]map[string]bool `json:"index"` - Username string `json:"username"` + Application types.ApplicationsPrivileges `json:"application"` + Cluster map[string]bool `json:"cluster"` + HasAllRequested bool `json:"has_all_requested"` + Index map[string]types.Privileges `json:"index"` + Username string `json:"username"` } // NewResponse returns a Response func NewResponse() *Response { r := &Response{ Cluster: make(map[string]bool, 0), - Index: make(map[string]map[string]bool, 0), + Index: make(map[string]types.Privileges, 0), } return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivilegesuserprofile/has_privileges_user_profile.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivilegesuserprofile/has_privileges_user_profile.go index 6d3b3a664..922a1bd73 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivilegesuserprofile/has_privileges_user_profile.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivilegesuserprofile/has_privileges_user_profile.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Determines whether the users associated with the specified profile IDs have // all the requested privileges. @@ -49,8 +49,9 @@ type HasPrivilegesUserProfile struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -78,6 +79,8 @@ func New(tp elastictransport.Interface) *HasPrivilegesUserProfile { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -107,9 +110,19 @@ func (r *HasPrivilegesUserProfile) HttpRequest(ctx context.Context) (*http.Reque var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -117,6 +130,7 @@ func (r *HasPrivilegesUserProfile) HttpRequest(ctx context.Context) (*http.Reque } r.buf.Write(data) + } r.path.Scheme = "http" @@ -198,7 +212,6 @@ func (r HasPrivilegesUserProfile) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -207,6 +220,10 @@ func (r HasPrivilegesUserProfile) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,3 +233,20 @@ func (r *HasPrivilegesUserProfile) Header(key, value string) *HasPrivilegesUserP return r } + +// 
API name: privileges +func (r *HasPrivilegesUserProfile) Privileges(privileges *types.PrivilegesCheck) *HasPrivilegesUserProfile { + + r.req.Privileges = *privileges + + return r +} + +// Uids A list of profile IDs. The privileges are checked for associated users of the +// profiles. +// API name: uids +func (r *HasPrivilegesUserProfile) Uids(uids ...string) *HasPrivilegesUserProfile { + r.req.Uids = uids + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivilegesuserprofile/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivilegesuserprofile/request.go index 40bb0c74a..ceb5cf432 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivilegesuserprofile/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivilegesuserprofile/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package hasprivilegesuserprofile @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package hasprivilegesuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/has_privileges_user_profile/Request.ts#L24-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/has_privileges_user_profile/Request.ts#L24-L38 type Request struct { Privileges types.PrivilegesCheck `json:"privileges"` // Uids A list of profile IDs. 
The privileges are checked for associated users of the diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivilegesuserprofile/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivilegesuserprofile/response.go index 6234002c5..cd4d3c916 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivilegesuserprofile/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/hasprivilegesuserprofile/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package hasprivilegesuserprofile @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package hasprivilegesuserprofile // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/has_privileges_user_profile/Response.ts#L23-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/has_privileges_user_profile/Response.ts#L23-L38 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidateapikey/invalidate_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidateapikey/invalidate_api_key.go index 24ffdb03c..b54d1ffbe 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidateapikey/invalidate_api_key.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidateapikey/invalidate_api_key.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Invalidates one or more API keys. package invalidateapikey @@ -48,8 +48,9 @@ type InvalidateApiKey struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +77,8 @@ func New(tp elastictransport.Interface) *InvalidateApiKey { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +108,19 @@ func (r *InvalidateApiKey) HttpRequest(ctx context.Context) (*http.Request, erro var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +128,7 @@ func (r *InvalidateApiKey) HttpRequest(ctx context.Context) (*http.Request, erro } r.buf.Write(data) + } r.path.Scheme = "http" @@ -194,7 +208,6 @@ func (r InvalidateApiKey) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -203,6 +216,10 @@ func (r InvalidateApiKey) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,3 +229,61 @@ func (r *InvalidateApiKey) Header(key, value string) *InvalidateApiKey { return r } + +// API name: id +func (r *InvalidateApiKey) Id(id string) *InvalidateApiKey { + r.req.Id = &id + + return r +} + +// Ids A list of API key ids. +// This parameter cannot be used with any of `name`, `realm_name`, or +// `username`. 
+// API name: ids +func (r *InvalidateApiKey) Ids(ids ...string) *InvalidateApiKey { + r.req.Ids = ids + + return r +} + +// Name An API key name. +// This parameter cannot be used with any of `ids`, `realm_name` or `username`. +// API name: name +func (r *InvalidateApiKey) Name(name string) *InvalidateApiKey { + r.req.Name = &name + + return r +} + +// Owner Can be used to query API keys owned by the currently authenticated user. +// The `realm_name` or `username` parameters cannot be specified when this +// parameter is set to `true` as they are assumed to be the currently +// authenticated ones. +// API name: owner +func (r *InvalidateApiKey) Owner(owner bool) *InvalidateApiKey { + r.req.Owner = &owner + + return r +} + +// RealmName The name of an authentication realm. +// This parameter cannot be used with either `ids` or `name`, or when `owner` +// flag is set to `true`. +// API name: realm_name +func (r *InvalidateApiKey) RealmName(realmname string) *InvalidateApiKey { + + r.req.RealmName = &realmname + + return r +} + +// Username The username of a user. +// This parameter cannot be used with either `ids` or `name`, or when `owner` +// flag is set to `true`. +// API name: username +func (r *InvalidateApiKey) Username(username string) *InvalidateApiKey { + r.req.Username = &username + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidateapikey/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidateapikey/request.go index 7e5429803..a9b00d5e9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidateapikey/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidateapikey/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package invalidateapikey @@ -27,14 +27,29 @@ import ( // Request holds the request body struct for the package invalidateapikey // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/invalidate_api_key/SecurityInvalidateApiKeyRequest.ts#L23-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/invalidate_api_key/SecurityInvalidateApiKeyRequest.ts#L23-L66 type Request struct { - Id *string `json:"id,omitempty"` - Ids []string `json:"ids,omitempty"` - Name *string `json:"name,omitempty"` - Owner *bool `json:"owner,omitempty"` - RealmName *string `json:"realm_name,omitempty"` - Username *string `json:"username,omitempty"` + Id *string `json:"id,omitempty"` + // Ids A list of API key ids. + // This parameter cannot be used with any of `name`, `realm_name`, or + // `username`. + Ids []string `json:"ids,omitempty"` + // Name An API key name. + // This parameter cannot be used with any of `ids`, `realm_name` or `username`. + Name *string `json:"name,omitempty"` + // Owner Can be used to query API keys owned by the currently authenticated user. + // The `realm_name` or `username` parameters cannot be specified when this + // parameter is set to `true` as they are assumed to be the currently + // authenticated ones. + Owner *bool `json:"owner,omitempty"` + // RealmName The name of an authentication realm. + // This parameter cannot be used with either `ids` or `name`, or when `owner` + // flag is set to `true`. + RealmName *string `json:"realm_name,omitempty"` + // Username The username of a user. + // This parameter cannot be used with either `ids` or `name`, or when `owner` + // flag is set to `true`. 
+ Username *string `json:"username,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidateapikey/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidateapikey/response.go index 918a8f409..163c9294f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidateapikey/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidateapikey/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package invalidateapikey @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package invalidateapikey // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/invalidate_api_key/SecurityInvalidateApiKeyResponse.ts#L23-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/invalidate_api_key/SecurityInvalidateApiKeyResponse.ts#L23-L30 type Response struct { ErrorCount int `json:"error_count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidatetoken/invalidate_token.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidatetoken/invalidate_token.go index 4f806d38a..44d2f1a6b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidatetoken/invalidate_token.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidatetoken/invalidate_token.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Invalidates one or more access tokens or refresh tokens. package invalidatetoken @@ -48,8 +48,9 @@ type InvalidateToken struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +77,8 @@ func New(tp elastictransport.Interface) *InvalidateToken { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +108,19 @@ func (r *InvalidateToken) HttpRequest(ctx context.Context) (*http.Request, error var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +128,7 @@ func (r *InvalidateToken) HttpRequest(ctx context.Context) (*http.Request, error } r.buf.Write(data) + } r.path.Scheme = "http" @@ -196,7 +210,6 @@ func (r InvalidateToken) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -205,6 +218,10 @@ func (r InvalidateToken) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,3 +231,33 @@ func (r *InvalidateToken) Header(key, value string) *InvalidateToken { return r } + +// API name: realm_name +func (r *InvalidateToken) RealmName(name string) *InvalidateToken { + r.req.RealmName = &name + + return r +} + +// API name: refresh_token +func (r *InvalidateToken) RefreshToken(refreshtoken string) *InvalidateToken { + + r.req.RefreshToken = &refreshtoken + + return r +} + +// 
API name: token +func (r *InvalidateToken) Token(token string) *InvalidateToken { + + r.req.Token = &token + + return r +} + +// API name: username +func (r *InvalidateToken) Username(username string) *InvalidateToken { + r.req.Username = &username + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidatetoken/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidatetoken/request.go index 405368209..8f0a3cbd3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidatetoken/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidatetoken/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package invalidatetoken @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package invalidatetoken // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/invalidate_token/SecurityInvalidateTokenRequest.ts#L23-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/invalidate_token/SecurityInvalidateTokenRequest.ts#L23-L35 type Request struct { RealmName *string `json:"realm_name,omitempty"` RefreshToken *string `json:"refresh_token,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidatetoken/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidatetoken/response.go index ae356c44e..1406943e9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidatetoken/response.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/invalidatetoken/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package invalidatetoken @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package invalidatetoken // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/invalidate_token/SecurityInvalidateTokenResponse.ts#L23-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/invalidate_token/SecurityInvalidateTokenResponse.ts#L23-L30 type Response struct { ErrorCount int64 `json:"error_count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/oidcauthenticate/oidc_authenticate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/oidcauthenticate/oidc_authenticate.go index 2136f75e1..30dce20c9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/oidcauthenticate/oidc_authenticate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/oidcauthenticate/oidc_authenticate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Exchanges an OpenID Connection authentication response message for an // Elasticsearch access token and refresh token pair @@ -149,6 +149,11 @@ func (r OidcAuthenticate) Perform(ctx context.Context) (*http.Response, error) { return res, nil } +// Do runs the request through the transport, handle the response and returns a oidcauthenticate.Response +func (r OidcAuthenticate) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + // IsSuccess allows to run a query with a context and retrieve the result as a boolean. // This only exists for endpoints without a request payload and allows for quick control flow. func (r OidcAuthenticate) IsSuccess(ctx context.Context) (bool, error) { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/oidclogout/oidc_logout.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/oidclogout/oidc_logout.go index 1b9c84719..0c0f1bc51 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/oidclogout/oidc_logout.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/oidclogout/oidc_logout.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Invalidates a refresh token and access token that was generated from the // OpenID Connect Authenticate API @@ -149,6 +149,11 @@ func (r OidcLogout) Perform(ctx context.Context) (*http.Response, error) { return res, nil } +// Do runs the request through the transport, handle the response and returns a oidclogout.Response +func (r OidcLogout) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + // IsSuccess allows to run a query with a context and retrieve the result as a boolean. // This only exists for endpoints without a request payload and allows for quick control flow. func (r OidcLogout) IsSuccess(ctx context.Context) (bool, error) { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/oidcprepareauthentication/oidc_prepare_authentication.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/oidcprepareauthentication/oidc_prepare_authentication.go index 90af3cbee..02c667386 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/oidcprepareauthentication/oidc_prepare_authentication.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/oidcprepareauthentication/oidc_prepare_authentication.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates an OAuth 2.0 authentication request as a URL string package oidcprepareauthentication @@ -147,6 +147,11 @@ func (r OidcPrepareAuthentication) Perform(ctx context.Context) (*http.Response, return res, nil } +// Do runs the request through the transport, handle the response and returns a oidcprepareauthentication.Response +func (r OidcPrepareAuthentication) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + // IsSuccess allows to run a query with a context and retrieve the result as a boolean. // This only exists for endpoints without a request payload and allows for quick control flow. func (r OidcPrepareAuthentication) IsSuccess(ctx context.Context) (bool, error) { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putprivileges/put_privileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putprivileges/put_privileges.go index 38b202de6..a74ade3c6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putprivileges/put_privileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putprivileges/put_privileges.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Adds or updates application privileges. 
package putprivileges @@ -34,7 +34,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -50,8 +49,9 @@ type PutPrivileges struct { buf *gobytes.Buffer - req map[string]map[string]types.PrivilegesActions - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -92,7 +92,7 @@ func (r *PutPrivileges) Raw(raw io.Reader) *PutPrivileges { } // Request allows to set the request property with the appropriate payload. -func (r *PutPrivileges) Request(req map[string]map[string]types.PrivilegesActions) *PutPrivileges { +func (r *PutPrivileges) Request(req *Request) *PutPrivileges { r.req = req return r @@ -107,9 +107,19 @@ func (r *PutPrivileges) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -117,6 +127,7 @@ func (r *PutPrivileges) HttpRequest(ctx context.Context) (*http.Request, error) } r.buf.Write(data) + } r.path.Scheme = "http" @@ -127,7 +138,7 @@ func (r *PutPrivileges) HttpRequest(ctx context.Context) (*http.Request, error) path.WriteString("_security") path.WriteString("/") path.WriteString("privilege") - path.WriteString("/") + method = http.MethodPut } @@ -196,7 +207,6 @@ func (r PutPrivileges) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -205,6 +215,10 @@ func (r PutPrivileges) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -219,8 +233,8 @@ func (r *PutPrivileges) Header(key, 
value string) *PutPrivileges { // operation visible to search, if `wait_for` then wait for a refresh to make // this operation visible to search, if `false` then do nothing with refreshes. // API name: refresh -func (r *PutPrivileges) Refresh(enum refresh.Refresh) *PutPrivileges { - r.values.Set("refresh", enum.String()) +func (r *PutPrivileges) Refresh(refresh refresh.Refresh) *PutPrivileges { + r.values.Set("refresh", refresh.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putprivileges/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putprivileges/request.go new file mode 100644 index 000000000..a617a89ce --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putprivileges/request.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package putprivileges + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package putprivileges +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/put_privileges/SecurityPutPrivilegesRequest.ts#L25-L37 +type Request = map[string]map[string]types.PrivilegesActions diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putprivileges/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putprivileges/response.go index 7bf3cfa07..e44050bd0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putprivileges/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putprivileges/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putprivileges @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putprivileges // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/put_privileges/SecurityPutPrivilegesResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/put_privileges/SecurityPutPrivilegesResponse.ts#L23-L25 type Response map[string]map[string]types.CreatedStatus diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrole/put_role.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrole/put_role.go index 2f0df7c1d..0a1571c02 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrole/put_role.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrole/put_role.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Adds and updates roles in the native realm. 
package putrole @@ -34,7 +34,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterprivilege" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -54,8 +54,9 @@ type PutRole struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -71,7 +72,7 @@ func NewPutRoleFunc(tp elastictransport.Interface) NewPutRole { return func(name string) *PutRole { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -86,6 +87,8 @@ func New(tp elastictransport.Interface) *PutRole { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -115,9 +118,19 @@ func (r *PutRole) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -125,6 +138,7 @@ func (r *PutRole) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -207,7 +221,6 @@ func (r PutRole) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -216,6 +229,10 @@ func (r PutRole) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -228,9 +245,9 @@ func (r *PutRole) Header(key, value string) *PutRole { // Name The name of the role. 
// API Name: name -func (r *PutRole) Name(v string) *PutRole { +func (r *PutRole) _name(name string) *PutRole { r.paramSet |= nameMask - r.name = v + r.name = name return r } @@ -239,8 +256,75 @@ func (r *PutRole) Name(v string) *PutRole { // operation visible to search, if `wait_for` then wait for a refresh to make // this operation visible to search, if `false` then do nothing with refreshes. // API name: refresh -func (r *PutRole) Refresh(enum refresh.Refresh) *PutRole { - r.values.Set("refresh", enum.String()) +func (r *PutRole) Refresh(refresh refresh.Refresh) *PutRole { + r.values.Set("refresh", refresh.String()) + + return r +} + +// Applications A list of application privilege entries. +// API name: applications +func (r *PutRole) Applications(applications ...types.ApplicationPrivileges) *PutRole { + r.req.Applications = applications + + return r +} + +// Cluster A list of cluster privileges. These privileges define the cluster-level +// actions for users with this role. +// API name: cluster +func (r *PutRole) Cluster(clusters ...clusterprivilege.ClusterPrivilege) *PutRole { + r.req.Cluster = clusters + + return r +} + +// Global An object defining global privileges. A global privilege is a form of cluster +// privilege that is request-aware. Support for global privileges is currently +// limited to the management of application privileges. +// API name: global +func (r *PutRole) Global(global map[string]json.RawMessage) *PutRole { + + r.req.Global = global + + return r +} + +// Indices A list of indices permissions entries. +// API name: indices +func (r *PutRole) Indices(indices ...types.IndicesPrivileges) *PutRole { + r.req.Indices = indices + + return r +} + +// Metadata Optional metadata. Within the metadata object, keys that begin with an +// underscore (`_`) are reserved for system use. 
+// API name: metadata +func (r *PutRole) Metadata(metadata types.Metadata) *PutRole { + r.req.Metadata = metadata + + return r +} + +// RunAs A list of users that the owners of this role can impersonate. +// API name: run_as +func (r *PutRole) RunAs(runas ...string) *PutRole { + r.req.RunAs = runas + + return r +} + +// TransientMetadata Indicates roles that might be incompatible with the current cluster license, +// specifically roles with document and field level security. When the cluster +// license doesn’t allow certain features for a given role, this parameter is +// updated dynamically to list the incompatible features. If `enabled` is +// `false`, the role is ignored, but is still listed in the response from the +// authenticate API. +// API name: transient_metadata +func (r *PutRole) TransientMetadata(transientmetadata *types.TransientMetadataConfig) *PutRole { + + r.req.TransientMetadata = transientmetadata return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrole/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrole/request.go index 016132d5c..98745d8c4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrole/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrole/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putrole @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package putrole // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/put_role/SecurityPutRoleRequest.ts#L31-L80 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/put_role/SecurityPutRoleRequest.ts#L31-L80 type Request struct { // Applications A list of application privilege entries. @@ -46,7 +46,7 @@ type Request struct { Indices []types.IndicesPrivileges `json:"indices,omitempty"` // Metadata Optional metadata. Within the metadata object, keys that begin with an // underscore (`_`) are reserved for system use. - Metadata map[string]json.RawMessage `json:"metadata,omitempty"` + Metadata types.Metadata `json:"metadata,omitempty"` // RunAs A list of users that the owners of this role can impersonate. RunAs []string `json:"run_as,omitempty"` // TransientMetadata Indicates roles that might be incompatible with the current cluster license, diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrole/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrole/response.go index eadb1a4e8..53d6e32c8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrole/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrole/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putrole @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putrole // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/put_role/SecurityPutRoleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/put_role/SecurityPutRoleResponse.ts#L22-L24 type Response struct { Role types.CreatedStatus `json:"role"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrolemapping/put_role_mapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrolemapping/put_role_mapping.go index 22f7564d3..275379080 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrolemapping/put_role_mapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrolemapping/put_role_mapping.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates and updates role mappings. 
package putrolemapping @@ -34,7 +34,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -54,8 +53,9 @@ type PutRoleMapping struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -71,7 +71,7 @@ func NewPutRoleMappingFunc(tp elastictransport.Interface) NewPutRoleMapping { return func(name string) *PutRoleMapping { n := New(tp) - n.Name(name) + n._name(name) return n } @@ -86,6 +86,8 @@ func New(tp elastictransport.Interface) *PutRoleMapping { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -115,9 +117,19 @@ func (r *PutRoleMapping) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -125,6 +137,7 @@ func (r *PutRoleMapping) HttpRequest(ctx context.Context) (*http.Request, error) } r.buf.Write(data) + } r.path.Scheme = "http" @@ -207,7 +220,6 @@ func (r PutRoleMapping) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -216,6 +228,10 @@ func (r PutRoleMapping) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -228,9 +244,9 @@ func (r *PutRoleMapping) Header(key, value string) *PutRoleMapping { // Name Role-mapping name // API Name: name -func (r *PutRoleMapping) Name(v string) *PutRoleMapping { +func (r *PutRoleMapping) _name(name string) *PutRoleMapping { r.paramSet |= 
nameMask - r.name = v + r.name = name return r } @@ -239,8 +255,44 @@ func (r *PutRoleMapping) Name(v string) *PutRoleMapping { // operation visible to search, if `wait_for` then wait for a refresh to make // this operation visible to search, if `false` then do nothing with refreshes. // API name: refresh -func (r *PutRoleMapping) Refresh(enum refresh.Refresh) *PutRoleMapping { - r.values.Set("refresh", enum.String()) +func (r *PutRoleMapping) Refresh(refresh refresh.Refresh) *PutRoleMapping { + r.values.Set("refresh", refresh.String()) + + return r +} + +// API name: enabled +func (r *PutRoleMapping) Enabled(enabled bool) *PutRoleMapping { + r.req.Enabled = &enabled + + return r +} + +// API name: metadata +func (r *PutRoleMapping) Metadata(metadata types.Metadata) *PutRoleMapping { + r.req.Metadata = metadata + + return r +} + +// API name: roles +func (r *PutRoleMapping) Roles(roles ...string) *PutRoleMapping { + r.req.Roles = roles + + return r +} + +// API name: rules +func (r *PutRoleMapping) Rules(rules *types.RoleMappingRule) *PutRoleMapping { + + r.req.Rules = rules + + return r +} + +// API name: run_as +func (r *PutRoleMapping) RunAs(runas ...string) *PutRoleMapping { + r.req.RunAs = runas return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrolemapping/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrolemapping/request.go index 62f02333a..11a08accb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrolemapping/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrolemapping/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putrolemapping @@ -29,13 +29,13 @@ import ( // Request holds the request body struct for the package putrolemapping // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/put_role_mapping/SecurityPutRoleMappingRequest.ts#L24-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/put_role_mapping/SecurityPutRoleMappingRequest.ts#L24-L43 type Request struct { - Enabled *bool `json:"enabled,omitempty"` - Metadata map[string]json.RawMessage `json:"metadata,omitempty"` - Roles []string `json:"roles,omitempty"` - Rules *types.RoleMappingRule `json:"rules,omitempty"` - RunAs []string `json:"run_as,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Metadata types.Metadata `json:"metadata,omitempty"` + Roles []string `json:"roles,omitempty"` + Rules *types.RoleMappingRule `json:"rules,omitempty"` + RunAs []string `json:"run_as,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrolemapping/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrolemapping/response.go index 77ff3f3a3..c14ba3704 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrolemapping/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putrolemapping/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putrolemapping @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package putrolemapping // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/put_role_mapping/SecurityPutRoleMappingResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/put_role_mapping/SecurityPutRoleMappingResponse.ts#L22-L24 type Response struct { Created *bool `json:"created,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putuser/put_user.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putuser/put_user.go index e203a21c7..ac31cf24d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putuser/put_user.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putuser/put_user.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Adds and updates users in the native realm. These users are commonly referred // to as native users. 
@@ -35,7 +35,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -55,8 +54,9 @@ type PutUser struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -72,7 +72,7 @@ func NewPutUserFunc(tp elastictransport.Interface) NewPutUser { return func(username string) *PutUser { n := New(tp) - n.Username(username) + n._username(username) return n } @@ -88,6 +88,8 @@ func New(tp elastictransport.Interface) *PutUser { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -117,9 +119,19 @@ func (r *PutUser) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -127,6 +139,7 @@ func (r *PutUser) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -209,7 +222,6 @@ func (r PutUser) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -218,6 +230,10 @@ func (r PutUser) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -230,9 +246,9 @@ func (r *PutUser) Header(key, value string) *PutUser { // Username The username of the User // API Name: username -func (r *PutUser) Username(v string) *PutUser { +func (r *PutUser) _username(username string) *PutUser { r.paramSet |= usernameMask - r.username = v + r.username = username return r } @@ -241,8 +257,58 
@@ func (r *PutUser) Username(v string) *PutUser { // operation visible to search, if `wait_for` then wait for a refresh to make // this operation visible to search, if `false` then do nothing with refreshes. // API name: refresh -func (r *PutUser) Refresh(enum refresh.Refresh) *PutUser { - r.values.Set("refresh", enum.String()) +func (r *PutUser) Refresh(refresh refresh.Refresh) *PutUser { + r.values.Set("refresh", refresh.String()) + + return r +} + +// API name: email +func (r *PutUser) Email(email string) *PutUser { + r.req.Email = email + + return r +} + +// API name: enabled +func (r *PutUser) Enabled(enabled bool) *PutUser { + r.req.Enabled = &enabled + + return r +} + +// API name: full_name +func (r *PutUser) FullName(fullname string) *PutUser { + r.req.FullName = fullname + + return r +} + +// API name: metadata +func (r *PutUser) Metadata(metadata types.Metadata) *PutUser { + r.req.Metadata = metadata + + return r +} + +// API name: password +func (r *PutUser) Password(password string) *PutUser { + r.req.Password = &password + + return r +} + +// API name: password_hash +func (r *PutUser) PasswordHash(passwordhash string) *PutUser { + + r.req.PasswordHash = &passwordhash + + return r +} + +// API name: roles +func (r *PutUser) Roles(roles ...string) *PutUser { + r.req.Roles = roles return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putuser/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putuser/request.go index 87851abb0..ff277c953 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putuser/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putuser/request.go @@ -16,27 +16,29 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putuser import ( "encoding/json" "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Request holds the request body struct for the package putuser // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/put_user/SecurityPutUserRequest.ts#L23-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/put_user/SecurityPutUserRequest.ts#L23-L44 type Request struct { - Email string `json:"email,omitempty"` - Enabled *bool `json:"enabled,omitempty"` - FullName string `json:"full_name,omitempty"` - Metadata map[string]json.RawMessage `json:"metadata,omitempty"` - Password *string `json:"password,omitempty"` - PasswordHash *string `json:"password_hash,omitempty"` - Roles []string `json:"roles,omitempty"` - Username *string `json:"username,omitempty"` + Email string `json:"email,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + FullName string `json:"full_name,omitempty"` + Metadata types.Metadata `json:"metadata,omitempty"` + Password *string `json:"password,omitempty"` + PasswordHash *string `json:"password_hash,omitempty"` + Roles []string `json:"roles,omitempty"` + Username *string `json:"username,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putuser/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putuser/response.go index 92032b0dc..2bc984187 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putuser/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/putuser/response.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putuser // Response holds the response body struct for the package putuser // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/put_user/SecurityPutUserResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/put_user/SecurityPutUserResponse.ts#L20-L22 type Response struct { Created bool `json:"created"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/queryapikeys/query_api_keys.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/queryapikeys/query_api_keys.go index 9573f4002..a9ea1e8db 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/queryapikeys/query_api_keys.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/queryapikeys/query_api_keys.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves information for API keys using a subset of query DSL package queryapikeys @@ -49,8 +49,9 @@ type QueryApiKeys struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -77,6 +78,8 @@ func New(tp elastictransport.Interface) *QueryApiKeys { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -106,9 +109,19 @@ func (r *QueryApiKeys) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -116,6 +129,7 @@ func (r *QueryApiKeys) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -197,7 +211,6 @@ func (r QueryApiKeys) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -206,6 +219,10 @@ func (r QueryApiKeys) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,13 +233,65 @@ func (r *QueryApiKeys) Header(key, value string) *QueryApiKeys { return r } -// WithLimitedBy Return the snapshot of the owner user's role descriptors -// associated with the API key. An API key's actual -// permission is the intersection of its assigned role +// WithLimitedBy Return the snapshot of the owner user's role descriptors associated with the +// API key. 
+// An API key's actual permission is the intersection of its assigned role // descriptors and the owner user's role descriptors. // API name: with_limited_by -func (r *QueryApiKeys) WithLimitedBy(b bool) *QueryApiKeys { - r.values.Set("with_limited_by", strconv.FormatBool(b)) +func (r *QueryApiKeys) WithLimitedBy(withlimitedby bool) *QueryApiKeys { + r.values.Set("with_limited_by", strconv.FormatBool(withlimitedby)) + + return r +} + +// From Starting document offset. +// By default, you cannot page through more than 10,000 hits using the from and +// size parameters. +// To page through more hits, use the `search_after` parameter. +// API name: from +func (r *QueryApiKeys) From(from int) *QueryApiKeys { + r.req.From = &from + + return r +} + +// Query A query to filter which API keys to return. +// The query supports a subset of query types, including `match_all`, `bool`, +// `term`, `terms`, `ids`, `prefix`, `wildcard`, and `range`. +// You can query all public information associated with an API key. +// API name: query +func (r *QueryApiKeys) Query(query *types.Query) *QueryApiKeys { + + r.req.Query = query + + return r +} + +// SearchAfter Search after definition +// API name: search_after +func (r *QueryApiKeys) SearchAfter(sortresults ...types.FieldValue) *QueryApiKeys { + r.req.SearchAfter = sortresults + + return r +} + +// Size The number of hits to return. +// By default, you cannot page through more than 10,000 hits using the `from` +// and `size` parameters. +// To page through more hits, use the `search_after` parameter. +// API name: size +func (r *QueryApiKeys) Size(size int) *QueryApiKeys { + r.req.Size = &size + + return r +} + +// Sort Other than `id`, all public fields of an API key are eligible for sorting. +// In addition, sort can also be applied to the `_doc` field to sort by index +// order. 
+// API name: sort +func (r *QueryApiKeys) Sort(sorts ...types.SortCombinations) *QueryApiKeys { + r.req.Sort = sorts return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/queryapikeys/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/queryapikeys/request.go index 26d614aed..000b1cfd8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/queryapikeys/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/queryapikeys/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package queryapikeys @@ -29,24 +29,29 @@ import ( // Request holds the request body struct for the package queryapikeys // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/query_api_keys/QueryApiKeysRequest.ts#L25-L67 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/query_api_keys/QueryApiKeysRequest.ts#L25-L74 type Request struct { - // From Starting document offset. By default, you cannot page through more than - // 10,000 - // hits using the from and size parameters. To page through more hits, use the - // search_after parameter. + // From Starting document offset. + // By default, you cannot page through more than 10,000 hits using the from and + // size parameters. + // To page through more hits, use the `search_after` parameter. From *int `json:"from,omitempty"` // Query A query to filter which API keys to return. - // The query supports a subset of query types, including match_all, bool, term, - // terms, ids, prefix, wildcard, and range. 
- // You can query all public information associated with an API key - Query *types.Query `json:"query,omitempty"` + // The query supports a subset of query types, including `match_all`, `bool`, + // `term`, `terms`, `ids`, `prefix`, `wildcard`, and `range`. + // You can query all public information associated with an API key. + Query *types.Query `json:"query,omitempty"` + // SearchAfter Search after definition SearchAfter []types.FieldValue `json:"search_after,omitempty"` - // Size The number of hits to return. By default, you cannot page through more - // than 10,000 hits using the from and size parameters. To page through more - // hits, use the search_after parameter. - Size *int `json:"size,omitempty"` + // Size The number of hits to return. + // By default, you cannot page through more than 10,000 hits using the `from` + // and `size` parameters. + // To page through more hits, use the `search_after` parameter. + Size *int `json:"size,omitempty"` + // Sort Other than `id`, all public fields of an API key are eligible for sorting. + // In addition, sort can also be applied to the `_doc` field to sort by index + // order. Sort []types.SortCombinations `json:"sort,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/queryapikeys/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/queryapikeys/response.go index d083de615..74a22751b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/queryapikeys/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/queryapikeys/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package queryapikeys @@ -26,12 +26,16 @@ import ( // Response holds the response body struct for the package queryapikeys // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/query_api_keys/QueryApiKeysResponse.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/query_api_keys/QueryApiKeysResponse.ts#L23-L38 type Response struct { + + // ApiKeys A list of API key information. ApiKeys []types.ApiKey `json:"api_keys"` - Count int `json:"count"` - Total int `json:"total"` + // Count The number of API keys returned in the response. + Count int `json:"count"` + // Total The total number of API keys found. + Total int `json:"total"` } // NewResponse returns a Response diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlauthenticate/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlauthenticate/request.go index 830570616..615fb30e1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlauthenticate/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlauthenticate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package samlauthenticate @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package samlauthenticate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/saml_authenticate/Request.ts#L23-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/saml_authenticate/Request.ts#L23-L38 type Request struct { // Content The SAML response as it was sent by the user’s browser, usually a Base64 diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlauthenticate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlauthenticate/response.go index 805217a6c..8b0fd53c7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlauthenticate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlauthenticate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package samlauthenticate // Response holds the response body struct for the package samlauthenticate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/saml_authenticate/Response.ts#L22-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/saml_authenticate/Response.ts#L22-L30 type Response struct { AccessToken string `json:"access_token"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlauthenticate/saml_authenticate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlauthenticate/saml_authenticate.go index f03527beb..0a70c1210 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlauthenticate/saml_authenticate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlauthenticate/saml_authenticate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Exchanges a SAML Response message for an Elasticsearch access token and // refresh token pair @@ -49,8 +49,9 @@ type SamlAuthenticate struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -78,6 +79,8 @@ func New(tp elastictransport.Interface) *SamlAuthenticate { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -107,9 +110,19 @@ func (r *SamlAuthenticate) HttpRequest(ctx context.Context) (*http.Request, erro var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -117,6 +130,7 @@ func (r *SamlAuthenticate) HttpRequest(ctx context.Context) (*http.Request, erro } r.buf.Write(data) + } r.path.Scheme = "http" @@ -198,7 +212,6 @@ func (r SamlAuthenticate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -207,6 +220,10 @@ func (r SamlAuthenticate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,3 +233,32 @@ func (r *SamlAuthenticate) Header(key, value string) *SamlAuthenticate { return r } + +// Content The SAML response as it was sent by the user’s browser, usually a Base64 +// encoded XML document. 
+// API name: content +func (r *SamlAuthenticate) Content(content string) *SamlAuthenticate { + + r.req.Content = content + + return r +} + +// Ids A json array with all the valid SAML Request Ids that the caller of the API +// has for the current user. +// API name: ids +func (r *SamlAuthenticate) Ids(ids ...string) *SamlAuthenticate { + r.req.Ids = ids + + return r +} + +// Realm The name of the realm that should authenticate the SAML response. Useful in +// cases where many SAML realms are defined. +// API name: realm +func (r *SamlAuthenticate) Realm(realm string) *SamlAuthenticate { + + r.req.Realm = &realm + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlcompletelogout/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlcompletelogout/request.go index 3b558025f..916e7ff03 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlcompletelogout/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlcompletelogout/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package samlcompletelogout @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package samlcompletelogout // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/saml_complete_logout/Request.ts#L23-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/saml_complete_logout/Request.ts#L23-L40 type Request struct { // Content If the SAML IdP sends the logout response with the HTTP-Post binding, this diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlcompletelogout/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlcompletelogout/response.go deleted file mode 100644 index bafae677e..000000000 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlcompletelogout/response.go +++ /dev/null @@ -1,34 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 - -package samlcompletelogout - -// Response holds the response body struct for the package samlcompletelogout -// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/saml_complete_logout/Response.ts#L22-L24 - -type Response struct { -} - -// NewResponse returns a Response -func NewResponse() *Response { - r := &Response{} - return r -} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlcompletelogout/saml_complete_logout.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlcompletelogout/saml_complete_logout.go index 590478f60..b6e6cb619 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlcompletelogout/saml_complete_logout.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlcompletelogout/saml_complete_logout.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Verifies the logout response sent from the SAML IdP package samlcompletelogout @@ -33,7 +33,6 @@ import ( "strings" "github.com/elastic/elastic-transport-go/v8/elastictransport" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // ErrBuildPath is returned in case of missing parameters within the build of the request. 
@@ -48,8 +47,9 @@ type SamlCompleteLogout struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +76,8 @@ func New(tp elastictransport.Interface) *SamlCompleteLogout { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +107,19 @@ func (r *SamlCompleteLogout) HttpRequest(ctx context.Context) (*http.Request, er var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +127,7 @@ func (r *SamlCompleteLogout) HttpRequest(ctx context.Context) (*http.Request, er } r.buf.Write(data) + } r.path.Scheme = "http" @@ -178,39 +191,49 @@ func (r SamlCompleteLogout) Perform(ctx context.Context) (*http.Response, error) return res, nil } -// Do runs the request through the transport, handle the response and returns a samlcompletelogout.Response -func (r SamlCompleteLogout) Do(ctx context.Context) (*Response, error) { +// Header set a key, value pair in the SamlCompleteLogout headers map. +func (r *SamlCompleteLogout) Header(key, value string) *SamlCompleteLogout { + r.headers.Set(key, value) - response := NewResponse() + return r +} - res, err := r.Perform(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() +// Content If the SAML IdP sends the logout response with the HTTP-Post binding, this +// field must be set to the value of the SAMLResponse form parameter from the +// logout response. 
+// API name: content +func (r *SamlCompleteLogout) Content(content string) *SamlCompleteLogout { - if res.StatusCode < 299 { - err = json.NewDecoder(res.Body).Decode(response) - if err != nil { - return nil, err - } + r.req.Content = &content - return response, nil + return r +} - } +// Ids A json array with all the valid SAML Request Ids that the caller of the API +// has for the current user. +// API name: ids +func (r *SamlCompleteLogout) Ids(ids ...string) *SamlCompleteLogout { + r.req.Ids = ids - errorResponse := types.NewElasticsearchError() - err = json.NewDecoder(res.Body).Decode(errorResponse) - if err != nil { - return nil, err - } + return r +} - return nil, errorResponse +// QueryString If the SAML IdP sends the logout response with the HTTP-Redirect binding, +// this field must be set to the query string of the redirect URI. +// API name: query_string +func (r *SamlCompleteLogout) QueryString(querystring string) *SamlCompleteLogout { + + r.req.QueryString = &querystring + + return r } -// Header set a key, value pair in the SamlCompleteLogout headers map. -func (r *SamlCompleteLogout) Header(key, value string) *SamlCompleteLogout { - r.headers.Set(key, value) +// Realm The name of the SAML realm in Elasticsearch for which the configuration is +// used to verify the logout response. +// API name: realm +func (r *SamlCompleteLogout) Realm(realm string) *SamlCompleteLogout { + + r.req.Realm = realm return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlinvalidate/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlinvalidate/request.go index 65f3a1df6..fc10a4747 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlinvalidate/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlinvalidate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package samlinvalidate @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package samlinvalidate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/saml_invalidate/Request.ts#L22-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/saml_invalidate/Request.ts#L22-L43 type Request struct { // Acs The Assertion Consumer Service URL that matches the one of the SAML realm in diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlinvalidate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlinvalidate/response.go index a8ef77945..fc5333808 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlinvalidate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlinvalidate/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package samlinvalidate // Response holds the response body struct for the package samlinvalidate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/saml_invalidate/Response.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/saml_invalidate/Response.ts#L22-L28 type Response struct { Invalidated int `json:"invalidated"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlinvalidate/saml_invalidate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlinvalidate/saml_invalidate.go index d5dd06aa9..3db545dda 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlinvalidate/saml_invalidate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlinvalidate/saml_invalidate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Consumes a SAML LogoutRequest package samlinvalidate @@ -48,8 +48,9 @@ type SamlInvalidate struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +77,8 @@ func New(tp elastictransport.Interface) *SamlInvalidate { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +108,19 @@ func (r *SamlInvalidate) HttpRequest(ctx context.Context) (*http.Request, error) var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +128,7 @@ func (r *SamlInvalidate) HttpRequest(ctx context.Context) (*http.Request, error) } r.buf.Write(data) + } r.path.Scheme = "http" @@ -196,7 +210,6 @@ func (r SamlInvalidate) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -205,6 +218,10 @@ func (r SamlInvalidate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,3 +231,44 @@ func (r *SamlInvalidate) Header(key, value string) *SamlInvalidate { return r } + +// Acs The Assertion Consumer Service URL that matches the one of the SAML realm in +// Elasticsearch that should be used. You must specify either this parameter or +// the realm parameter. 
+// API name: acs +func (r *SamlInvalidate) Acs(acs string) *SamlInvalidate { + + r.req.Acs = &acs + + return r +} + +// QueryString The query part of the URL that the user was redirected to by the SAML IdP to +// initiate the Single Logout. +// This query should include a single parameter named SAMLRequest that contains +// a SAML logout request that is deflated and Base64 encoded. +// If the SAML IdP has signed the logout request, the URL should include two +// extra parameters named SigAlg and Signature that contain the algorithm used +// for the signature and the signature value itself. +// In order for Elasticsearch to be able to verify the IdP’s signature, the +// value of the query_string field must be an exact match to the string provided +// by the browser. +// The client application must not attempt to parse or process the string in any +// way. +// API name: query_string +func (r *SamlInvalidate) QueryString(querystring string) *SamlInvalidate { + + r.req.QueryString = querystring + + return r +} + +// Realm The name of the SAML realm in Elasticsearch the configuration. You must +// specify either this parameter or the acs parameter. +// API name: realm +func (r *SamlInvalidate) Realm(realm string) *SamlInvalidate { + + r.req.Realm = &realm + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samllogout/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samllogout/request.go index d8b0c5db1..5c368da36 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samllogout/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samllogout/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package samllogout @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package samllogout // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/saml_logout/Request.ts#L22-L41 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/saml_logout/Request.ts#L22-L41 type Request struct { // RefreshToken The refresh token that was returned as a response to calling the SAML diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samllogout/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samllogout/response.go index d819b6093..91fc7f0a2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samllogout/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samllogout/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package samllogout // Response holds the response body struct for the package samllogout // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/saml_logout/Response.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/saml_logout/Response.ts#L20-L24 type Response struct { Redirect string `json:"redirect"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samllogout/saml_logout.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samllogout/saml_logout.go index b2a80ffd2..d94942225 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samllogout/saml_logout.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samllogout/saml_logout.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Invalidates an access token and a refresh token that were generated via the // SAML Authenticate API @@ -49,8 +49,9 @@ type SamlLogout struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -78,6 +79,8 @@ func New(tp elastictransport.Interface) *SamlLogout { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -107,9 +110,19 @@ func (r *SamlLogout) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -117,6 +130,7 @@ func (r *SamlLogout) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -198,7 +212,6 @@ func (r SamlLogout) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -207,6 +220,10 @@ func (r SamlLogout) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,3 +233,27 @@ func (r *SamlLogout) Header(key, value string) *SamlLogout { return r } + +// RefreshToken The refresh token that was returned as a response to calling the SAML +// authenticate API. +// Alternatively, the most recent refresh token that was received after +// refreshing the original access token. 
+// API name: refresh_token +func (r *SamlLogout) RefreshToken(refreshtoken string) *SamlLogout { + + r.req.RefreshToken = &refreshtoken + + return r +} + +// Token The access token that was returned as a response to calling the SAML +// authenticate API. +// Alternatively, the most recent token that was received after refreshing the +// original one by using a refresh_token. +// API name: token +func (r *SamlLogout) Token(token string) *SamlLogout { + + r.req.Token = token + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlprepareauthentication/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlprepareauthentication/request.go index 84b629fdb..0115c3a2a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlprepareauthentication/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlprepareauthentication/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package samlprepareauthentication @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package samlprepareauthentication // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/saml_prepare_authentication/Request.ts#L22-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/saml_prepare_authentication/Request.ts#L22-L46 type Request struct { // Acs The Assertion Consumer Service URL that matches the one of the SAML realms in diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlprepareauthentication/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlprepareauthentication/response.go index 1cd79e833..e6279d9f2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlprepareauthentication/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlprepareauthentication/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package samlprepareauthentication // Response holds the response body struct for the package samlprepareauthentication // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/saml_prepare_authentication/Response.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/saml_prepare_authentication/Response.ts#L22-L28 type Response struct { Id string `json:"id"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlprepareauthentication/saml_prepare_authentication.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlprepareauthentication/saml_prepare_authentication.go index 7b44bf43f..1a988c48e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlprepareauthentication/saml_prepare_authentication.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlprepareauthentication/saml_prepare_authentication.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates a SAML authentication request package samlprepareauthentication @@ -48,8 +48,9 @@ type SamlPrepareAuthentication struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +77,8 @@ func New(tp elastictransport.Interface) *SamlPrepareAuthentication { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +108,19 @@ func (r *SamlPrepareAuthentication) HttpRequest(ctx context.Context) (*http.Requ var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +128,7 @@ func (r *SamlPrepareAuthentication) HttpRequest(ctx context.Context) (*http.Requ } r.buf.Write(data) + } r.path.Scheme = "http" @@ -196,7 +210,6 @@ func (r SamlPrepareAuthentication) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -205,6 +218,10 @@ func (r SamlPrepareAuthentication) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,3 +231,38 @@ func (r *SamlPrepareAuthentication) Header(key, value string) *SamlPrepareAuthen return r } + +// Acs The Assertion Consumer Service URL that matches the one of the SAML realms in +// Elasticsearch. +// The realm is used to generate the authentication request. You must specify +// either this parameter or the realm parameter. 
+// API name: acs +func (r *SamlPrepareAuthentication) Acs(acs string) *SamlPrepareAuthentication { + + r.req.Acs = &acs + + return r +} + +// Realm The name of the SAML realm in Elasticsearch for which the configuration is +// used to generate the authentication request. +// You must specify either this parameter or the acs parameter. +// API name: realm +func (r *SamlPrepareAuthentication) Realm(realm string) *SamlPrepareAuthentication { + + r.req.Realm = &realm + + return r +} + +// RelayState A string that will be included in the redirect URL that this API returns as +// the RelayState query parameter. +// If the Authentication Request is signed, this value is used as part of the +// signature computation. +// API name: relay_state +func (r *SamlPrepareAuthentication) RelayState(relaystate string) *SamlPrepareAuthentication { + + r.req.RelayState = &relaystate + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlserviceprovidermetadata/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlserviceprovidermetadata/response.go index 6e0f264d6..028cbf81b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlserviceprovidermetadata/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlserviceprovidermetadata/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package samlserviceprovidermetadata // Response holds the response body struct for the package samlserviceprovidermetadata // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/saml_service_provider_metadata/Response.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/saml_service_provider_metadata/Response.ts#L20-L24 type Response struct { Metadata string `json:"metadata"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlserviceprovidermetadata/saml_service_provider_metadata.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlserviceprovidermetadata/saml_service_provider_metadata.go index 1b04bd6b7..c97e97ac3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlserviceprovidermetadata/saml_service_provider_metadata.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/samlserviceprovidermetadata/saml_service_provider_metadata.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Generates SAML metadata for the Elastic stack SAML 2.0 Service Provider package samlserviceprovidermetadata @@ -67,7 +67,7 @@ func NewSamlServiceProviderMetadataFunc(tp elastictransport.Interface) NewSamlSe return func(realmname string) *SamlServiceProviderMetadata { n := New(tp) - n.RealmName(realmname) + n._realmname(realmname) return n } @@ -178,7 +178,6 @@ func (r SamlServiceProviderMetadata) Do(ctx context.Context) (*Response, error) } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -187,6 +186,10 @@ func (r SamlServiceProviderMetadata) Do(ctx context.Context) (*Response, error) return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -220,9 +223,9 @@ func (r *SamlServiceProviderMetadata) Header(key, value string) *SamlServiceProv // RealmName The name of the SAML realm in Elasticsearch. // API Name: realmname -func (r *SamlServiceProviderMetadata) RealmName(v string) *SamlServiceProviderMetadata { +func (r *SamlServiceProviderMetadata) _realmname(realmname string) *SamlServiceProviderMetadata { r.paramSet |= realmnameMask - r.realmname = v + r.realmname = realmname return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/suggestuserprofiles/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/suggestuserprofiles/request.go index 3bfe59337..04cb7792c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/suggestuserprofiles/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/suggestuserprofiles/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package suggestuserprofiles @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package suggestuserprofiles // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/suggest_user_profiles/Request.ts#L24-L66 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/suggest_user_profiles/Request.ts#L24-L66 type Request struct { // Data List of filters for the `data` field of the profile document. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/suggestuserprofiles/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/suggestuserprofiles/response.go index b58482f26..26d41c9ff 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/suggestuserprofiles/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/suggestuserprofiles/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package suggestuserprofiles @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package suggestuserprofiles // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/suggest_user_profiles/Response.ts#L29-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/suggest_user_profiles/Response.ts#L29-L35 type Response struct { Profiles []types.UserProfile `json:"profiles"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/suggestuserprofiles/suggest_user_profiles.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/suggestuserprofiles/suggest_user_profiles.go index 1b4b423cd..c93f5432a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/suggestuserprofiles/suggest_user_profiles.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/suggestuserprofiles/suggest_user_profiles.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Get suggestions for user profiles that match specified search criteria. 
package suggestuserprofiles @@ -48,8 +48,9 @@ type SuggestUserProfiles struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +77,8 @@ func New(tp elastictransport.Interface) *SuggestUserProfiles { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +108,19 @@ func (r *SuggestUserProfiles) HttpRequest(ctx context.Context) (*http.Request, e var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +128,7 @@ func (r *SuggestUserProfiles) HttpRequest(ctx context.Context) (*http.Request, e } r.buf.Write(data) + } r.path.Scheme = "http" @@ -196,7 +210,6 @@ func (r SuggestUserProfiles) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -205,6 +218,10 @@ func (r SuggestUserProfiles) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -220,8 +237,39 @@ func (r *SuggestUserProfiles) Header(key, value string) *SuggestUserProfiles { // use `data=` to retrieve content nested under the specified ``. // By default returns no `data` content. // API name: data -func (r *SuggestUserProfiles) Data(v string) *SuggestUserProfiles { - r.values.Set("data", v) +func (r *SuggestUserProfiles) Data(data ...string) *SuggestUserProfiles { + r.req.Data = data + + return r +} + +// Hint Extra search criteria to improve relevance of the suggestion result. +// Profiles matching the spcified hint are ranked higher in the response. 
+// Profiles not matching the hint don't exclude the profile from the response +// as long as the profile matches the `name` field query. +// API name: hint +func (r *SuggestUserProfiles) Hint(hint *types.Hint) *SuggestUserProfiles { + + r.req.Hint = hint + + return r +} + +// Name Query string used to match name-related fields in user profile documents. +// Name-related fields are the user's `username`, `full_name`, and `email`. +// API name: name +func (r *SuggestUserProfiles) Name(name string) *SuggestUserProfiles { + + r.req.Name = &name + + return r +} + +// Size Number of profiles to return. +// API name: size +func (r *SuggestUserProfiles) Size(size int64) *SuggestUserProfiles { + + r.req.Size = &size return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateapikey/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateapikey/request.go index 9e6ac5521..c41d6a571 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateapikey/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateapikey/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updateapikey @@ -29,13 +29,13 @@ import ( // Request holds the request body struct for the package updateapikey // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/update_api_key/Request.ts#L25-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/update_api_key/Request.ts#L25-L62 type Request struct { // Metadata Arbitrary metadata that you want to associate with the API key. It supports // nested data structure. Within the metadata object, keys beginning with _ are // reserved for system usage. - Metadata map[string]json.RawMessage `json:"metadata,omitempty"` + Metadata types.Metadata `json:"metadata,omitempty"` // RoleDescriptors An array of role descriptors for this API key. This parameter is optional. // When it is not specified or is an empty array, then the API key will have a // point in time snapshot of permissions of the authenticated user. If you diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateapikey/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateapikey/response.go index bc36eed7e..c4fa9c626 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateapikey/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateapikey/response.go @@ -16,15 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updateapikey // Response holds the response body struct for the package updateapikey // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/update_api_key/Response.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/update_api_key/Response.ts#L20-L28 type Response struct { + + // Updated If `true`, the API key was updated. + // If `false`, the API key didn’t change because no change was detected. Updated bool `json:"updated"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateapikey/update_api_key.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateapikey/update_api_key.go index 4ee4e0ade..b214e0f99 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateapikey/update_api_key.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateapikey/update_api_key.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Updates attributes of an existing API key. 
package updateapikey @@ -52,8 +52,9 @@ type UpdateApiKey struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewUpdateApiKeyFunc(tp elastictransport.Interface) NewUpdateApiKey { return func(id string) *UpdateApiKey { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *UpdateApiKey { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *UpdateApiKey) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *UpdateApiKey) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -205,7 +219,6 @@ func (r UpdateApiKey) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -214,6 +227,10 @@ func (r UpdateApiKey) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -226,9 +243,35 @@ func (r *UpdateApiKey) Header(key, value string) *UpdateApiKey { // Id The ID of the API key to update. // API Name: id -func (r *UpdateApiKey) Id(v string) *UpdateApiKey { +func (r *UpdateApiKey) _id(id string) *UpdateApiKey { r.paramSet |= idMask - r.id = v + r.id = id + + return r +} + +// Metadata Arbitrary metadata that you want to associate with the API key. It supports +// nested data structure. Within the metadata object, keys beginning with _ are +// reserved for system usage. 
+// API name: metadata +func (r *UpdateApiKey) Metadata(metadata types.Metadata) *UpdateApiKey { + r.req.Metadata = metadata + + return r +} + +// RoleDescriptors An array of role descriptors for this API key. This parameter is optional. +// When it is not specified or is an empty array, then the API key will have a +// point in time snapshot of permissions of the authenticated user. If you +// supply role descriptors then the resultant permissions would be an +// intersection of API keys permissions and authenticated user’s permissions +// thereby limiting the access scope for API keys. The structure of role +// descriptor is the same as the request for create role API. For more details, +// see create or update roles API. +// API name: role_descriptors +func (r *UpdateApiKey) RoleDescriptors(roledescriptors map[string]types.RoleDescriptor) *UpdateApiKey { + + r.req.RoleDescriptors = roledescriptors return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateuserprofiledata/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateuserprofiledata/request.go index 606adacd6..43a891992 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateuserprofiledata/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateuserprofiledata/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updateuserprofiledata @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package updateuserprofiledata // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/update_user_profile_data/Request.ts#L27-L70 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/update_user_profile_data/Request.ts#L27-L70 type Request struct { // Data Non-searchable data that you want to associate with the user profile. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateuserprofiledata/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateuserprofiledata/response.go index 684c2bed7..1176e1ea3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateuserprofiledata/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateuserprofiledata/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updateuserprofiledata // Response holds the response body struct for the package updateuserprofiledata // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/update_user_profile_data/Response.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/update_user_profile_data/Response.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateuserprofiledata/update_user_profile_data.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateuserprofiledata/update_user_profile_data.go index 4a4231e70..5bb1ad0f4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateuserprofiledata/update_user_profile_data.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/security/updateuserprofiledata/update_user_profile_data.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Update application specific data for the user profile of the given unique ID. 
package updateuserprofiledata @@ -34,7 +34,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) @@ -54,8 +53,9 @@ type UpdateUserProfileData struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -71,7 +71,7 @@ func NewUpdateUserProfileDataFunc(tp elastictransport.Interface) NewUpdateUserPr return func(uid string) *UpdateUserProfileData { n := New(tp) - n.Uid(uid) + n._uid(uid) return n } @@ -86,6 +86,8 @@ func New(tp elastictransport.Interface) *UpdateUserProfileData { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -115,9 +117,19 @@ func (r *UpdateUserProfileData) HttpRequest(ctx context.Context) (*http.Request, var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -125,6 +137,7 @@ func (r *UpdateUserProfileData) HttpRequest(ctx context.Context) (*http.Request, } r.buf.Write(data) + } r.path.Scheme = "http" @@ -209,7 +222,6 @@ func (r UpdateUserProfileData) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -218,6 +230,10 @@ func (r UpdateUserProfileData) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -230,25 +246,25 @@ func (r *UpdateUserProfileData) Header(key, value string) *UpdateUserProfileData // Uid A unique identifier for the user profile. 
// API Name: uid -func (r *UpdateUserProfileData) Uid(v string) *UpdateUserProfileData { +func (r *UpdateUserProfileData) _uid(uid string) *UpdateUserProfileData { r.paramSet |= uidMask - r.uid = v + r.uid = uid return r } // IfSeqNo Only perform the operation if the document has this sequence number. // API name: if_seq_no -func (r *UpdateUserProfileData) IfSeqNo(v string) *UpdateUserProfileData { - r.values.Set("if_seq_no", v) +func (r *UpdateUserProfileData) IfSeqNo(sequencenumber string) *UpdateUserProfileData { + r.values.Set("if_seq_no", sequencenumber) return r } // IfPrimaryTerm Only perform the operation if the document has this primary term. // API name: if_primary_term -func (r *UpdateUserProfileData) IfPrimaryTerm(v string) *UpdateUserProfileData { - r.values.Set("if_primary_term", v) +func (r *UpdateUserProfileData) IfPrimaryTerm(ifprimaryterm string) *UpdateUserProfileData { + r.values.Set("if_primary_term", ifprimaryterm) return r } @@ -258,8 +274,28 @@ func (r *UpdateUserProfileData) IfPrimaryTerm(v string) *UpdateUserProfileData { // operation // visible to search, if 'false' do nothing with refreshes. // API name: refresh -func (r *UpdateUserProfileData) Refresh(enum refresh.Refresh) *UpdateUserProfileData { - r.values.Set("refresh", enum.String()) +func (r *UpdateUserProfileData) Refresh(refresh refresh.Refresh) *UpdateUserProfileData { + r.values.Set("refresh", refresh.String()) + + return r +} + +// Data Non-searchable data that you want to associate with the user profile. +// This field supports a nested data structure. +// API name: data +func (r *UpdateUserProfileData) Data(data map[string]json.RawMessage) *UpdateUserProfileData { + + r.req.Data = data + + return r +} + +// Labels Searchable data that you want to associate with the user profile. This +// field supports a nested data structure. 
+// API name: labels +func (r *UpdateUserProfileData) Labels(labels map[string]json.RawMessage) *UpdateUserProfileData { + + r.req.Labels = labels return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/deletenode/delete_node.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/deletenode/delete_node.go index 55a045168..5219607ad 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/deletenode/delete_node.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/deletenode/delete_node.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Removes a node from the shutdown list. Designed for indirect use by ECE/ESS // and ECK. Direct use is not supported. 
@@ -36,7 +36,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) @@ -70,7 +69,7 @@ func NewDeleteNodeFunc(tp elastictransport.Interface) NewDeleteNode { return func(nodeid string) *DeleteNode { n := New(tp) - n.NodeId(nodeid) + n._nodeid(nodeid) return n } @@ -180,7 +179,6 @@ func (r DeleteNode) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -189,6 +187,10 @@ func (r DeleteNode) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -222,9 +224,9 @@ func (r *DeleteNode) Header(key, value string) *DeleteNode { // NodeId The node id of node to be removed from the shutdown state // API Name: nodeid -func (r *DeleteNode) NodeId(v string) *DeleteNode { +func (r *DeleteNode) _nodeid(nodeid string) *DeleteNode { r.paramSet |= nodeidMask - r.nodeid = v + r.nodeid = nodeid return r } @@ -232,8 +234,8 @@ func (r *DeleteNode) NodeId(v string) *DeleteNode { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *DeleteNode) MasterTimeout(enum timeunit.TimeUnit) *DeleteNode { - r.values.Set("master_timeout", enum.String()) +func (r *DeleteNode) MasterTimeout(mastertimeout timeunit.TimeUnit) *DeleteNode { + r.values.Set("master_timeout", mastertimeout.String()) return r } @@ -241,8 +243,8 @@ func (r *DeleteNode) MasterTimeout(enum timeunit.TimeUnit) *DeleteNode { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. 
// API name: timeout -func (r *DeleteNode) Timeout(enum timeunit.TimeUnit) *DeleteNode { - r.values.Set("timeout", enum.String()) +func (r *DeleteNode) Timeout(timeout timeunit.TimeUnit) *DeleteNode { + r.values.Set("timeout", timeout.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/deletenode/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/deletenode/response.go index 1c9dbb1db..af4691f5f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/deletenode/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/deletenode/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletenode // Response holds the response body struct for the package deletenode // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/shutdown/delete_node/ShutdownDeleteNodeResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/shutdown/delete_node/ShutdownDeleteNodeResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/getnode/get_node.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/getnode/get_node.go index fc1559fed..29b1a856f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/getnode/get_node.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/getnode/get_node.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieve status of a node or nodes that are currently marked as shutting // down. Designed for indirect use by ECE/ESS and ECK. Direct use is not @@ -37,7 +37,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" ) @@ -187,7 +186,6 @@ func (r GetNode) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -196,6 +194,10 @@ func (r GetNode) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -229,9 +231,9 @@ func (r *GetNode) Header(key, value string) *GetNode { // NodeId Which node for which to retrieve the shutdown status // API Name: nodeid -func (r *GetNode) NodeId(v string) *GetNode { +func (r *GetNode) NodeId(nodeid string) *GetNode { r.paramSet |= nodeidMask - r.nodeid = v + r.nodeid = nodeid return r } @@ -239,8 +241,8 @@ func (r *GetNode) NodeId(v string) *GetNode { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *GetNode) MasterTimeout(enum timeunit.TimeUnit) *GetNode { - r.values.Set("master_timeout", enum.String()) +func (r *GetNode) MasterTimeout(mastertimeout timeunit.TimeUnit) *GetNode { + r.values.Set("master_timeout", mastertimeout.String()) return r } @@ -248,8 +250,8 @@ func (r *GetNode) MasterTimeout(enum timeunit.TimeUnit) *GetNode { // Timeout Period to wait for a response. 
If no response is received before the timeout // expires, the request fails and returns an error. // API name: timeout -func (r *GetNode) Timeout(enum timeunit.TimeUnit) *GetNode { - r.values.Set("timeout", enum.String()) +func (r *GetNode) Timeout(timeout timeunit.TimeUnit) *GetNode { + r.values.Set("timeout", timeout.String()) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/getnode/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/getnode/response.go index c907dedc9..f4edc3ac3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/getnode/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/getnode/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getnode @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getnode // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L23-L27 type Response struct { Nodes []types.NodeShutdownStatus `json:"nodes"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/putnode/put_node.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/putnode/put_node.go index ed4f25e86..0ab1bcfdd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/putnode/put_node.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/putnode/put_node.go @@ -16,7 +16,7 @@ 
// under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. // Direct use is not supported. @@ -35,8 +35,8 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/type_" ) const ( @@ -55,8 +55,9 @@ type PutNode struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -72,7 +73,7 @@ func NewPutNodeFunc(tp elastictransport.Interface) NewPutNode { return func(nodeid string) *PutNode { n := New(tp) - n.NodeId(nodeid) + n._nodeid(nodeid) return n } @@ -88,6 +89,8 @@ func New(tp elastictransport.Interface) *PutNode { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -117,9 +120,19 @@ func (r *PutNode) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -127,6 +140,7 @@ func (r *PutNode) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -209,7 +223,6 @@ func (r PutNode) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -218,6 +231,10 @@ func (r PutNode) Do(ctx context.Context) 
(*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -230,9 +247,9 @@ func (r *PutNode) Header(key, value string) *PutNode { // NodeId The node id of node to be shut down // API Name: nodeid -func (r *PutNode) NodeId(v string) *PutNode { +func (r *PutNode) _nodeid(nodeid string) *PutNode { r.paramSet |= nodeidMask - r.nodeid = v + r.nodeid = nodeid return r } @@ -240,8 +257,8 @@ func (r *PutNode) NodeId(v string) *PutNode { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *PutNode) MasterTimeout(enum timeunit.TimeUnit) *PutNode { - r.values.Set("master_timeout", enum.String()) +func (r *PutNode) MasterTimeout(mastertimeout timeunit.TimeUnit) *PutNode { + r.values.Set("master_timeout", mastertimeout.String()) return r } @@ -249,8 +266,67 @@ func (r *PutNode) MasterTimeout(enum timeunit.TimeUnit) *PutNode { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. // API name: timeout -func (r *PutNode) Timeout(enum timeunit.TimeUnit) *PutNode { - r.values.Set("timeout", enum.String()) +func (r *PutNode) Timeout(timeout timeunit.TimeUnit) *PutNode { + r.values.Set("timeout", timeout.String()) + + return r +} + +// AllocationDelay Only valid if type is restart. +// Controls how long Elasticsearch will wait for the node to restart and join +// the cluster before reassigning its shards to other nodes. +// This works the same as delaying allocation with the +// index.unassigned.node_left.delayed_timeout setting. +// If you specify both a restart allocation delay and an index-level allocation +// delay, the longer of the two is used. 
+// API name: allocation_delay +func (r *PutNode) AllocationDelay(allocationdelay string) *PutNode { + + r.req.AllocationDelay = &allocationdelay + + return r +} + +// Reason A human-readable reason that the node is being shut down. +// This field provides information for other cluster operators; it does not +// affect the shut down process. +// API name: reason +func (r *PutNode) Reason(reason string) *PutNode { + + r.req.Reason = reason + + return r +} + +// TargetNodeName Only valid if type is replace. +// Specifies the name of the node that is replacing the node being shut down. +// Shards from the shut down node are only allowed to be allocated to the target +// node, and no other data will be allocated to the target node. +// During relocation of data certain allocation rules are ignored, such as disk +// watermarks or user attribute filtering rules. +// API name: target_node_name +func (r *PutNode) TargetNodeName(targetnodename string) *PutNode { + + r.req.TargetNodeName = &targetnodename + + return r +} + +// Type Valid values are restart, remove, or replace. +// Use restart when you need to temporarily shut down a node to perform an +// upgrade, make configuration changes, or perform other maintenance. +// Because the node is expected to rejoin the cluster, data is not migrated off +// of the node. +// Use remove when you need to permanently remove a node from the cluster. +// The node is not marked ready for shutdown until data is migrated off of the +// node Use replace to do a 1:1 replacement of a node with another node. +// Certain allocation decisions will be ignored (such as disk watermarks) in the +// interest of true replacement of the source node with the target node. +// During a replace-type shutdown, rollover and index creation may result in +// unassigned shards, and shrink may fail until the replacement is complete. 
+// API name: type +func (r *PutNode) Type(type_ type_.Type) *PutNode { + r.req.Type = type_ return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/putnode/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/putnode/request.go index 4c948ea10..25f83041e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/putnode/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/putnode/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putnode @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package putnode // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/shutdown/put_node/ShutdownPutNodeRequest.ts#L25-L77 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/shutdown/put_node/ShutdownPutNodeRequest.ts#L25-L76 type Request struct { // AllocationDelay Only valid if type is restart. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/putnode/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/putnode/response.go index 743c81ea8..087f8f22e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/putnode/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/shutdown/putnode/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putnode // Response holds the response body struct for the package putnode // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/shutdown/put_node/ShutdownPutNodeResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/shutdown/put_node/ShutdownPutNodeResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/deletelifecycle/delete_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/deletelifecycle/delete_lifecycle.go index ae71fb6ad..f216d5bd5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/deletelifecycle/delete_lifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/deletelifecycle/delete_lifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an existing snapshot lifecycle policy. 
package deletelifecycle @@ -67,7 +67,7 @@ func NewDeleteLifecycleFunc(tp elastictransport.Interface) NewDeleteLifecycle { return func(policyid string) *DeleteLifecycle { n := New(tp) - n.PolicyId(policyid) + n._policyid(policyid) return n } @@ -170,7 +170,6 @@ func (r DeleteLifecycle) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r DeleteLifecycle) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,9 +215,9 @@ func (r *DeleteLifecycle) Header(key, value string) *DeleteLifecycle { // PolicyId The id of the snapshot lifecycle policy to remove // API Name: policyid -func (r *DeleteLifecycle) PolicyId(v string) *DeleteLifecycle { +func (r *DeleteLifecycle) _policyid(policyid string) *DeleteLifecycle { r.paramSet |= policyidMask - r.policyid = v + r.policyid = policyid return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/deletelifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/deletelifecycle/response.go index ab14dfbfb..38565a6ee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/deletelifecycle/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/deletelifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletelifecycle // Response holds the response body struct for the package deletelifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/delete_lifecycle/DeleteSnapshotLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/delete_lifecycle/DeleteSnapshotLifecycleResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/executelifecycle/execute_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/executelifecycle/execute_lifecycle.go index fc1c858cd..f34d44479 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/executelifecycle/execute_lifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/executelifecycle/execute_lifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Immediately creates a snapshot according to the lifecycle policy, without // waiting for the scheduled time. 
@@ -68,7 +68,7 @@ func NewExecuteLifecycleFunc(tp elastictransport.Interface) NewExecuteLifecycle return func(policyid string) *ExecuteLifecycle { n := New(tp) - n.PolicyId(policyid) + n._policyid(policyid) return n } @@ -174,7 +174,6 @@ func (r ExecuteLifecycle) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -183,6 +182,10 @@ func (r ExecuteLifecycle) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,9 +219,9 @@ func (r *ExecuteLifecycle) Header(key, value string) *ExecuteLifecycle { // PolicyId The id of the snapshot lifecycle policy to be executed // API Name: policyid -func (r *ExecuteLifecycle) PolicyId(v string) *ExecuteLifecycle { +func (r *ExecuteLifecycle) _policyid(policyid string) *ExecuteLifecycle { r.paramSet |= policyidMask - r.policyid = v + r.policyid = policyid return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/executelifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/executelifecycle/response.go index bd065e0b6..f9cdceec0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/executelifecycle/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/executelifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package executelifecycle // Response holds the response body struct for the package executelifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/execute_lifecycle/ExecuteSnapshotLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/execute_lifecycle/ExecuteSnapshotLifecycleResponse.ts#L22-L24 type Response struct { SnapshotName string `json:"snapshot_name"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/executeretention/execute_retention.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/executeretention/execute_retention.go index b4c7dd0e5..163869990 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/executeretention/execute_retention.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/executeretention/execute_retention.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes any snapshots that are expired according to the policy's retention // rules. 
@@ -161,7 +161,6 @@ func (r ExecuteRetention) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -170,6 +169,10 @@ func (r ExecuteRetention) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/executeretention/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/executeretention/response.go index ccf1e45cb..3a93b3c6d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/executeretention/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/executeretention/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package executeretention // Response holds the response body struct for the package executeretention // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/execute_retention/ExecuteRetentionResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/execute_retention/ExecuteRetentionResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getlifecycle/get_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getlifecycle/get_lifecycle.go index 60d3d4368..79436933a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getlifecycle/get_lifecycle.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getlifecycle/get_lifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves one or more snapshot lifecycle policy definitions and information // about the latest snapshot attempts. @@ -177,7 +177,6 @@ func (r GetLifecycle) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -186,6 +185,10 @@ func (r GetLifecycle) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -219,9 +222,9 @@ func (r *GetLifecycle) Header(key, value string) *GetLifecycle { // PolicyId Comma-separated list of snapshot lifecycle policies to retrieve // API Name: policyid -func (r *GetLifecycle) PolicyId(v string) *GetLifecycle { +func (r *GetLifecycle) PolicyId(policyid string) *GetLifecycle { r.paramSet |= policyidMask - r.policyid = v + r.policyid = policyid return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getlifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getlifecycle/response.go index 274b8566d..1f29719b9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getlifecycle/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getlifecycle/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getlifecycle @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/get_lifecycle/GetSnapshotLifecycleResponse.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/get_lifecycle/GetSnapshotLifecycleResponse.ts#L24-L26 type Response map[string]types.SnapshotLifecycle diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getstats/get_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getstats/get_stats.go index 4b77dbe14..0020aeef0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getstats/get_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getstats/get_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns global and policy-level statistics about actions taken by snapshot // lifecycle management. 
@@ -161,7 +161,6 @@ func (r GetStats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -170,6 +169,10 @@ func (r GetStats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getstats/response.go index c54bba0a7..325a4263d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getstats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getstats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/get_stats/GetSnapshotLifecycleStatsResponse.ts#L23-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/get_stats/GetSnapshotLifecycleStatsResponse.ts#L23-L36 type Response struct { PolicyStats []string `json:"policy_stats"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getstatus/get_status.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getstatus/get_status.go index 3ec354af6..41ed100c1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getstatus/get_status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getstatus/get_status.go @@ 
-16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves the status of snapshot lifecycle management (SLM). package getstatus @@ -159,7 +159,6 @@ func (r GetStatus) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r GetStatus) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getstatus/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getstatus/response.go index f95ba2af5..0aaa00599 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getstatus/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/getstatus/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getstatus @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getstatus // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/get_status/GetSnapshotLifecycleManagementStatusResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/get_status/GetSnapshotLifecycleManagementStatusResponse.ts#L22-L24 type Response struct { OperationMode lifecycleoperationmode.LifecycleOperationMode `json:"operation_mode"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/putlifecycle/put_lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/putlifecycle/put_lifecycle.go index 5157a6c4f..5d3ce8349 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/putlifecycle/put_lifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/putlifecycle/put_lifecycle.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates or updates a snapshot lifecycle policy. 
package putlifecycle @@ -52,8 +52,9 @@ type PutLifecycle struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -69,7 +70,7 @@ func NewPutLifecycleFunc(tp elastictransport.Interface) NewPutLifecycle { return func(policyid string) *PutLifecycle { n := New(tp) - n.PolicyId(policyid) + n._policyid(policyid) return n } @@ -84,6 +85,8 @@ func New(tp elastictransport.Interface) *PutLifecycle { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -113,9 +116,19 @@ func (r *PutLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -123,6 +136,7 @@ func (r *PutLifecycle) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -205,7 +219,6 @@ func (r PutLifecycle) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -214,6 +227,10 @@ func (r PutLifecycle) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -226,9 +243,9 @@ func (r *PutLifecycle) Header(key, value string) *PutLifecycle { // PolicyId ID for the snapshot lifecycle policy you want to create or update. 
// API Name: policyid -func (r *PutLifecycle) PolicyId(v string) *PutLifecycle { +func (r *PutLifecycle) _policyid(policyid string) *PutLifecycle { r.paramSet |= policyidMask - r.policyid = v + r.policyid = policyid return r } @@ -236,8 +253,8 @@ func (r *PutLifecycle) PolicyId(v string) *PutLifecycle { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *PutLifecycle) MasterTimeout(v string) *PutLifecycle { - r.values.Set("master_timeout", v) +func (r *PutLifecycle) MasterTimeout(duration string) *PutLifecycle { + r.values.Set("master_timeout", duration) return r } @@ -245,8 +262,56 @@ func (r *PutLifecycle) MasterTimeout(v string) *PutLifecycle { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. // API name: timeout -func (r *PutLifecycle) Timeout(v string) *PutLifecycle { - r.values.Set("timeout", v) +func (r *PutLifecycle) Timeout(duration string) *PutLifecycle { + r.values.Set("timeout", duration) + + return r +} + +// Config Configuration for each snapshot created by the policy. +// API name: config +func (r *PutLifecycle) Config(config *types.Configuration) *PutLifecycle { + + r.req.Config = config + + return r +} + +// Name Name automatically assigned to each snapshot created by the policy. Date math +// is supported. To prevent conflicting snapshot names, a UUID is automatically +// appended to each snapshot name. +// API name: name +func (r *PutLifecycle) Name(name string) *PutLifecycle { + r.req.Name = &name + + return r +} + +// Repository Repository used to store snapshots created by this policy. This repository +// must exist prior to the policy’s creation. You can create a repository using +// the snapshot repository API. 
+// API name: repository +func (r *PutLifecycle) Repository(repository string) *PutLifecycle { + + r.req.Repository = &repository + + return r +} + +// Retention Retention rules used to retain and delete snapshots created by the policy. +// API name: retention +func (r *PutLifecycle) Retention(retention *types.Retention) *PutLifecycle { + + r.req.Retention = retention + + return r +} + +// Schedule Periodic or absolute schedule at which the policy creates snapshots. SLM +// applies schedule changes immediately. +// API name: schedule +func (r *PutLifecycle) Schedule(cronexpression string) *PutLifecycle { + r.req.Schedule = &cronexpression return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/putlifecycle/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/putlifecycle/request.go index 03841b9a1..7507ef646 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/putlifecycle/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/putlifecycle/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putlifecycle @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package putlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/put_lifecycle/PutSnapshotLifecycleRequest.ts#L26-L72 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/put_lifecycle/PutSnapshotLifecycleRequest.ts#L26-L72 type Request struct { // Config Configuration for each snapshot created by the policy. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/putlifecycle/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/putlifecycle/response.go index 4f827ccab..45e8c218a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/putlifecycle/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/putlifecycle/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putlifecycle // Response holds the response body struct for the package putlifecycle // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/put_lifecycle/PutSnapshotLifecycleResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/put_lifecycle/PutSnapshotLifecycleResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/start/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/start/response.go index ff133c62f..a10871987 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/start/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/start/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package start // Response holds the response body struct for the package start // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/start/StartSnapshotLifecycleManagementResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/start/StartSnapshotLifecycleManagementResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/start/start.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/start/start.go index 5d6ef50b8..7d32381c7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/start/start.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/start/start.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Turns on snapshot lifecycle management (SLM). 
package start @@ -159,7 +159,6 @@ func (r Start) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r Start) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/stop/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/stop/response.go index a93528eb9..b92a17b6f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/stop/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/stop/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package stop // Response holds the response body struct for the package stop // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/stop/StopSnapshotLifecycleManagementResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/stop/StopSnapshotLifecycleManagementResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/stop/stop.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/stop/stop.go index 86da66369..5a00930f4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/stop/stop.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/slm/stop/stop.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Turns off snapshot lifecycle management (SLM). package stop @@ -159,7 +159,6 @@ func (r Stop) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r Stop) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/cleanuprepository/cleanup_repository.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/cleanuprepository/cleanup_repository.go index e0437ff8a..1f6926c9e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/cleanuprepository/cleanup_repository.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/cleanuprepository/cleanup_repository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Removes stale data from repository. 
package cleanuprepository @@ -67,7 +67,7 @@ func NewCleanupRepositoryFunc(tp elastictransport.Interface) NewCleanupRepositor return func(repository string) *CleanupRepository { n := New(tp) - n.Repository(repository) + n._repository(repository) return n } @@ -170,7 +170,6 @@ func (r CleanupRepository) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r CleanupRepository) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,25 +215,25 @@ func (r *CleanupRepository) Header(key, value string) *CleanupRepository { // Repository Snapshot repository to clean up. // API Name: repository -func (r *CleanupRepository) Repository(v string) *CleanupRepository { +func (r *CleanupRepository) _repository(repository string) *CleanupRepository { r.paramSet |= repositoryMask - r.repository = v + r.repository = repository return r } // MasterTimeout Period to wait for a connection to the master node. // API name: master_timeout -func (r *CleanupRepository) MasterTimeout(v string) *CleanupRepository { - r.values.Set("master_timeout", v) +func (r *CleanupRepository) MasterTimeout(duration string) *CleanupRepository { + r.values.Set("master_timeout", duration) return r } // Timeout Period to wait for a response. 
// API name: timeout -func (r *CleanupRepository) Timeout(v string) *CleanupRepository { - r.values.Set("timeout", v) +func (r *CleanupRepository) Timeout(duration string) *CleanupRepository { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/cleanuprepository/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/cleanuprepository/response.go index c2cfa4538..e5a20b39b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/cleanuprepository/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/cleanuprepository/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package cleanuprepository @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package cleanuprepository // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L22-L27 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/clone/clone.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/clone/clone.go index e5ae92d1e..b0c5d3611 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/clone/clone.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/clone/clone.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Clones indices from one snapshot into another snapshot in the same // repository. @@ -57,8 +57,9 @@ type Clone struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -76,11 +77,11 @@ func NewCloneFunc(tp elastictransport.Interface) NewClone { return func(repository, snapshot, targetsnapshot string) *Clone { n := New(tp) - n.Repository(repository) + n._repository(repository) - n.Snapshot(snapshot) + n._snapshot(snapshot) - n.TargetSnapshot(targetsnapshot) + n._targetsnapshot(targetsnapshot) return n } @@ -96,6 +97,8 @@ func New(tp elastictransport.Interface) *Clone { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -125,9 +128,19 @@ func (r *Clone) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -135,6 +148,7 @@ func (r *Clone) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -223,7 +237,6 @@ func (r Clone) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -232,6 +245,10 @@ func (r Clone) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -244,42 +261,50 @@ func (r *Clone) Header(key, value 
string) *Clone { // Repository A repository name // API Name: repository -func (r *Clone) Repository(v string) *Clone { +func (r *Clone) _repository(repository string) *Clone { r.paramSet |= repositoryMask - r.repository = v + r.repository = repository return r } // Snapshot The name of the snapshot to clone from // API Name: snapshot -func (r *Clone) Snapshot(v string) *Clone { +func (r *Clone) _snapshot(snapshot string) *Clone { r.paramSet |= snapshotMask - r.snapshot = v + r.snapshot = snapshot return r } // TargetSnapshot The name of the cloned snapshot to create // API Name: targetsnapshot -func (r *Clone) TargetSnapshot(v string) *Clone { +func (r *Clone) _targetsnapshot(targetsnapshot string) *Clone { r.paramSet |= targetsnapshotMask - r.targetsnapshot = v + r.targetsnapshot = targetsnapshot return r } // MasterTimeout Explicit operation timeout for connection to master node // API name: master_timeout -func (r *Clone) MasterTimeout(v string) *Clone { - r.values.Set("master_timeout", v) +func (r *Clone) MasterTimeout(duration string) *Clone { + r.values.Set("master_timeout", duration) return r } // API name: timeout -func (r *Clone) Timeout(v string) *Clone { - r.values.Set("timeout", v) +func (r *Clone) Timeout(duration string) *Clone { + r.values.Set("timeout", duration) + + return r +} + +// API name: indices +func (r *Clone) Indices(indices string) *Clone { + + r.req.Indices = indices return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/clone/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/clone/request.go index c16b256c7..6ade6e06e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/clone/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/clone/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package clone @@ -27,7 +27,7 @@ import ( // Request holds the request body struct for the package clone // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/clone/SnapshotCloneRequest.ts#L24-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/clone/SnapshotCloneRequest.ts#L24-L42 type Request struct { Indices string `json:"indices"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/clone/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/clone/response.go index 9e248d551..ce07601de 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/clone/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/clone/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package clone // Response holds the response body struct for the package clone // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/clone/SnapshotCloneResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/clone/SnapshotCloneResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/create/create.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/create/create.go index 0776c9f57..91423c0cc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/create/create.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/create/create.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates a snapshot in a repository. 
package create @@ -55,8 +55,9 @@ type Create struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -73,9 +74,9 @@ func NewCreateFunc(tp elastictransport.Interface) NewCreate { return func(repository, snapshot string) *Create { n := New(tp) - n.Repository(repository) + n._repository(repository) - n.Snapshot(snapshot) + n._snapshot(snapshot) return n } @@ -90,6 +91,8 @@ func New(tp elastictransport.Interface) *Create { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -119,9 +122,19 @@ func (r *Create) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -129,6 +142,7 @@ func (r *Create) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -212,7 +226,6 @@ func (r Create) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -221,6 +234,10 @@ func (r Create) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -233,18 +250,18 @@ func (r *Create) Header(key, value string) *Create { // Repository Repository for the snapshot. // API Name: repository -func (r *Create) Repository(v string) *Create { +func (r *Create) _repository(repository string) *Create { r.paramSet |= repositoryMask - r.repository = v + r.repository = repository return r } // Snapshot Name of the snapshot. Must be unique in the repository. 
// API Name: snapshot -func (r *Create) Snapshot(v string) *Create { +func (r *Create) _snapshot(snapshot string) *Create { r.paramSet |= snapshotMask - r.snapshot = v + r.snapshot = snapshot return r } @@ -252,8 +269,8 @@ func (r *Create) Snapshot(v string) *Create { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *Create) MasterTimeout(v string) *Create { - r.values.Set("master_timeout", v) +func (r *Create) MasterTimeout(duration string) *Create { + r.values.Set("master_timeout", duration) return r } @@ -261,8 +278,72 @@ func (r *Create) MasterTimeout(v string) *Create { // WaitForCompletion If `true`, the request returns a response when the snapshot is complete. If // `false`, the request returns a response when the snapshot initializes. // API name: wait_for_completion -func (r *Create) WaitForCompletion(b bool) *Create { - r.values.Set("wait_for_completion", strconv.FormatBool(b)) +func (r *Create) WaitForCompletion(waitforcompletion bool) *Create { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// FeatureStates Feature states to include in the snapshot. Each feature state includes one or +// more system indices containing related data. You can view a list of eligible +// features using the get features API. If `include_global_state` is `true`, all +// current feature states are included by default. If `include_global_state` is +// `false`, no feature states are included by default. +// API name: feature_states +func (r *Create) FeatureStates(featurestates ...string) *Create { + r.req.FeatureStates = featurestates + + return r +} + +// IgnoreUnavailable If `true`, the request ignores data streams and indices in `indices` that are +// missing or closed. If `false`, the request returns an error for any data +// stream or index that is missing or closed. 
+// API name: ignore_unavailable +func (r *Create) IgnoreUnavailable(ignoreunavailable bool) *Create { + r.req.IgnoreUnavailable = &ignoreunavailable + + return r +} + +// IncludeGlobalState If `true`, the current cluster state is included in the snapshot. The cluster +// state includes persistent cluster settings, composable index templates, +// legacy index templates, ingest pipelines, and ILM policies. It also includes +// data stored in system indices, such as Watches and task records (configurable +// via `feature_states`). +// API name: include_global_state +func (r *Create) IncludeGlobalState(includeglobalstate bool) *Create { + r.req.IncludeGlobalState = &includeglobalstate + + return r +} + +// Indices Data streams and indices to include in the snapshot. Supports multi-target +// syntax. Includes all data streams and indices by default. +// API name: indices +func (r *Create) Indices(indices ...string) *Create { + r.req.Indices = indices + + return r +} + +// Metadata Optional metadata for the snapshot. May have any contents. Must be less than +// 1024 bytes. This map is not automatically generated by Elasticsearch. +// API name: metadata +func (r *Create) Metadata(metadata types.Metadata) *Create { + r.req.Metadata = metadata + + return r +} + +// Partial If `true`, allows restoring a partial snapshot of indices with unavailable +// shards. Only shards that were successfully included in the snapshot will be +// restored. All missing shards will be recreated as empty. If `false`, the +// entire restore operation will fail if one or more indices included in the +// snapshot do not have all primary shards available. 
+// API name: partial +func (r *Create) Partial(partial bool) *Create { + r.req.Partial = &partial return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/create/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/create/request.go index fc0aa9e49..9be9e683d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/create/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/create/request.go @@ -16,18 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package create import ( "encoding/json" "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Request holds the request body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/create/SnapshotCreateRequest.ts#L24-L81 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/create/SnapshotCreateRequest.ts#L24-L81 type Request struct { // FeatureStates Feature states to include in the snapshot. Each feature state includes one or @@ -51,7 +53,7 @@ type Request struct { Indices []string `json:"indices,omitempty"` // Metadata Optional metadata for the snapshot. May have any contents. Must be less than // 1024 bytes. This map is not automatically generated by Elasticsearch. - Metadata map[string]json.RawMessage `json:"metadata,omitempty"` + Metadata types.Metadata `json:"metadata,omitempty"` // Partial If `true`, allows restoring a partial snapshot of indices with unavailable // shards. Only shards that were successfully included in the snapshot will be // restored. 
All missing shards will be recreated as empty. If `false`, the diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/create/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/create/response.go index a500663f8..93b5e6100 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/create/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/create/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package create @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package create // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/create/SnapshotCreateResponse.ts#L22-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/create/SnapshotCreateResponse.ts#L22-L35 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/createrepository/create_repository.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/createrepository/create_repository.go index ad168bdae..9f76edde0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/createrepository/create_repository.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/createrepository/create_repository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates a repository. package createrepository @@ -53,8 +53,9 @@ type CreateRepository struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +71,7 @@ func NewCreateRepositoryFunc(tp elastictransport.Interface) NewCreateRepository return func(repository string) *CreateRepository { n := New(tp) - n.Repository(repository) + n._repository(repository) return n } @@ -85,6 +86,8 @@ func New(tp elastictransport.Interface) *CreateRepository { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +117,19 @@ func (r *CreateRepository) HttpRequest(ctx context.Context) (*http.Request, erro var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +137,7 @@ func (r *CreateRepository) HttpRequest(ctx context.Context) (*http.Request, erro } r.buf.Write(data) + } r.path.Scheme = "http" @@ -204,7 +218,6 @@ func (r CreateRepository) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -213,6 +226,10 @@ func (r CreateRepository) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -225,33 +242,49 @@ func (r *CreateRepository) Header(key, value string) *CreateRepository { // Repository A repository name // API Name: repository -func (r *CreateRepository) Repository(v string) 
*CreateRepository { +func (r *CreateRepository) _repository(repository string) *CreateRepository { r.paramSet |= repositoryMask - r.repository = v + r.repository = repository return r } // MasterTimeout Explicit operation timeout for connection to master node // API name: master_timeout -func (r *CreateRepository) MasterTimeout(v string) *CreateRepository { - r.values.Set("master_timeout", v) +func (r *CreateRepository) MasterTimeout(duration string) *CreateRepository { + r.values.Set("master_timeout", duration) return r } // Timeout Explicit operation timeout // API name: timeout -func (r *CreateRepository) Timeout(v string) *CreateRepository { - r.values.Set("timeout", v) +func (r *CreateRepository) Timeout(duration string) *CreateRepository { + r.values.Set("timeout", duration) return r } // Verify Whether to verify the repository after creation // API name: verify -func (r *CreateRepository) Verify(b bool) *CreateRepository { - r.values.Set("verify", strconv.FormatBool(b)) +func (r *CreateRepository) Verify(verify bool) *CreateRepository { + r.values.Set("verify", strconv.FormatBool(verify)) + + return r +} + +// API name: settings +func (r *CreateRepository) Settings(settings *types.RepositorySettings) *CreateRepository { + + r.req.Settings = *settings + + return r +} + +// API name: type +func (r *CreateRepository) Type(type_ string) *CreateRepository { + + r.req.Type = type_ return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/createrepository/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/createrepository/request.go index 214b92bdb..b990e068a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/createrepository/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/createrepository/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package createrepository @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package createrepository // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/create_repository/SnapshotCreateRepositoryRequest.ts#L28-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/create_repository/SnapshotCreateRepositoryRequest.ts#L28-L48 type Request struct { Repository *types.Repository `json:"repository,omitempty"` Settings types.RepositorySettings `json:"settings"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/createrepository/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/createrepository/response.go index 581c60319..a0a71fe8f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/createrepository/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/createrepository/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package createrepository // Response holds the response body struct for the package createrepository // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/create_repository/SnapshotCreateRepositoryResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/create_repository/SnapshotCreateRepositoryResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/delete/delete.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/delete/delete.go index 10dcbf233..036fd99d4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/delete/delete.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/delete/delete.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes one or more snapshots. 
package delete @@ -70,9 +70,9 @@ func NewDeleteFunc(tp elastictransport.Interface) NewDelete { return func(repository, snapshot string) *Delete { n := New(tp) - n.Repository(repository) + n._repository(repository) - n.Snapshot(snapshot) + n._snapshot(snapshot) return n } @@ -176,7 +176,6 @@ func (r Delete) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -185,6 +184,10 @@ func (r Delete) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -218,26 +221,26 @@ func (r *Delete) Header(key, value string) *Delete { // Repository A repository name // API Name: repository -func (r *Delete) Repository(v string) *Delete { +func (r *Delete) _repository(repository string) *Delete { r.paramSet |= repositoryMask - r.repository = v + r.repository = repository return r } // Snapshot A comma-separated list of snapshot names // API Name: snapshot -func (r *Delete) Snapshot(v string) *Delete { +func (r *Delete) _snapshot(snapshot string) *Delete { r.paramSet |= snapshotMask - r.snapshot = v + r.snapshot = snapshot return r } // MasterTimeout Explicit operation timeout for connection to master node // API name: master_timeout -func (r *Delete) MasterTimeout(v string) *Delete { - r.values.Set("master_timeout", v) +func (r *Delete) MasterTimeout(duration string) *Delete { + r.values.Set("master_timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/delete/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/delete/response.go index 26cc75097..eb941e4b1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/delete/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/delete/response.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package delete // Response holds the response body struct for the package delete // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/delete/SnapshotDeleteResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/delete/SnapshotDeleteResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/deleterepository/delete_repository.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/deleterepository/delete_repository.go index fed8c1dc9..519424578 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/deleterepository/delete_repository.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/deleterepository/delete_repository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes a repository. 
package deleterepository @@ -67,7 +67,7 @@ func NewDeleteRepositoryFunc(tp elastictransport.Interface) NewDeleteRepository return func(repository string) *DeleteRepository { n := New(tp) - n.Repository(repository) + n._repository(repository) return n } @@ -168,7 +168,6 @@ func (r DeleteRepository) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -177,6 +176,10 @@ func (r DeleteRepository) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -211,25 +214,25 @@ func (r *DeleteRepository) Header(key, value string) *DeleteRepository { // Repository Name of the snapshot repository to unregister. Wildcard (`*`) patterns are // supported. // API Name: repository -func (r *DeleteRepository) Repository(v string) *DeleteRepository { +func (r *DeleteRepository) _repository(repository string) *DeleteRepository { r.paramSet |= repositoryMask - r.repository = v + r.repository = repository return r } // MasterTimeout Explicit operation timeout for connection to master node // API name: master_timeout -func (r *DeleteRepository) MasterTimeout(v string) *DeleteRepository { - r.values.Set("master_timeout", v) +func (r *DeleteRepository) MasterTimeout(duration string) *DeleteRepository { + r.values.Set("master_timeout", duration) return r } // Timeout Explicit operation timeout // API name: timeout -func (r *DeleteRepository) Timeout(v string) *DeleteRepository { - r.values.Set("timeout", v) +func (r *DeleteRepository) Timeout(duration string) *DeleteRepository { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/deleterepository/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/deleterepository/response.go index fcaca0944..4407f7751 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/deleterepository/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/deleterepository/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deleterepository // Response holds the response body struct for the package deleterepository // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/delete_repository/SnapshotDeleteRepositoryResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/delete_repository/SnapshotDeleteRepositoryResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/get/get.go index b3760b972..b94571212 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/get/get.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/get/get.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about a snapshot. 
package get @@ -36,7 +36,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snapshotsort" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" ) @@ -74,9 +73,9 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { return func(repository, snapshot string) *Get { n := New(tp) - n.Repository(repository) + n._repository(repository) - n.Snapshot(snapshot) + n._snapshot(snapshot) return n } @@ -180,7 +179,6 @@ func (r Get) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -189,6 +187,10 @@ func (r Get) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -223,9 +225,9 @@ func (r *Get) Header(key, value string) *Get { // Repository Comma-separated list of snapshot repository names used to limit the request. // Wildcard (*) expressions are supported. // API Name: repository -func (r *Get) Repository(v string) *Get { +func (r *Get) _repository(repository string) *Get { r.paramSet |= repositoryMask - r.repository = v + r.repository = repository return r } @@ -237,9 +239,9 @@ func (r *Get) Repository(v string) *Get { // - To get information about any snapshots that are currently running, use // _current. // API Name: snapshot -func (r *Get) Snapshot(v string) *Get { +func (r *Get) _snapshot(snapshot string) *Get { r.paramSet |= snapshotMask - r.snapshot = v + r.snapshot = snapshot return r } @@ -247,8 +249,8 @@ func (r *Get) Snapshot(v string) *Get { // IgnoreUnavailable If false, the request returns an error for any snapshots that are // unavailable. 
// API name: ignore_unavailable -func (r *Get) IgnoreUnavailable(b bool) *Get { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Get) IgnoreUnavailable(ignoreunavailable bool) *Get { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } @@ -256,8 +258,8 @@ func (r *Get) IgnoreUnavailable(b bool) *Get { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *Get) MasterTimeout(v string) *Get { - r.values.Set("master_timeout", v) +func (r *Get) MasterTimeout(duration string) *Get { + r.values.Set("master_timeout", duration) return r } @@ -266,8 +268,8 @@ func (r *Get) MasterTimeout(v string) *Get { // version of Elasticsearch which took the snapshot, the start and end times of // the snapshot, and the number of shards snapshotted. // API name: verbose -func (r *Get) Verbose(b bool) *Get { - r.values.Set("verbose", strconv.FormatBool(b)) +func (r *Get) Verbose(verbose bool) *Get { + r.values.Set("verbose", strconv.FormatBool(verbose)) return r } @@ -277,24 +279,24 @@ func (r *Get) Verbose(b bool) *Get { // bytes, and the maximum number of segments per shard in the index. Defaults to // false, meaning that this information is omitted. // API name: index_details -func (r *Get) IndexDetails(b bool) *Get { - r.values.Set("index_details", strconv.FormatBool(b)) +func (r *Get) IndexDetails(indexdetails bool) *Get { + r.values.Set("index_details", strconv.FormatBool(indexdetails)) return r } // IndexNames If true, returns the name of each index in each snapshot. // API name: index_names -func (r *Get) IndexNames(b bool) *Get { - r.values.Set("index_names", strconv.FormatBool(b)) +func (r *Get) IndexNames(indexnames bool) *Get { + r.values.Set("index_names", strconv.FormatBool(indexnames)) return r } // IncludeRepository If true, returns the repository name in each snapshot. 
// API name: include_repository -func (r *Get) IncludeRepository(b bool) *Get { - r.values.Set("include_repository", strconv.FormatBool(b)) +func (r *Get) IncludeRepository(includerepository bool) *Get { + r.values.Set("include_repository", strconv.FormatBool(includerepository)) return r } @@ -302,8 +304,8 @@ func (r *Get) IncludeRepository(b bool) *Get { // Sort Allows setting a sort order for the result. Defaults to start_time, i.e. // sorting by snapshot start time stamp. // API name: sort -func (r *Get) Sort(enum snapshotsort.SnapshotSort) *Get { - r.values.Set("sort", enum.String()) +func (r *Get) Sort(sort snapshotsort.SnapshotSort) *Get { + r.values.Set("sort", sort.String()) return r } @@ -311,8 +313,8 @@ func (r *Get) Sort(enum snapshotsort.SnapshotSort) *Get { // Size Maximum number of snapshots to return. Defaults to 0 which means return all // that match the request without limit. // API name: size -func (r *Get) Size(i int) *Get { - r.values.Set("size", strconv.Itoa(i)) +func (r *Get) Size(size int) *Get { + r.values.Set("size", strconv.Itoa(size)) return r } @@ -320,8 +322,8 @@ func (r *Get) Size(i int) *Get { // Order Sort order. Valid values are asc for ascending and desc for descending order. // Defaults to asc, meaning ascending order. // API name: order -func (r *Get) Order(enum sortorder.SortOrder) *Get { - r.values.Set("order", enum.String()) +func (r *Get) Order(order sortorder.SortOrder) *Get { + r.values.Set("order", order.String()) return r } @@ -329,8 +331,8 @@ func (r *Get) Order(enum sortorder.SortOrder) *Get { // After Offset identifier to start pagination from as returned by the next field in // the response body. // API name: after -func (r *Get) After(v string) *Get { - r.values.Set("after", v) +func (r *Get) After(after string) *Get { + r.values.Set("after", after) return r } @@ -339,8 +341,8 @@ func (r *Get) After(v string) *Get { // request. 
Using a non-zero value for this parameter is mutually exclusive with // using the after parameter. Defaults to 0. // API name: offset -func (r *Get) Offset(i int) *Get { - r.values.Set("offset", strconv.Itoa(i)) +func (r *Get) Offset(offset int) *Get { + r.values.Set("offset", strconv.Itoa(offset)) return r } @@ -350,8 +352,8 @@ func (r *Get) Offset(i int) *Get { // name, a millisecond time value or a number when sorting by index- or shard // count. // API name: from_sort_value -func (r *Get) FromSortValue(v string) *Get { - r.values.Set("from_sort_value", v) +func (r *Get) FromSortValue(fromsortvalue string) *Get { + r.values.Set("from_sort_value", fromsortvalue) return r } @@ -362,8 +364,8 @@ func (r *Get) FromSortValue(v string) *Get { // SLM policy you can use the special pattern _none that will match all // snapshots without an SLM policy. // API name: slm_policy_filter -func (r *Get) SlmPolicyFilter(v string) *Get { - r.values.Set("slm_policy_filter", v) +func (r *Get) SlmPolicyFilter(name string) *Get { + r.values.Set("slm_policy_filter", name) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/get/response.go index c1660b4c9..92f0ab8c2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/get/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/get/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package get @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/get/SnapshotGetResponse.ts#L25-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/get/SnapshotGetResponse.ts#L25-L42 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/getrepository/get_repository.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/getrepository/get_repository.go index c6e32953b..6796b417c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/getrepository/get_repository.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/getrepository/get_repository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about a repository. 
package getrepository @@ -172,7 +172,6 @@ func (r GetRepository) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r GetRepository) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +217,9 @@ func (r *GetRepository) Header(key, value string) *GetRepository { // Repository A comma-separated list of repository names // API Name: repository -func (r *GetRepository) Repository(v string) *GetRepository { +func (r *GetRepository) Repository(repository string) *GetRepository { r.paramSet |= repositoryMask - r.repository = v + r.repository = repository return r } @@ -224,16 +227,16 @@ func (r *GetRepository) Repository(v string) *GetRepository { // Local Return local information, do not retrieve the state from master node // (default: false) // API name: local -func (r *GetRepository) Local(b bool) *GetRepository { - r.values.Set("local", strconv.FormatBool(b)) +func (r *GetRepository) Local(local bool) *GetRepository { + r.values.Set("local", strconv.FormatBool(local)) return r } // MasterTimeout Explicit operation timeout for connection to master node // API name: master_timeout -func (r *GetRepository) MasterTimeout(v string) *GetRepository { - r.values.Set("master_timeout", v) +func (r *GetRepository) MasterTimeout(duration string) *GetRepository { + r.values.Set("master_timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/getrepository/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/getrepository/response.go index 2eb73f3d3..710c3eb9a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/getrepository/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/getrepository/response.go @@ -16,7 +16,7 @@ // under the 
License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getrepository @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getrepository // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/get_repository/SnapshotGetRepositoryResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/get_repository/SnapshotGetRepositoryResponse.ts#L23-L25 type Response map[string]types.Repository diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/restore/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/restore/request.go index 5b359983c..51ff55855 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/restore/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/restore/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package restore @@ -29,8 +29,9 @@ import ( // Request holds the request body struct for the package restore // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/restore/SnapshotRestoreRequest.ts#L25-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/restore/SnapshotRestoreRequest.ts#L25-L51 type Request struct { + FeatureStates []string `json:"feature_states,omitempty"` IgnoreIndexSettings []string `json:"ignore_index_settings,omitempty"` IgnoreUnavailable *bool `json:"ignore_unavailable,omitempty"` IncludeAliases *bool `json:"include_aliases,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/restore/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/restore/response.go index e8f9a06a6..2659a0617 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/restore/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/restore/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package restore @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package restore // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/restore/SnapshotRestoreResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/restore/SnapshotRestoreResponse.ts#L23-L25 type Response struct { Snapshot types.SnapshotRestore `json:"snapshot"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/restore/restore.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/restore/restore.go index 86d56eeb3..dcc5dfc4e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/restore/restore.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/restore/restore.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Restores a snapshot. 
package restore @@ -55,8 +55,9 @@ type Restore struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -73,9 +74,9 @@ func NewRestoreFunc(tp elastictransport.Interface) NewRestore { return func(repository, snapshot string) *Restore { n := New(tp) - n.Repository(repository) + n._repository(repository) - n.Snapshot(snapshot) + n._snapshot(snapshot) return n } @@ -90,6 +91,8 @@ func New(tp elastictransport.Interface) *Restore { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -119,9 +122,19 @@ func (r *Restore) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -129,6 +142,7 @@ func (r *Restore) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -214,7 +228,6 @@ func (r Restore) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -223,6 +236,10 @@ func (r Restore) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -235,34 +252,107 @@ func (r *Restore) Header(key, value string) *Restore { // Repository A repository name // API Name: repository -func (r *Restore) Repository(v string) *Restore { +func (r *Restore) _repository(repository string) *Restore { r.paramSet |= repositoryMask - r.repository = v + r.repository = repository return r } // Snapshot A snapshot name // API Name: snapshot -func (r *Restore) Snapshot(v string) *Restore { +func (r *Restore) _snapshot(snapshot 
string) *Restore { r.paramSet |= snapshotMask - r.snapshot = v + r.snapshot = snapshot return r } // MasterTimeout Explicit operation timeout for connection to master node // API name: master_timeout -func (r *Restore) MasterTimeout(v string) *Restore { - r.values.Set("master_timeout", v) +func (r *Restore) MasterTimeout(duration string) *Restore { + r.values.Set("master_timeout", duration) return r } // WaitForCompletion Should this request wait until the operation has completed before returning // API name: wait_for_completion -func (r *Restore) WaitForCompletion(b bool) *Restore { - r.values.Set("wait_for_completion", strconv.FormatBool(b)) +func (r *Restore) WaitForCompletion(waitforcompletion bool) *Restore { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) + + return r +} + +// API name: feature_states +func (r *Restore) FeatureStates(featurestates ...string) *Restore { + r.req.FeatureStates = featurestates + + return r +} + +// API name: ignore_index_settings +func (r *Restore) IgnoreIndexSettings(ignoreindexsettings ...string) *Restore { + r.req.IgnoreIndexSettings = ignoreindexsettings + + return r +} + +// API name: ignore_unavailable +func (r *Restore) IgnoreUnavailable(ignoreunavailable bool) *Restore { + r.req.IgnoreUnavailable = &ignoreunavailable + + return r +} + +// API name: include_aliases +func (r *Restore) IncludeAliases(includealiases bool) *Restore { + r.req.IncludeAliases = &includealiases + + return r +} + +// API name: include_global_state +func (r *Restore) IncludeGlobalState(includeglobalstate bool) *Restore { + r.req.IncludeGlobalState = &includeglobalstate + + return r +} + +// API name: index_settings +func (r *Restore) IndexSettings(indexsettings *types.IndexSettings) *Restore { + + r.req.IndexSettings = indexsettings + + return r +} + +// API name: indices +func (r *Restore) Indices(indices ...string) *Restore { + r.req.Indices = indices + + return r +} + +// API name: partial +func (r *Restore) 
Partial(partial bool) *Restore { + r.req.Partial = &partial + + return r +} + +// API name: rename_pattern +func (r *Restore) RenamePattern(renamepattern string) *Restore { + + r.req.RenamePattern = &renamepattern + + return r +} + +// API name: rename_replacement +func (r *Restore) RenameReplacement(renamereplacement string) *Restore { + + r.req.RenameReplacement = &renamereplacement return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/status/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/status/response.go index b22f02127..bc8cf86ed 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/status/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/status/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package status @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package status // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/status/SnapshotStatusResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/status/SnapshotStatusResponse.ts#L22-L24 type Response struct { Snapshots []types.Status `json:"snapshots"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/status/status.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/status/status.go index cc95a05df..5b0ffe403 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/status/status.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/status/status.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about the status of a snapshot. package status @@ -192,7 +192,6 @@ func (r Status) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -201,6 +200,10 @@ func (r Status) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -234,18 +237,18 @@ func (r *Status) Header(key, value string) *Status { // Repository A repository name // API Name: repository -func (r *Status) Repository(v string) *Status { +func (r *Status) Repository(repository string) *Status { r.paramSet |= repositoryMask - r.repository = v + r.repository = repository return r } // Snapshot A comma-separated list of snapshot names // API Name: snapshot -func (r *Status) Snapshot(v string) *Status { +func (r *Status) Snapshot(snapshot string) *Status { r.paramSet |= snapshotMask - r.snapshot = v + r.snapshot = snapshot return r } @@ -253,16 +256,16 @@ func (r *Status) Snapshot(v string) *Status { // IgnoreUnavailable Whether to ignore unavailable snapshots, defaults to false which means a // SnapshotMissingException is thrown // API name: ignore_unavailable -func (r *Status) IgnoreUnavailable(b bool) *Status { - r.values.Set("ignore_unavailable", strconv.FormatBool(b)) +func (r *Status) IgnoreUnavailable(ignoreunavailable bool) *Status { + r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable)) return r } // MasterTimeout Explicit operation timeout for connection to master node // API 
name: master_timeout -func (r *Status) MasterTimeout(v string) *Status { - r.values.Set("master_timeout", v) +func (r *Status) MasterTimeout(duration string) *Status { + r.values.Set("master_timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/verifyrepository/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/verifyrepository/response.go index b5913fe5a..655581102 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/verifyrepository/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/verifyrepository/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package verifyrepository @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package verifyrepository // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L23-L25 type Response struct { Nodes map[string]types.CompactNodeInfo `json:"nodes"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/verifyrepository/verify_repository.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/verifyrepository/verify_repository.go index 55c9a7676..084530fc1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/verifyrepository/verify_repository.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/snapshot/verifyrepository/verify_repository.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Verifies a repository. package verifyrepository @@ -67,7 +67,7 @@ func NewVerifyRepositoryFunc(tp elastictransport.Interface) NewVerifyRepository return func(repository string) *VerifyRepository { n := New(tp) - n.Repository(repository) + n._repository(repository) return n } @@ -170,7 +170,6 @@ func (r VerifyRepository) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r VerifyRepository) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,25 +215,25 @@ func (r *VerifyRepository) Header(key, value string) *VerifyRepository { // Repository A repository name // API Name: repository -func (r *VerifyRepository) Repository(v string) *VerifyRepository { +func (r *VerifyRepository) _repository(repository string) *VerifyRepository { r.paramSet |= repositoryMask - r.repository = v + r.repository = repository return r } // MasterTimeout Explicit operation timeout for connection to master node // API name: master_timeout -func (r *VerifyRepository) MasterTimeout(v string) *VerifyRepository { - r.values.Set("master_timeout", v) +func (r *VerifyRepository) MasterTimeout(duration string) *VerifyRepository { + r.values.Set("master_timeout", duration) return r } // Timeout Explicit operation timeout // API name: timeout -func (r *VerifyRepository) Timeout(v string) *VerifyRepository { - r.values.Set("timeout", v) +func (r *VerifyRepository) 
Timeout(duration string) *VerifyRepository { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/clearcursor/clear_cursor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/clearcursor/clear_cursor.go index 44662ad9d..cdef0a33a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/clearcursor/clear_cursor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/clearcursor/clear_cursor.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Clears the SQL cursor package clearcursor @@ -48,8 +48,9 @@ type ClearCursor struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +77,8 @@ func New(tp elastictransport.Interface) *ClearCursor { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +108,19 @@ func (r *ClearCursor) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +128,7 @@ func (r *ClearCursor) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -194,7 +208,6 @@ func (r ClearCursor) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -203,6 +216,10 @@ func (r ClearCursor) Do(ctx context.Context) 
(*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,3 +229,12 @@ func (r *ClearCursor) Header(key, value string) *ClearCursor { return r } + +// Cursor Cursor to clear. +// API name: cursor +func (r *ClearCursor) Cursor(cursor string) *ClearCursor { + + r.req.Cursor = cursor + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/clearcursor/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/clearcursor/request.go index 3b8bd2834..7ee1914ea 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/clearcursor/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/clearcursor/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package clearcursor @@ -27,8 +27,10 @@ import ( // Request holds the request body struct for the package clearcursor // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/sql/clear_cursor/ClearSqlCursorRequest.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/sql/clear_cursor/ClearSqlCursorRequest.ts#L22-L34 type Request struct { + + // Cursor Cursor to clear. 
Cursor string `json:"cursor"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/clearcursor/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/clearcursor/response.go index c0df894cb..f2af1cba9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/clearcursor/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/clearcursor/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package clearcursor // Response holds the response body struct for the package clearcursor // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/sql/clear_cursor/ClearSqlCursorResponse.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/sql/clear_cursor/ClearSqlCursorResponse.ts#L20-L22 type Response struct { Succeeded bool `json:"succeeded"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/deleteasync/delete_async.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/deleteasync/delete_async.go index 5e3470823..195425076 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/deleteasync/delete_async.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/deleteasync/delete_async.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an async SQL search or a stored synchronous SQL search. If the search // is still running, the API cancels it. @@ -68,7 +68,7 @@ func NewDeleteAsyncFunc(tp elastictransport.Interface) NewDeleteAsync { return func(id string) *DeleteAsync { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -174,7 +174,6 @@ func (r DeleteAsync) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -183,6 +182,10 @@ func (r DeleteAsync) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,11 +217,11 @@ func (r *DeleteAsync) Header(key, value string) *DeleteAsync { return r } -// Id The async search ID +// Id Identifier for the search. // API Name: id -func (r *DeleteAsync) Id(v string) *DeleteAsync { +func (r *DeleteAsync) _id(id string) *DeleteAsync { r.paramSet |= idMask - r.id = v + r.id = id return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/deleteasync/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/deleteasync/response.go index 834ad2af5..efc3811fb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/deleteasync/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/deleteasync/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deleteasync // Response holds the response body struct for the package deleteasync // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/sql/delete_async/SqlDeleteAsyncResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/sql/delete_async/SqlDeleteAsyncResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasync/get_async.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasync/get_async.go index 77a74b6c4..c1f94be0a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasync/get_async.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasync/get_async.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns the current status and available results for an async SQL search or // stored synchronous SQL search @@ -68,7 +68,7 @@ func NewGetAsyncFunc(tp elastictransport.Interface) NewGetAsync { return func(id string) *GetAsync { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -172,7 +172,6 @@ func (r GetAsync) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r GetAsync) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,11 +215,11 @@ func (r *GetAsync) Header(key, value string) *GetAsync { return r } -// Id The async search ID +// Id Identifier for the search. // API Name: id -func (r *GetAsync) Id(v string) *GetAsync { +func (r *GetAsync) _id(id string) *GetAsync { r.paramSet |= idMask - r.id = v + r.id = id return r } @@ -224,8 +227,8 @@ func (r *GetAsync) Id(v string) *GetAsync { // Delimiter Separator for CSV results. The API only supports this parameter for CSV // responses. // API name: delimiter -func (r *GetAsync) Delimiter(v string) *GetAsync { - r.values.Set("delimiter", v) +func (r *GetAsync) Delimiter(delimiter string) *GetAsync { + r.values.Set("delimiter", delimiter) return r } @@ -234,8 +237,8 @@ func (r *GetAsync) Delimiter(v string) *GetAsync { // the // Accept HTTP header. If you specify both, the API uses this parameter. 
// API name: format -func (r *GetAsync) Format(v string) *GetAsync { - r.values.Set("format", v) +func (r *GetAsync) Format(format string) *GetAsync { + r.values.Set("format", format) return r } @@ -243,8 +246,8 @@ func (r *GetAsync) Format(v string) *GetAsync { // KeepAlive Retention period for the search and its results. Defaults // to the `keep_alive` period for the original SQL search. // API name: keep_alive -func (r *GetAsync) KeepAlive(v string) *GetAsync { - r.values.Set("keep_alive", v) +func (r *GetAsync) KeepAlive(duration string) *GetAsync { + r.values.Set("keep_alive", duration) return r } @@ -252,8 +255,8 @@ func (r *GetAsync) KeepAlive(v string) *GetAsync { // WaitForCompletionTimeout Period to wait for complete results. Defaults to no timeout, // meaning the request waits for complete search results. // API name: wait_for_completion_timeout -func (r *GetAsync) WaitForCompletionTimeout(v string) *GetAsync { - r.values.Set("wait_for_completion_timeout", v) +func (r *GetAsync) WaitForCompletionTimeout(duration string) *GetAsync { + r.values.Set("wait_for_completion_timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasync/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasync/response.go index 9337f11e0..4692d1daf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasync/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasync/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getasync @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package getasync // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/sql/get_async/SqlGetAsyncResponse.ts#L23-L60 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/sql/get_async/SqlGetAsyncResponse.ts#L23-L60 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasyncstatus/get_async_status.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasyncstatus/get_async_status.go index df0eb1b59..adbab1154 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasyncstatus/get_async_status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasyncstatus/get_async_status.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns the current status of an async SQL search or a stored synchronous SQL // search @@ -68,7 +68,7 @@ func NewGetAsyncStatusFunc(tp elastictransport.Interface) NewGetAsyncStatus { return func(id string) *GetAsyncStatus { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -174,7 +174,6 @@ func (r GetAsyncStatus) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -183,6 +182,10 @@ func (r GetAsyncStatus) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,11 +217,11 @@ func (r *GetAsyncStatus) Header(key, value string) *GetAsyncStatus { return r } -// Id The async search ID +// Id Identifier for the search. // API Name: id -func (r *GetAsyncStatus) Id(v string) *GetAsyncStatus { +func (r *GetAsyncStatus) _id(id string) *GetAsyncStatus { r.paramSet |= idMask - r.id = v + r.id = id return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasyncstatus/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasyncstatus/response.go index c6b7162a9..51971b78c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasyncstatus/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasyncstatus/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getasyncstatus // Response holds the response body struct for the package getasyncstatus // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/sql/get_async_status/SqlGetAsyncStatusResponse.ts#L23-L55 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/sql/get_async_status/SqlGetAsyncStatusResponse.ts#L23-L55 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/query/query.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/query/query.go index 13339f6ed..9633232a5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/query/query.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/query/query.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Executes a SQL request package query @@ -48,8 +48,9 @@ type Query struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +77,8 @@ func New(tp elastictransport.Interface) *Query { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +108,19 @@ func (r *Query) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +128,7 @@ func (r *Query) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -192,7 +206,6 @@ func (r Query) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -201,6 +214,10 @@ func (r Query) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -211,10 +228,155 @@ func (r *Query) Header(key, value string) *Query { return r } -// Format a short version of the Accept header, e.g. json, yaml +// Format Format for the response. // API name: format -func (r *Query) Format(v string) *Query { - r.values.Set("format", v) +func (r *Query) Format(format string) *Query { + r.values.Set("format", format) + + return r +} + +// Catalog Default catalog (cluster) for queries. If unspecified, the queries execute on +// the data in the local cluster only. 
+// API name: catalog +func (r *Query) Catalog(catalog string) *Query { + + r.req.Catalog = &catalog + + return r +} + +// Columnar If true, the results in a columnar fashion: one row represents all the values +// of a certain column from the current page of results. +// API name: columnar +func (r *Query) Columnar(columnar bool) *Query { + r.req.Columnar = &columnar + + return r +} + +// Cursor Cursor used to retrieve a set of paginated results. +// If you specify a cursor, the API only uses the `columnar` and `time_zone` +// request body parameters. +// It ignores other request body parameters. +// API name: cursor +func (r *Query) Cursor(cursor string) *Query { + + r.req.Cursor = &cursor + + return r +} + +// FetchSize The maximum number of rows (or entries) to return in one response +// API name: fetch_size +func (r *Query) FetchSize(fetchsize int) *Query { + r.req.FetchSize = &fetchsize + + return r +} + +// FieldMultiValueLeniency Throw an exception when encountering multiple values for a field (default) or +// be lenient and return the first value from the list (without any guarantees +// of what that will be - typically the first in natural ascending order). +// API name: field_multi_value_leniency +func (r *Query) FieldMultiValueLeniency(fieldmultivalueleniency bool) *Query { + r.req.FieldMultiValueLeniency = &fieldmultivalueleniency + + return r +} + +// Filter Elasticsearch query DSL for additional filtering. +// API name: filter +func (r *Query) Filter(filter *types.Query) *Query { + + r.req.Filter = filter + + return r +} + +// IndexUsingFrozen If true, the search can run on frozen indices. Defaults to false. +// API name: index_using_frozen +func (r *Query) IndexUsingFrozen(indexusingfrozen bool) *Query { + r.req.IndexUsingFrozen = &indexusingfrozen + + return r +} + +// KeepAlive Retention period for an async or saved synchronous search. 
+// API name: keep_alive +func (r *Query) KeepAlive(duration types.Duration) *Query { + r.req.KeepAlive = duration + + return r +} + +// KeepOnCompletion If true, Elasticsearch stores synchronous searches if you also specify the +// wait_for_completion_timeout parameter. If false, Elasticsearch only stores +// async searches that don’t finish before the wait_for_completion_timeout. +// API name: keep_on_completion +func (r *Query) KeepOnCompletion(keeponcompletion bool) *Query { + r.req.KeepOnCompletion = &keeponcompletion + + return r +} + +// PageTimeout The timeout before a pagination request fails. +// API name: page_timeout +func (r *Query) PageTimeout(duration types.Duration) *Query { + r.req.PageTimeout = duration + + return r +} + +// Params Values for parameters in the query. +// API name: params +func (r *Query) Params(params map[string]json.RawMessage) *Query { + + r.req.Params = params + + return r +} + +// Query SQL query to run. +// API name: query +func (r *Query) Query(query string) *Query { + + r.req.Query = &query + + return r +} + +// RequestTimeout The timeout before the request fails. +// API name: request_timeout +func (r *Query) RequestTimeout(duration types.Duration) *Query { + r.req.RequestTimeout = duration + + return r +} + +// RuntimeMappings Defines one or more runtime fields in the search request. These fields take +// precedence over mapped fields with the same name. +// API name: runtime_mappings +func (r *Query) RuntimeMappings(runtimefields types.RuntimeFields) *Query { + r.req.RuntimeMappings = runtimefields + + return r +} + +// TimeZone ISO-8601 time zone ID for the search. +// API name: time_zone +func (r *Query) TimeZone(timezone string) *Query { + r.req.TimeZone = &timezone + + return r +} + +// WaitForCompletionTimeout Period to wait for complete results. Defaults to no timeout, meaning the +// request waits for complete search results. If the search doesn’t finish +// within this period, the search becomes async. 
+// API name: wait_for_completion_timeout +func (r *Query) WaitForCompletionTimeout(duration types.Duration) *Query { + r.req.WaitForCompletionTimeout = duration return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/query/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/query/request.go index af1ddb376..600e42393 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/query/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/query/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package query @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package query // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/sql/query/QuerySqlRequest.ts#L28-L115 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/sql/query/QuerySqlRequest.ts#L28-L122 type Request struct { // Catalog Default catalog (cluster) for queries. If unspecified, the queries execute on @@ -37,15 +37,19 @@ type Request struct { Catalog *string `json:"catalog,omitempty"` // Columnar If true, the results in a columnar fashion: one row represents all the values // of a certain column from the current page of results. - Columnar *bool `json:"columnar,omitempty"` - Cursor *string `json:"cursor,omitempty"` + Columnar *bool `json:"columnar,omitempty"` + // Cursor Cursor used to retrieve a set of paginated results. + // If you specify a cursor, the API only uses the `columnar` and `time_zone` + // request body parameters. + // It ignores other request body parameters. 
+ Cursor *string `json:"cursor,omitempty"` // FetchSize The maximum number of rows (or entries) to return in one response FetchSize *int `json:"fetch_size,omitempty"` // FieldMultiValueLeniency Throw an exception when encountering multiple values for a field (default) or // be lenient and return the first value from the list (without any guarantees // of what that will be - typically the first in natural ascending order). FieldMultiValueLeniency *bool `json:"field_multi_value_leniency,omitempty"` - // Filter Optional Elasticsearch query DSL for additional filtering. + // Filter Elasticsearch query DSL for additional filtering. Filter *types.Query `json:"filter,omitempty"` // IndexUsingFrozen If true, the search can run on frozen indices. Defaults to false. IndexUsingFrozen *bool `json:"index_using_frozen,omitempty"` @@ -59,15 +63,14 @@ type Request struct { PageTimeout types.Duration `json:"page_timeout,omitempty"` // Params Values for parameters in the query. Params map[string]json.RawMessage `json:"params,omitempty"` - // Query SQL query to execute + // Query SQL query to run. Query *string `json:"query,omitempty"` // RequestTimeout The timeout before the request fails. RequestTimeout types.Duration `json:"request_timeout,omitempty"` // RuntimeMappings Defines one or more runtime fields in the search request. These fields take // precedence over mapped fields with the same name. - RuntimeMappings map[string]types.RuntimeField `json:"runtime_mappings,omitempty"` - // TimeZone Time-zone in ISO 8601 used for executing the query on the server. More - // information available here. + RuntimeMappings types.RuntimeFields `json:"runtime_mappings,omitempty"` + // TimeZone ISO-8601 time zone ID for the search. TimeZone *string `json:"time_zone,omitempty"` // WaitForCompletionTimeout Period to wait for complete results. Defaults to no timeout, meaning the // request waits for complete search results. 
If the search doesn’t finish diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/query/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/query/response.go index f6a2ee020..687695bf6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/query/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/query/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package query @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package query // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/sql/query/QuerySqlResponse.ts#L23-L60 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/sql/query/QuerySqlResponse.ts#L23-L60 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/translate/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/translate/request.go index 85f143e74..a453cd265 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/translate/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/translate/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package translate @@ -29,12 +29,17 @@ import ( // Request holds the request body struct for the package translate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/sql/translate/TranslateSqlRequest.ts#L25-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/sql/translate/TranslateSqlRequest.ts#L25-L54 type Request struct { - FetchSize *int `json:"fetch_size,omitempty"` - Filter *types.Query `json:"filter,omitempty"` - Query string `json:"query"` - TimeZone *string `json:"time_zone,omitempty"` + + // FetchSize The maximum number of rows (or entries) to return in one response. + FetchSize *int `json:"fetch_size,omitempty"` + // Filter Elasticsearch query DSL for additional filtering. + Filter *types.Query `json:"filter,omitempty"` + // Query SQL query to run. + Query string `json:"query"` + // TimeZone ISO-8601 time zone ID for the search. + TimeZone *string `json:"time_zone,omitempty"` } // NewRequest returns a Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/translate/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/translate/response.go index 82abef254..8951eb232 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/translate/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/translate/response.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package translate import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Response holds the response body struct for the package translate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/sql/translate/TranslateSqlResponse.ts#L28-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/sql/translate/TranslateSqlResponse.ts#L28-L38 type Response struct { Aggregations map[string]types.Aggregations `json:"aggregations,omitempty"` @@ -44,3 +50,76 @@ func NewResponse() *Response { } return r } + +func (s *Response) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]types.Aggregations, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return err + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "size": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int64(v) + s.Size = &f + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(types.SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + 
s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return err + } + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return err + } + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/translate/translate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/translate/translate.go index 39699d966..b2fe1609f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/translate/translate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/sql/translate/translate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Translates SQL into Elasticsearch queries package translate @@ -48,8 +48,9 @@ type Translate struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +77,8 @@ func New(tp elastictransport.Interface) *Translate { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +108,19 @@ func (r *Translate) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +128,7 @@ func (r *Translate) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -194,7 +208,6 @@ func (r Translate) Do(ctx context.Context) (*Response, 
error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -203,6 +216,10 @@ func (r Translate) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,3 +229,37 @@ func (r *Translate) Header(key, value string) *Translate { return r } + +// FetchSize The maximum number of rows (or entries) to return in one response. +// API name: fetch_size +func (r *Translate) FetchSize(fetchsize int) *Translate { + r.req.FetchSize = &fetchsize + + return r +} + +// Filter Elasticsearch query DSL for additional filtering. +// API name: filter +func (r *Translate) Filter(filter *types.Query) *Translate { + + r.req.Filter = filter + + return r +} + +// Query SQL query to run. +// API name: query +func (r *Translate) Query(query string) *Translate { + + r.req.Query = query + + return r +} + +// TimeZone ISO-8601 time zone ID for the search. +// API name: time_zone +func (r *Translate) TimeZone(timezone string) *Translate { + r.req.TimeZone = &timezone + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ssl/certificates/certificates.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ssl/certificates/certificates.go index af1982b81..f58af515e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ssl/certificates/certificates.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ssl/certificates/certificates.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves information about the X.509 certificates used to encrypt // communications in the cluster. 
@@ -161,7 +161,6 @@ func (r Certificates) Do(ctx context.Context) (Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -170,6 +169,10 @@ func (r Certificates) Do(ctx context.Context) (Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ssl/certificates/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ssl/certificates/response.go index 1b9f990fe..fe28c9ce3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ssl/certificates/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/ssl/certificates/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package certificates @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package certificates // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ssl/certificates/GetCertificatesResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ssl/certificates/GetCertificatesResponse.ts#L22-L24 type Response []types.CertificateInformation diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/deletesynonym/delete_synonym.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/deletesynonym/delete_synonym.go new file mode 100644 index 000000000..22b1315df --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/deletesynonym/delete_synonym.go @@ -0,0 +1,221 @@ +// Licensed 
to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Deletes a synonym set +package deletesynonym + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteSynonym struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + id string +} + +// NewDeleteSynonym type alias for index. +type NewDeleteSynonym func(id string) *DeleteSynonym + +// NewDeleteSynonymFunc returns a new instance of DeleteSynonym with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewDeleteSynonymFunc(tp elastictransport.Interface) NewDeleteSynonym { + return func(id string) *DeleteSynonym { + n := New(tp) + + n._id(id) + + return n + } +} + +// Deletes a synonym set +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-synonyms-set.html +func New(tp elastictransport.Interface) *DeleteSynonym { + r := &DeleteSynonym{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *DeleteSynonym) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + + path.WriteString(r.id) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteSynonym) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the DeleteSynonym query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletesynonym.Response +func (r DeleteSynonym) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteSynonym) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the DeleteSynonym headers map. 
+func (r *DeleteSynonym) Header(key, value string) *DeleteSynonym { + r.headers.Set(key, value) + + return r +} + +// Id The id of the synonyms set to be deleted +// API Name: id +func (r *DeleteSynonym) _id(id string) *DeleteSynonym { + r.paramSet |= idMask + r.id = id + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/deletesynonym/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/deletesynonym/response.go new file mode 100644 index 000000000..7fd5b957c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/deletesynonym/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package deletesynonym + +// Response holds the response body struct for the package deletesynonym +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/synonyms/delete_synonym/SynonymsDeleteResponse.ts#L22-L24 + +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/deletesynonymrule/delete_synonym_rule.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/deletesynonymrule/delete_synonym_rule.go new file mode 100644 index 000000000..7363fc5c2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/deletesynonymrule/delete_synonym_rule.go @@ -0,0 +1,244 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Deletes a synonym rule in a synonym set +package deletesynonymrule + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + setidMask = iota + 1 + + ruleidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type DeleteSynonymRule struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + setid string + ruleid string +} + +// NewDeleteSynonymRule type alias for index. +type NewDeleteSynonymRule func(setid, ruleid string) *DeleteSynonymRule + +// NewDeleteSynonymRuleFunc returns a new instance of DeleteSynonymRule with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewDeleteSynonymRuleFunc(tp elastictransport.Interface) NewDeleteSynonymRule { + return func(setid, ruleid string) *DeleteSynonymRule { + n := New(tp) + + n._setid(setid) + + n._ruleid(ruleid) + + return n + } +} + +// Deletes a synonym rule in a synonym set +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-synonym-rule.html +func New(tp elastictransport.Interface) *DeleteSynonymRule { + r := &DeleteSynonymRule{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *DeleteSynonymRule) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == setidMask|ruleidMask: + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + + path.WriteString(r.setid) + path.WriteString("/") + + path.WriteString(r.ruleid) + + method = http.MethodDelete + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r DeleteSynonymRule) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the DeleteSynonymRule query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a deletesynonymrule.Response +func (r DeleteSynonymRule) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r DeleteSynonymRule) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the DeleteSynonymRule headers map. 
+func (r *DeleteSynonymRule) Header(key, value string) *DeleteSynonymRule { + r.headers.Set(key, value) + + return r +} + +// SetId The id of the synonym set to be updated +// API Name: setid +func (r *DeleteSynonymRule) _setid(setid string) *DeleteSynonymRule { + r.paramSet |= setidMask + r.setid = setid + + return r +} + +// RuleId The id of the synonym rule to be deleted +// API Name: ruleid +func (r *DeleteSynonymRule) _ruleid(ruleid string) *DeleteSynonymRule { + r.paramSet |= ruleidMask + r.ruleid = ruleid + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/deletesynonymrule/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/deletesynonymrule/response.go new file mode 100644 index 000000000..66d12b84e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/deletesynonymrule/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package deletesynonymrule + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package deletesynonymrule +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/synonyms/delete_synonym_rule/SynonymRuleDeleteResponse.ts#L22-L24 + +type Response struct { + + // ReloadAnalyzersDetails Updating synonyms in a synonym set reloads the associated analyzers. + // This is the analyzers reloading result + ReloadAnalyzersDetails types.ReloadResult `json:"reload_analyzers_details"` + // Result Update operation result + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonym/get_synonym.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonym/get_synonym.go new file mode 100644 index 000000000..22167dc57 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonym/get_synonym.go @@ -0,0 +1,238 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Retrieves a synonym set +package getsynonym + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetSynonym struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + id string +} + +// NewGetSynonym type alias for index. +type NewGetSynonym func(id string) *GetSynonym + +// NewGetSynonymFunc returns a new instance of GetSynonym with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewGetSynonymFunc(tp elastictransport.Interface) NewGetSynonym { + return func(id string) *GetSynonym { + n := New(tp) + + n._id(id) + + return n + } +} + +// Retrieves a synonym set +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonyms-set.html +func New(tp elastictransport.Interface) *GetSynonym { + r := &GetSynonym{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *GetSynonym) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + + path.WriteString(r.id) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetSynonym) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the GetSynonym query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getsynonym.Response +func (r GetSynonym) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetSynonym) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the GetSynonym headers map. 
+func (r *GetSynonym) Header(key, value string) *GetSynonym { + r.headers.Set(key, value) + + return r +} + +// Id "The id of the synonyms set to be retrieved +// API Name: id +func (r *GetSynonym) _id(id string) *GetSynonym { + r.paramSet |= idMask + r.id = id + + return r +} + +// From Starting offset for query rules to be retrieved +// API name: from +func (r *GetSynonym) From(from int) *GetSynonym { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size specifies a max number of query rules to retrieve +// API name: size +func (r *GetSynonym) Size(size int) *GetSynonym { + r.values.Set("size", strconv.Itoa(size)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonym/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonym/response.go new file mode 100644 index 000000000..2c5d6bd2a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonym/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package getsynonym + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package getsynonym +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/synonyms/get_synonym/SynonymsGetResponse.ts#L23-L28 + +type Response struct { + Count int `json:"count"` + SynonymsSet []types.SynonymRuleRead `json:"synonyms_set"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonymrule/get_synonym_rule.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonymrule/get_synonym_rule.go new file mode 100644 index 000000000..5ae6d8bb4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonymrule/get_synonym_rule.go @@ -0,0 +1,244 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Retrieves a synonym rule from a synonym set +package getsynonymrule + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + setidMask = iota + 1 + + ruleidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetSynonymRule struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + setid string + ruleid string +} + +// NewGetSynonymRule type alias for index. +type NewGetSynonymRule func(setid, ruleid string) *GetSynonymRule + +// NewGetSynonymRuleFunc returns a new instance of GetSynonymRule with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetSynonymRuleFunc(tp elastictransport.Interface) NewGetSynonymRule { + return func(setid, ruleid string) *GetSynonymRule { + n := New(tp) + + n._setid(setid) + + n._ruleid(ruleid) + + return n + } +} + +// Retrieves a synonym rule from a synonym set +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonym-rule.html +func New(tp elastictransport.Interface) *GetSynonymRule { + r := &GetSynonymRule{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *GetSynonymRule) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == setidMask|ruleidMask: + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + + path.WriteString(r.setid) + path.WriteString("/") + + path.WriteString(r.ruleid) + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r GetSynonymRule) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the GetSynonymRule query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getsynonymrule.Response +func (r GetSynonymRule) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetSynonymRule) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the GetSynonymRule headers map. 
+func (r *GetSynonymRule) Header(key, value string) *GetSynonymRule { + r.headers.Set(key, value) + + return r +} + +// SetId The id of the synonym set to retrieve the synonym rule from +// API Name: setid +func (r *GetSynonymRule) _setid(setid string) *GetSynonymRule { + r.paramSet |= setidMask + r.setid = setid + + return r +} + +// RuleId The id of the synonym rule to retrieve +// API Name: ruleid +func (r *GetSynonymRule) _ruleid(ruleid string) *GetSynonymRule { + r.paramSet |= ruleidMask + r.ruleid = ruleid + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonymrule/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonymrule/response.go new file mode 100644 index 000000000..eec7e5f04 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonymrule/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package getsynonymrule + +// Response holds the response body struct for the package getsynonymrule +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/synonyms/get_synonym_rule/SynonymRuleGetResponse.ts#L22-L24 + +type Response struct { + + // Id Synonym Rule identifier + Id string `json:"id"` + // Synonyms Synonyms, in Solr format, that conform the synonym rule. See + // https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-synonym-graph-tokenfilter.html#_solr_synonyms_2 + Synonyms string `json:"synonyms"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonymssets/get_synonyms_sets.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonymssets/get_synonyms_sets.go new file mode 100644 index 000000000..8584b59fd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonymssets/get_synonyms_sets.go @@ -0,0 +1,218 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Retrieves a summary of all defined synonym sets +package getsynonymssets + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetSynonymsSets struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int +} + +// NewGetSynonymsSets type alias for index. +type NewGetSynonymsSets func() *GetSynonymsSets + +// NewGetSynonymsSetsFunc returns a new instance of GetSynonymsSets with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetSynonymsSetsFunc(tp elastictransport.Interface) NewGetSynonymsSets { + return func() *GetSynonymsSets { + n := New(tp) + + return n + } +} + +// Retrieves a summary of all defined synonym sets +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/list-synonyms-sets.html +func New(tp elastictransport.Interface) *GetSynonymsSets { + r := &GetSynonymsSets{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *GetSynonymsSets) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_synonyms") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetSynonymsSets) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the GetSynonymsSets query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getsynonymssets.Response +func (r GetSynonymsSets) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = 
res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r GetSynonymsSets) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the GetSynonymsSets headers map. +func (r *GetSynonymsSets) Header(key, value string) *GetSynonymsSets { + r.headers.Set(key, value) + + return r +} + +// From Starting offset +// API name: from +func (r *GetSynonymsSets) From(from int) *GetSynonymsSets { + r.values.Set("from", strconv.Itoa(from)) + + return r +} + +// Size specifies a max number of results to get +// API name: size +func (r *GetSynonymsSets) Size(size int) *GetSynonymsSets { + r.values.Set("size", strconv.Itoa(size)) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonymssets/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonymssets/response.go new file mode 100644 index 000000000..10a0f9707 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonymssets/response.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package getsynonymssets + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package getsynonymssets +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/synonyms/get_synonyms_sets/SynonymsSetsGetResponse.ts#L23-L28 + +type Response struct { + Count int `json:"count"` + Results []types.SynonymsSetItem `json:"results"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonym/put_synonym.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonym/put_synonym.go new file mode 100644 index 000000000..69db652d7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonym/put_synonym.go @@ -0,0 +1,257 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Creates or updates a synonyms set +package putsynonym + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + idMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutSynonym struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + req *Request + deferred []func(request *Request) error + raw io.Reader + + paramSet int + + id string +} + +// NewPutSynonym type alias for index. +type NewPutSynonym func(id string) *PutSynonym + +// NewPutSynonymFunc returns a new instance of PutSynonym with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewPutSynonymFunc(tp elastictransport.Interface) NewPutSynonym { + return func(id string) *PutSynonym { + n := New(tp) + + n._id(id) + + return n + } +} + +// Creates or updates a synonyms set +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonyms-set.html +func New(tp elastictransport.Interface) *PutSynonym { + r := &PutSynonym{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + + req: NewRequest(), + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutSynonym) Raw(raw io.Reader) *PutSynonym { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutSynonym) Request(req *Request) *PutSynonym { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *PutSynonym) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw != nil { + r.buf.ReadFrom(r.raw) + } else if r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutSynonym: %w", err) + } + + r.buf.Write(data) + + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == idMask: + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + + path.WriteString(r.id) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { 
+ req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r PutSynonym) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the PutSynonym query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putsynonym.Response +func (r PutSynonym) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// Header set a key, value pair in the PutSynonym headers map. 
+func (r *PutSynonym) Header(key, value string) *PutSynonym { + r.headers.Set(key, value) + + return r +} + +// Id The id of the synonyms set to be created or updated +// API Name: id +func (r *PutSynonym) _id(id string) *PutSynonym { + r.paramSet |= idMask + r.id = id + + return r +} + +// SynonymsSet The synonym set information to update +// API name: synonyms_set +func (r *PutSynonym) SynonymsSet(synonymssets ...types.SynonymRule) *PutSynonym { + r.req.SynonymsSet = synonymssets + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonym/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonym/request.go new file mode 100644 index 000000000..083bbf724 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonym/request.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package putsynonym + +import ( + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Request holds the request body struct for the package putsynonym +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/synonyms/put_synonym/SynonymsPutRequest.ts#L23-L42 +type Request struct { + + // SynonymsSet The synonym set information to update + SynonymsSet []types.SynonymRule `json:"synonyms_set"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putsynonym request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonym/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonym/response.go new file mode 100644 index 000000000..ace1c0a62 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonym/response.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package putsynonym + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package putsynonym +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/synonyms/put_synonym/SynonymsPutResponse.ts#L24-L29 + +type Response struct { + ReloadAnalyzersDetails types.ReloadDetails `json:"reload_analyzers_details"` + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonymrule/put_synonym_rule.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonymrule/put_synonym_rule.go new file mode 100644 index 000000000..c696c9db7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonymrule/put_synonym_rule.go @@ -0,0 +1,273 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Creates or updates a synonym rule in a synonym set +package putsynonymrule + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + setidMask = iota + 1 + + ruleidMask +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type PutSynonymRule struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + req *Request + deferred []func(request *Request) error + raw io.Reader + + paramSet int + + setid string + ruleid string +} + +// NewPutSynonymRule type alias for index. +type NewPutSynonymRule func(setid, ruleid string) *PutSynonymRule + +// NewPutSynonymRuleFunc returns a new instance of PutSynonymRule with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. 
+func NewPutSynonymRuleFunc(tp elastictransport.Interface) NewPutSynonymRule { + return func(setid, ruleid string) *PutSynonymRule { + n := New(tp) + + n._setid(setid) + + n._ruleid(ruleid) + + return n + } +} + +// Creates or updates a synonym rule in a synonym set +// +// https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonym-rule.html +func New(tp elastictransport.Interface) *PutSynonymRule { + r := &PutSynonymRule{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + + req: NewRequest(), + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *PutSynonymRule) Raw(raw io.Reader) *PutSynonymRule { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. +func (r *PutSynonymRule) Request(req *Request) *PutSynonymRule { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *PutSynonymRule) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw != nil { + r.buf.ReadFrom(r.raw) + } else if r.req != nil { + + data, err := json.Marshal(r.req) + + if err != nil { + return nil, fmt.Errorf("could not serialise request for PutSynonymRule: %w", err) + } + + r.buf.Write(data) + + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == setidMask|ruleidMask: + path.WriteString("/") + path.WriteString("_synonyms") + path.WriteString("/") + + path.WriteString(r.setid) + path.WriteString("/") + + path.WriteString(r.ruleid) + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r PutSynonymRule) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the PutSynonymRule query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a putsynonymrule.Response +func (r PutSynonymRule) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// Header set a key, value pair in the PutSynonymRule headers map. 
+func (r *PutSynonymRule) Header(key, value string) *PutSynonymRule { + r.headers.Set(key, value) + + return r +} + +// SetId The id of the synonym set to be updated with the synonym rule +// API Name: setid +func (r *PutSynonymRule) _setid(setid string) *PutSynonymRule { + r.paramSet |= setidMask + r.setid = setid + + return r +} + +// RuleId The id of the synonym rule to be updated or created +// API Name: ruleid +func (r *PutSynonymRule) _ruleid(ruleid string) *PutSynonymRule { + r.paramSet |= ruleidMask + r.ruleid = ruleid + + return r +} + +// API name: synonyms +func (r *PutSynonymRule) Synonyms(synonyms ...string) *PutSynonymRule { + r.req.Synonyms = synonyms + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonymrule/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonymrule/request.go new file mode 100644 index 000000000..7eba9db84 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonymrule/request.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package putsynonymrule + +import ( + "encoding/json" + "fmt" +) + +// Request holds the request body struct for the package putsynonymrule +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/synonyms/put_synonym_rule/SynonymRulePutRequest.ts#L23-L47 +type Request struct { + Synonyms []string `json:"synonyms"` +} + +// NewRequest returns a Request +func NewRequest() *Request { + r := &Request{} + return r +} + +// FromJSON allows to load an arbitrary json into the request structure +func (r *Request) FromJSON(data string) (*Request, error) { + var req Request + err := json.Unmarshal([]byte(data), &req) + + if err != nil { + return nil, fmt.Errorf("could not deserialise json into Putsynonymrule request: %w", err) + } + + return &req, nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonymrule/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonymrule/response.go new file mode 100644 index 000000000..cff58de03 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonymrule/response.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package putsynonymrule + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/result" +) + +// Response holds the response body struct for the package putsynonymrule +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/synonyms/put_synonym_rule/SynonymRulePutResponse.ts#L22-L24 + +type Response struct { + + // ReloadAnalyzersDetails Updating synonyms in a synonym set reloads the associated analyzers. + // This is the analyzers reloading result + ReloadAnalyzersDetails types.ReloadResult `json:"reload_analyzers_details"` + // Result Update operation result + Result result.Result `json:"result"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/cancel/cancel.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/cancel/cancel.go index 9fbde56a6..05f18ab0e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/cancel/cancel.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/cancel/cancel.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Cancels a task, if it can be cancelled through an API. 
package cancel @@ -176,7 +176,6 @@ func (r Cancel) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -185,6 +184,10 @@ func (r Cancel) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,39 +219,44 @@ func (r *Cancel) Header(key, value string) *Cancel { return r } -// TaskId Cancel the task with specified task id (node_id:task_number) +// TaskId ID of the task. // API Name: taskid -func (r *Cancel) TaskId(v string) *Cancel { +func (r *Cancel) TaskId(taskid string) *Cancel { r.paramSet |= taskidMask - r.taskid = v + r.taskid = taskid return r } -// Actions A comma-separated list of actions that should be cancelled. Leave empty to -// cancel all. +// Actions Comma-separated list or wildcard expression of actions used to limit the +// request. // API name: actions -func (r *Cancel) Actions(v string) *Cancel { - r.values.Set("actions", v) +func (r *Cancel) Actions(actions ...string) *Cancel { + tmp := []string{} + for _, item := range actions { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("actions", strings.Join(tmp, ",")) return r } -// Nodes A comma-separated list of node IDs or names to limit the returned -// information; use `_local` to return information from the node you're -// connecting to, leave empty to get information from all nodes +// Nodes Comma-separated list of node IDs or names used to limit the request. // API name: nodes -func (r *Cancel) Nodes(v string) *Cancel { - r.values.Set("nodes", v) +func (r *Cancel) Nodes(nodes ...string) *Cancel { + tmp := []string{} + for _, item := range nodes { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("nodes", strings.Join(tmp, ",")) return r } -// ParentTaskId Cancel tasks with specified parent task id (node_id:task_number). Set to -1 -// to cancel all. 
+// ParentTaskId Parent task ID used to limit the tasks. // API name: parent_task_id -func (r *Cancel) ParentTaskId(v string) *Cancel { - r.values.Set("parent_task_id", v) +func (r *Cancel) ParentTaskId(parenttaskid string) *Cancel { + r.values.Set("parent_task_id", parenttaskid) return r } @@ -256,8 +264,8 @@ func (r *Cancel) ParentTaskId(v string) *Cancel { // WaitForCompletion Should the request block until the cancellation of the task and its // descendant tasks is completed. Defaults to false // API name: wait_for_completion -func (r *Cancel) WaitForCompletion(b bool) *Cancel { - r.values.Set("wait_for_completion", strconv.FormatBool(b)) +func (r *Cancel) WaitForCompletion(waitforcompletion bool) *Cancel { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/cancel/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/cancel/response.go index 130707b1c..7a1c25351 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/cancel/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/cancel/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package cancel @@ -31,7 +31,7 @@ import ( // Response holds the response body struct for the package cancel // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/tasks/cancel/CancelTasksResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/tasks/cancel/CancelTasksResponse.ts#L22-L24 type Response struct { NodeFailures []types.ErrorCause `json:"node_failures,omitempty"` @@ -73,6 +73,9 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "nodes": + if s.Nodes == nil { + s.Nodes = make(map[string]types.NodeTasks, 0) + } if err := dec.Decode(&s.Nodes); err != nil { return err } @@ -83,8 +86,24 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "tasks": - if err := dec.Decode(&s.Tasks); err != nil { - return err + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]types.ParentTaskInfo, 0) + if err := localDec.Decode(&o); err != nil { + return err + } + s.Tasks = o + case '[': + o := []types.TaskInfo{} + if err := localDec.Decode(&o); err != nil { + return err + } + s.Tasks = o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/get/get.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/get/get.go index 67b198d4b..b0129508c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/get/get.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/get/get.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns information about a task. package get @@ -68,7 +68,7 @@ func NewGetFunc(tp elastictransport.Interface) NewGet { return func(taskid string) *Get { n := New(tp) - n.TaskId(taskid) + n._taskid(taskid) return n } @@ -169,7 +169,6 @@ func (r Get) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -178,6 +177,10 @@ func (r Get) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -209,27 +212,29 @@ func (r *Get) Header(key, value string) *Get { return r } -// TaskId Return the task with specified id (node_id:task_number) +// TaskId ID of the task. // API Name: taskid -func (r *Get) TaskId(v string) *Get { +func (r *Get) _taskid(taskid string) *Get { r.paramSet |= taskidMask - r.taskid = v + r.taskid = taskid return r } -// Timeout Explicit operation timeout +// Timeout Period to wait for a response. +// If no response is received before the timeout expires, the request fails and +// returns an error. // API name: timeout -func (r *Get) Timeout(v string) *Get { - r.values.Set("timeout", v) +func (r *Get) Timeout(duration string) *Get { + r.values.Set("timeout", duration) return r } -// WaitForCompletion Wait for the matching tasks to complete (default: false) +// WaitForCompletion If `true`, the request blocks until the task has completed. 
// API name: wait_for_completion -func (r *Get) WaitForCompletion(b bool) *Get { - r.values.Set("wait_for_completion", strconv.FormatBool(b)) +func (r *Get) WaitForCompletion(waitforcompletion bool) *Get { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/get/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/get/response.go index 43827fbcb..b5e4eadfd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/get/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/get/response.go @@ -16,22 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package get import ( + "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Response holds the response body struct for the package get // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/tasks/get/GetTaskResponse.ts#L24-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/tasks/get/GetTaskResponse.ts#L24-L31 type Response struct { Completed bool `json:"completed"` Error *types.ErrorCause `json:"error,omitempty"` - Response *types.TaskStatus `json:"response,omitempty"` + Response json.RawMessage `json:"response,omitempty"` Task types.TaskInfo `json:"task"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/list/list.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/list/list.go index f05dfb37e..105330d92 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/list/list.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/list/list.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Returns a list of tasks. package list @@ -36,7 +36,6 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/groupby" ) @@ -160,7 +159,6 @@ func (r List) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -169,6 +167,10 @@ func (r List) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -203,32 +205,40 @@ func (r *List) Header(key, value string) *List { // Actions Comma-separated list or wildcard expression of actions used to limit the // request. // API name: actions -func (r *List) Actions(v string) *List { - r.values.Set("actions", v) +func (r *List) Actions(actions ...string) *List { + tmp := []string{} + for _, item := range actions { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("actions", strings.Join(tmp, ",")) return r } // Detailed If `true`, the response includes detailed information about shard recoveries. // API name: detailed -func (r *List) Detailed(b bool) *List { - r.values.Set("detailed", strconv.FormatBool(b)) +func (r *List) Detailed(detailed bool) *List { + r.values.Set("detailed", strconv.FormatBool(detailed)) return r } // GroupBy Key used to group tasks in the response. 
// API name: group_by -func (r *List) GroupBy(enum groupby.GroupBy) *List { - r.values.Set("group_by", enum.String()) +func (r *List) GroupBy(groupby groupby.GroupBy) *List { + r.values.Set("group_by", groupby.String()) return r } // NodeId Comma-separated list of node IDs or names used to limit returned information. // API name: node_id -func (r *List) NodeId(v string) *List { - r.values.Set("node_id", v) +func (r *List) NodeId(nodeids ...string) *List { + tmp := []string{} + for _, item := range nodeids { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("node_id", strings.Join(tmp, ",")) return r } @@ -236,8 +246,8 @@ func (r *List) NodeId(v string) *List { // ParentTaskId Parent task ID used to limit returned information. To return all tasks, omit // this parameter or use a value of `-1`. // API name: parent_task_id -func (r *List) ParentTaskId(v string) *List { - r.values.Set("parent_task_id", v) +func (r *List) ParentTaskId(id string) *List { + r.values.Set("parent_task_id", id) return r } @@ -245,8 +255,8 @@ func (r *List) ParentTaskId(v string) *List { // MasterTimeout Period to wait for a connection to the master node. If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *List) MasterTimeout(v string) *List { - r.values.Set("master_timeout", v) +func (r *List) MasterTimeout(duration string) *List { + r.values.Set("master_timeout", duration) return r } @@ -254,16 +264,16 @@ func (r *List) MasterTimeout(v string) *List { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. // API name: timeout -func (r *List) Timeout(v string) *List { - r.values.Set("timeout", v) +func (r *List) Timeout(duration string) *List { + r.values.Set("timeout", duration) return r } // WaitForCompletion If `true`, the request blocks until the operation is complete. 
// API name: wait_for_completion -func (r *List) WaitForCompletion(b bool) *List { - r.values.Set("wait_for_completion", strconv.FormatBool(b)) +func (r *List) WaitForCompletion(waitforcompletion bool) *List { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/list/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/list/response.go index c49a063cc..b2e5be616 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/list/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/tasks/list/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package list @@ -31,7 +31,7 @@ import ( // Response holds the response body struct for the package list // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/tasks/list/ListTasksResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/tasks/list/ListTasksResponse.ts#L22-L24 type Response struct { NodeFailures []types.ErrorCause `json:"node_failures,omitempty"` @@ -73,6 +73,9 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "nodes": + if s.Nodes == nil { + s.Nodes = make(map[string]types.NodeTasks, 0) + } if err := dec.Decode(&s.Nodes); err != nil { return err } @@ -83,8 +86,24 @@ func (s *Response) UnmarshalJSON(data []byte) error { } case "tasks": - if err := dec.Decode(&s.Tasks); err != nil { - return err + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) 
+ switch rawMsg[0] { + case '{': + o := make(map[string]types.ParentTaskInfo, 0) + if err := localDec.Decode(&o); err != nil { + return err + } + s.Tasks = o + case '[': + o := []types.TaskInfo{} + if err := localDec.Decode(&o); err != nil { + return err + } + s.Tasks = o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/textstructure/findstructure/find_structure.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/textstructure/findstructure/find_structure.go new file mode 100644 index 000000000..489c042d1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/textstructure/findstructure/find_structure.go @@ -0,0 +1,393 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Finds the structure of a text file. The text file must contain data that is +// suitable to be ingested into Elasticsearch. 
+package findstructure + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type FindStructure struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + req *Request + deferred []func(request *Request) error + raw io.Reader + + paramSet int +} + +// NewFindStructure type alias for index. +type NewFindStructure func() *FindStructure + +// NewFindStructureFunc returns a new instance of FindStructure with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewFindStructureFunc(tp elastictransport.Interface) NewFindStructure { + return func() *FindStructure { + n := New(tp) + + return n + } +} + +// Finds the structure of a text file. The text file must contain data that is +// suitable to be ingested into Elasticsearch. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.html +func New(tp elastictransport.Interface) *FindStructure { + r := &FindStructure{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// Raw takes a json payload as input which is then passed to the http.Request +// If specified Raw takes precedence on Request method. +func (r *FindStructure) Raw(raw io.Reader) *FindStructure { + r.raw = raw + + return r +} + +// Request allows to set the request property with the appropriate payload. 
+func (r *FindStructure) Request(req *Request) *FindStructure { + r.req = req + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *FindStructure) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + + if r.raw != nil { + r.buf.ReadFrom(r.raw) + } else if r.req != nil { + + for _, elem := range *r.req { + data, err := json.Marshal(elem) + if err != nil { + return nil, err + } + r.buf.Write(data) + r.buf.Write([]byte("\n")) + } + + if err != nil { + return nil, fmt.Errorf("could not serialise request for FindStructure: %w", err) + } + + } + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_text_structure") + path.WriteString("/") + path.WriteString("find_structure") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+x-ndjson;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. 
+func (r FindStructure) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the FindStructure query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a findstructure.Response +func (r FindStructure) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// Header set a key, value pair in the FindStructure headers map. +func (r *FindStructure) Header(key, value string) *FindStructure { + r.headers.Set(key, value) + + return r +} + +// Charset The text’s character set. It must be a character set that is supported by the +// JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE, windows-1252, or +// EUC-JP. If this parameter is not specified, the structure finder chooses an +// appropriate character set. +// API name: charset +func (r *FindStructure) Charset(charset string) *FindStructure { + r.values.Set("charset", charset) + + return r +} + +// ColumnNames If you have set format to delimited, you can specify the column names in a +// comma-separated list. If this parameter is not specified, the structure +// finder uses the column names from the header row of the text. If the text +// does not have a header role, columns are named "column1", "column2", +// "column3", etc. 
+// API name: column_names +func (r *FindStructure) ColumnNames(columnnames string) *FindStructure { + r.values.Set("column_names", columnnames) + + return r +} + +// Delimiter If you have set format to delimited, you can specify the character used to +// delimit the values in each row. Only a single character is supported; the +// delimiter cannot have multiple characters. By default, the API considers the +// following possibilities: comma, tab, semi-colon, and pipe (|). In this +// default scenario, all rows must have the same number of fields for the +// delimited format to be detected. If you specify a delimiter, up to 10% of the +// rows can have a different number of columns than the first row. +// API name: delimiter +func (r *FindStructure) Delimiter(delimiter string) *FindStructure { + r.values.Set("delimiter", delimiter) + + return r +} + +// Explain If this parameter is set to true, the response includes a field named +// explanation, which is an array of strings that indicate how the structure +// finder produced its result. +// API name: explain +func (r *FindStructure) Explain(explain bool) *FindStructure { + r.values.Set("explain", strconv.FormatBool(explain)) + + return r +} + +// Format The high level structure of the text. Valid values are ndjson, xml, +// delimited, and semi_structured_text. By default, the API chooses the format. +// In this default scenario, all rows must have the same number of fields for a +// delimited format to be detected. If the format is set to delimited and the +// delimiter is not set, however, the API tolerates up to 5% of rows that have a +// different number of columns than the first row. +// API name: format +func (r *FindStructure) Format(format string) *FindStructure { + r.values.Set("format", format) + + return r +} + +// GrokPattern If you have set format to semi_structured_text, you can specify a Grok +// pattern that is used to extract fields from every message in the text. 
The +// name of the timestamp field in the Grok pattern must match what is specified +// in the timestamp_field parameter. If that parameter is not specified, the +// name of the timestamp field in the Grok pattern must match "timestamp". If +// grok_pattern is not specified, the structure finder creates a Grok pattern. +// API name: grok_pattern +func (r *FindStructure) GrokPattern(grokpattern string) *FindStructure { + r.values.Set("grok_pattern", grokpattern) + + return r +} + +// HasHeaderRow If you have set format to delimited, you can use this parameter to indicate +// whether the column names are in the first row of the text. If this parameter +// is not specified, the structure finder guesses based on the similarity of the +// first row of the text to other rows. +// API name: has_header_row +func (r *FindStructure) HasHeaderRow(hasheaderrow bool) *FindStructure { + r.values.Set("has_header_row", strconv.FormatBool(hasheaderrow)) + + return r +} + +// LineMergeSizeLimit The maximum number of characters in a message when lines are merged to form +// messages while analyzing semi-structured text. If you have extremely long +// messages you may need to increase this, but be aware that this may lead to +// very long processing times if the way to group lines into messages is +// misdetected. +// API name: line_merge_size_limit +func (r *FindStructure) LineMergeSizeLimit(linemergesizelimit string) *FindStructure { + r.values.Set("line_merge_size_limit", linemergesizelimit) + + return r +} + +// LinesToSample The number of lines to include in the structural analysis, starting from the +// beginning of the text. The minimum is 2; If the value of this parameter is +// greater than the number of lines in the text, the analysis proceeds (as long +// as there are at least two lines in the text) for all of the lines. 
+// API name: lines_to_sample +func (r *FindStructure) LinesToSample(linestosample string) *FindStructure { + r.values.Set("lines_to_sample", linestosample) + + return r +} + +// Quote If you have set format to delimited, you can specify the character used to +// quote the values in each row if they contain newlines or the delimiter +// character. Only a single character is supported. If this parameter is not +// specified, the default value is a double quote ("). If your delimited text +// format does not use quoting, a workaround is to set this argument to a +// character that does not appear anywhere in the sample. +// API name: quote +func (r *FindStructure) Quote(quote string) *FindStructure { + r.values.Set("quote", quote) + + return r +} + +// ShouldTrimFields If you have set format to delimited, you can specify whether values between +// delimiters should have whitespace trimmed from them. If this parameter is not +// specified and the delimiter is pipe (|), the default value is true. +// Otherwise, the default value is false. +// API name: should_trim_fields +func (r *FindStructure) ShouldTrimFields(shouldtrimfields bool) *FindStructure { + r.values.Set("should_trim_fields", strconv.FormatBool(shouldtrimfields)) + + return r +} + +// Timeout Sets the maximum amount of time that the structure analysis make take. If the +// analysis is still running when the timeout expires then it will be aborted. +// API name: timeout +func (r *FindStructure) Timeout(duration string) *FindStructure { + r.values.Set("timeout", duration) + + return r +} + +// TimestampField Optional parameter to specify the timestamp field in the file +// API name: timestamp_field +func (r *FindStructure) TimestampField(field string) *FindStructure { + r.values.Set("timestamp_field", field) + + return r +} + +// TimestampFormat The Java time format of the timestamp field in the text. 
+// API name: timestamp_format +func (r *FindStructure) TimestampFormat(timestampformat string) *FindStructure { + r.values.Set("timestamp_format", timestampformat) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/textstructure/findstructure/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/textstructure/findstructure/request.go new file mode 100644 index 000000000..dc9be8c22 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/textstructure/findstructure/request.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package findstructure + +import ( + "encoding/json" +) + +// Request holds the request body struct for the package findstructure +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/text_structure/find_structure/FindStructureRequest.ts#L24-L73 +type Request = []json.RawMessage diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/textstructure/findstructure/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/textstructure/findstructure/response.go new file mode 100644 index 000000000..8507cea65 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/textstructure/findstructure/response.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package findstructure + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +// Response holds the response body struct for the package findstructure +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/text_structure/find_structure/FindStructureResponse.ts#L27-L52 + +type Response struct { + Charset string `json:"charset"` + ColumnNames []string `json:"column_names,omitempty"` + Delimiter *string `json:"delimiter,omitempty"` + ExcludeLinesPattern *string `json:"exclude_lines_pattern,omitempty"` + Explanation []string `json:"explanation,omitempty"` + FieldStats map[string]types.FieldStat `json:"field_stats"` + Format string `json:"format"` + GrokPattern *string `json:"grok_pattern,omitempty"` + HasByteOrderMarker bool `json:"has_byte_order_marker"` + HasHeaderRow *bool `json:"has_header_row,omitempty"` + IngestPipeline types.PipelineConfig `json:"ingest_pipeline"` + JavaTimestampFormats []string `json:"java_timestamp_formats,omitempty"` + JodaTimestampFormats []string `json:"joda_timestamp_formats,omitempty"` + Mappings types.TypeMapping `json:"mappings"` + MultilineStartPattern *string `json:"multiline_start_pattern,omitempty"` + NeedClientTimezone bool `json:"need_client_timezone"` + NumLinesAnalyzed int `json:"num_lines_analyzed"` + NumMessagesAnalyzed int `json:"num_messages_analyzed"` + Quote *string `json:"quote,omitempty"` + SampleStart string `json:"sample_start"` + ShouldTrimFields *bool `json:"should_trim_fields,omitempty"` + TimestampField *string `json:"timestamp_field,omitempty"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{ + FieldStats: make(map[string]types.FieldStat, 0), + } + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/deletetransform/delete_transform.go 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/deletetransform/delete_transform.go index 3c3b9bb45..4222c0121 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/deletetransform/delete_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/deletetransform/delete_transform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deletes an existing transform. package deletetransform @@ -68,7 +68,7 @@ func NewDeleteTransformFunc(tp elastictransport.Interface) NewDeleteTransform { return func(transformid string) *DeleteTransform { n := New(tp) - n.TransformId(transformid) + n._transformid(transformid) return n } @@ -169,7 +169,6 @@ func (r DeleteTransform) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -178,6 +177,10 @@ func (r DeleteTransform) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -211,9 +214,9 @@ func (r *DeleteTransform) Header(key, value string) *DeleteTransform { // TransformId Identifier for the transform. // API Name: transformid -func (r *DeleteTransform) TransformId(v string) *DeleteTransform { +func (r *DeleteTransform) _transformid(transformid string) *DeleteTransform { r.paramSet |= transformidMask - r.transformid = v + r.transformid = transformid return r } @@ -222,8 +225,8 @@ func (r *DeleteTransform) TransformId(v string) *DeleteTransform { // deleted. If true, the transform is // deleted regardless of its current state. 
// API name: force -func (r *DeleteTransform) Force(b bool) *DeleteTransform { - r.values.Set("force", strconv.FormatBool(b)) +func (r *DeleteTransform) Force(force bool) *DeleteTransform { + r.values.Set("force", strconv.FormatBool(force)) return r } @@ -231,8 +234,8 @@ func (r *DeleteTransform) Force(b bool) *DeleteTransform { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. // API name: timeout -func (r *DeleteTransform) Timeout(v string) *DeleteTransform { - r.values.Set("timeout", v) +func (r *DeleteTransform) Timeout(duration string) *DeleteTransform { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/deletetransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/deletetransform/response.go index aa6413951..5c5dffadf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/deletetransform/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/deletetransform/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletetransform // Response holds the response body struct for the package deletetransform // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/delete_transform/DeleteTransformResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/delete_transform/DeleteTransformResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransform/get_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransform/get_transform.go index d506ec570..3d0402318 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransform/get_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransform/get_transform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves configuration information for transforms. 
package gettransform @@ -172,7 +172,6 @@ func (r GetTransform) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r GetTransform) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -217,9 +220,9 @@ func (r *GetTransform) Header(key, value string) *GetTransform { // `_all`, by specifying `*` as the ``, or by omitting the // ``. // API Name: transformid -func (r *GetTransform) TransformId(v string) *GetTransform { +func (r *GetTransform) TransformId(transformid string) *GetTransform { r.paramSet |= transformidMask - r.transformid = v + r.transformid = transformid return r } @@ -233,24 +236,24 @@ func (r *GetTransform) TransformId(v string) *GetTransform { // If this parameter is false, the request returns a 404 status code when // there are no matches or only partial matches. // API name: allow_no_match -func (r *GetTransform) AllowNoMatch(b bool) *GetTransform { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *GetTransform) AllowNoMatch(allownomatch bool) *GetTransform { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } // From Skips the specified number of transforms. // API name: from -func (r *GetTransform) From(i int) *GetTransform { - r.values.Set("from", strconv.Itoa(i)) +func (r *GetTransform) From(from int) *GetTransform { + r.values.Set("from", strconv.Itoa(from)) return r } // Size Specifies the maximum number of transforms to obtain. // API name: size -func (r *GetTransform) Size(i int) *GetTransform { - r.values.Set("size", strconv.Itoa(i)) +func (r *GetTransform) Size(size int) *GetTransform { + r.values.Set("size", strconv.Itoa(size)) return r } @@ -259,8 +262,8 @@ func (r *GetTransform) Size(i int) *GetTransform { // transform. 
This allows the configuration to be in an acceptable format to // be retrieved and then added to another cluster. // API name: exclude_generated -func (r *GetTransform) ExcludeGenerated(b bool) *GetTransform { - r.values.Set("exclude_generated", strconv.FormatBool(b)) +func (r *GetTransform) ExcludeGenerated(excludegenerated bool) *GetTransform { + r.values.Set("exclude_generated", strconv.FormatBool(excludegenerated)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransform/response.go index 5e37deb7c..8906f96ad 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransform/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransform/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package gettransform @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettransform // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/get_transform/GetTransformResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/get_transform/GetTransformResponse.ts#L23-L25 type Response struct { Count int64 `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransformstats/get_transform_stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransformstats/get_transform_stats.go index 05c50ff82..2d04a02b1 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransformstats/get_transform_stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransformstats/get_transform_stats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves usage information for transforms. package gettransformstats @@ -68,7 +68,7 @@ func NewGetTransformStatsFunc(tp elastictransport.Interface) NewGetTransformStat return func(transformid string) *GetTransformStats { n := New(tp) - n.TransformId(transformid) + n._transformid(transformid) return n } @@ -171,7 +171,6 @@ func (r GetTransformStats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -180,6 +179,10 @@ func (r GetTransformStats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,9 +219,9 @@ func (r *GetTransformStats) Header(key, value string) *GetTransformStats { // `_all`, by specifying `*` as the ``, or by omitting the // ``. // API Name: transformid -func (r *GetTransformStats) TransformId(v string) *GetTransformStats { +func (r *GetTransformStats) _transformid(transformid string) *GetTransformStats { r.paramSet |= transformidMask - r.transformid = v + r.transformid = transformid return r } @@ -232,32 +235,32 @@ func (r *GetTransformStats) TransformId(v string) *GetTransformStats { // If this parameter is false, the request returns a 404 status code when // there are no matches or only partial matches. 
// API name: allow_no_match -func (r *GetTransformStats) AllowNoMatch(b bool) *GetTransformStats { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *GetTransformStats) AllowNoMatch(allownomatch bool) *GetTransformStats { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } // From Skips the specified number of transforms. // API name: from -func (r *GetTransformStats) From(v string) *GetTransformStats { - r.values.Set("from", v) +func (r *GetTransformStats) From(from string) *GetTransformStats { + r.values.Set("from", from) return r } // Size Specifies the maximum number of transforms to obtain. // API name: size -func (r *GetTransformStats) Size(v string) *GetTransformStats { - r.values.Set("size", v) +func (r *GetTransformStats) Size(size string) *GetTransformStats { + r.values.Set("size", size) return r } // Timeout Controls the time to wait for the stats // API name: timeout -func (r *GetTransformStats) Timeout(v string) *GetTransformStats { - r.values.Set("timeout", v) +func (r *GetTransformStats) Timeout(duration string) *GetTransformStats { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransformstats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransformstats/response.go index afa553b1b..b8adf6865 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransformstats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransformstats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package gettransformstats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package gettransformstats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/get_transform_stats/GetTransformStatsResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/get_transform_stats/GetTransformStatsResponse.ts#L23-L25 type Response struct { Count int64 `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/previewtransform/preview_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/previewtransform/preview_transform.go index be019c460..9c242ce61 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/previewtransform/preview_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/previewtransform/preview_transform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Previews a transform. 
package previewtransform @@ -52,8 +52,9 @@ type PreviewTransform struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -82,6 +83,8 @@ func New(tp elastictransport.Interface) *PreviewTransform { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -111,9 +114,19 @@ func (r *PreviewTransform) HttpRequest(ctx context.Context) (*http.Request, erro var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -121,6 +134,7 @@ func (r *PreviewTransform) HttpRequest(ctx context.Context) (*http.Request, erro } r.buf.Write(data) + } r.path.Scheme = "http" @@ -210,7 +224,6 @@ func (r PreviewTransform) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -219,6 +232,10 @@ func (r PreviewTransform) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -233,9 +250,9 @@ func (r *PreviewTransform) Header(key, value string) *PreviewTransform { // you cannot provide transform // configuration details in the request body. // API Name: transformid -func (r *PreviewTransform) TransformId(v string) *PreviewTransform { +func (r *PreviewTransform) TransformId(transformid string) *PreviewTransform { r.paramSet |= transformidMask - r.transformid = v + r.transformid = transformid return r } @@ -243,8 +260,95 @@ func (r *PreviewTransform) TransformId(v string) *PreviewTransform { // Timeout Period to wait for a response. If no response is received before the // timeout expires, the request fails and returns an error. 
// API name: timeout -func (r *PreviewTransform) Timeout(v string) *PreviewTransform { - r.values.Set("timeout", v) +func (r *PreviewTransform) Timeout(duration string) *PreviewTransform { + r.values.Set("timeout", duration) + + return r +} + +// Description Free text description of the transform. +// API name: description +func (r *PreviewTransform) Description(description string) *PreviewTransform { + + r.req.Description = &description + + return r +} + +// Dest The destination for the transform. +// API name: dest +func (r *PreviewTransform) Dest(dest *types.TransformDestination) *PreviewTransform { + + r.req.Dest = dest + + return r +} + +// Frequency The interval between checks for changes in the source indices when the +// transform is running continuously. Also determines the retry interval in +// the event of transient failures while the transform is searching or +// indexing. The minimum value is 1s and the maximum is 1h. +// API name: frequency +func (r *PreviewTransform) Frequency(duration types.Duration) *PreviewTransform { + r.req.Frequency = duration + + return r +} + +// Latest The latest method transforms the data by finding the latest document for +// each unique key. +// API name: latest +func (r *PreviewTransform) Latest(latest *types.Latest) *PreviewTransform { + + r.req.Latest = latest + + return r +} + +// Pivot The pivot method transforms the data by aggregating and grouping it. +// These objects define the group by fields and the aggregation to reduce +// the data. +// API name: pivot +func (r *PreviewTransform) Pivot(pivot *types.Pivot) *PreviewTransform { + + r.req.Pivot = pivot + + return r +} + +// RetentionPolicy Defines a retention policy for the transform. Data that meets the defined +// criteria is deleted from the destination index. 
+// API name: retention_policy +func (r *PreviewTransform) RetentionPolicy(retentionpolicy *types.RetentionPolicyContainer) *PreviewTransform { + + r.req.RetentionPolicy = retentionpolicy + + return r +} + +// Settings Defines optional transform settings. +// API name: settings +func (r *PreviewTransform) Settings(settings *types.Settings) *PreviewTransform { + + r.req.Settings = settings + + return r +} + +// Source The source of the data for the transform. +// API name: source +func (r *PreviewTransform) Source(source *types.TransformSource) *PreviewTransform { + + r.req.Source = source + + return r +} + +// Sync Defines the properties transforms require to run continuously. +// API name: sync +func (r *PreviewTransform) Sync(sync *types.SyncContainer) *PreviewTransform { + + r.req.Sync = sync return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/previewtransform/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/previewtransform/request.go index 375502fd8..3090fdc3b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/previewtransform/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/previewtransform/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package previewtransform @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package previewtransform // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/preview_transform/PreviewTransformRequest.ts#L33-L107 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/preview_transform/PreviewTransformRequest.ts#L33-L107 type Request struct { // Description Free text description of the transform. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/previewtransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/previewtransform/response.go index 66a1c6ed3..d2c971d40 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/previewtransform/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/previewtransform/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package previewtransform @@ -28,7 +28,7 @@ import ( // Response holds the response body struct for the package previewtransform // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/preview_transform/PreviewTransformResponse.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/preview_transform/PreviewTransformResponse.ts#L22-L27 type Response struct { GeneratedDestIndex types.IndexState `json:"generated_dest_index"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/puttransform/put_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/puttransform/put_transform.go index e26f660e3..b4fe9b9a6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/puttransform/put_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/puttransform/put_transform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Instantiates a transform. 
package puttransform @@ -53,8 +53,9 @@ type PutTransform struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +71,7 @@ func NewPutTransformFunc(tp elastictransport.Interface) NewPutTransform { return func(transformid string) *PutTransform { n := New(tp) - n.TransformId(transformid) + n._transformid(transformid) return n } @@ -85,6 +86,8 @@ func New(tp elastictransport.Interface) *PutTransform { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +117,19 @@ func (r *PutTransform) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +137,7 @@ func (r *PutTransform) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -204,7 +218,6 @@ func (r PutTransform) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -213,6 +226,10 @@ func (r PutTransform) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -228,9 +245,9 @@ func (r *PutTransform) Header(key, value string) *PutTransform { // hyphens, and underscores. It has a 64 character limit and must start and end // with alphanumeric characters. 
// API Name: transformid -func (r *PutTransform) TransformId(v string) *PutTransform { +func (r *PutTransform) _transformid(transformid string) *PutTransform { r.paramSet |= transformidMask - r.transformid = v + r.transformid = transformid return r } @@ -245,8 +262,8 @@ func (r *PutTransform) TransformId(v string) *PutTransform { // start the transform, however, with // the exception of privilege checks. // API name: defer_validation -func (r *PutTransform) DeferValidation(b bool) *PutTransform { - r.values.Set("defer_validation", strconv.FormatBool(b)) +func (r *PutTransform) DeferValidation(defervalidation bool) *PutTransform { + r.values.Set("defer_validation", strconv.FormatBool(defervalidation)) return r } @@ -254,8 +271,105 @@ func (r *PutTransform) DeferValidation(b bool) *PutTransform { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. // API name: timeout -func (r *PutTransform) Timeout(v string) *PutTransform { - r.values.Set("timeout", v) +func (r *PutTransform) Timeout(duration string) *PutTransform { + r.values.Set("timeout", duration) + + return r +} + +// Description Free text description of the transform. +// API name: description +func (r *PutTransform) Description(description string) *PutTransform { + + r.req.Description = &description + + return r +} + +// Dest The destination for the transform. +// API name: dest +func (r *PutTransform) Dest(dest *types.TransformDestination) *PutTransform { + + r.req.Dest = *dest + + return r +} + +// Frequency The interval between checks for changes in the source indices when the +// transform is running continuously. Also +// determines the retry interval in the event of transient failures while the +// transform is searching or indexing. +// The minimum value is `1s` and the maximum is `1h`. 
+// API name: frequency +func (r *PutTransform) Frequency(duration types.Duration) *PutTransform { + r.req.Frequency = duration + + return r +} + +// Latest The latest method transforms the data by finding the latest document for each +// unique key. +// API name: latest +func (r *PutTransform) Latest(latest *types.Latest) *PutTransform { + + r.req.Latest = latest + + return r +} + +// Meta_ Defines optional transform metadata. +// API name: _meta +func (r *PutTransform) Meta_(metadata types.Metadata) *PutTransform { + r.req.Meta_ = metadata + + return r +} + +// Pivot The pivot method transforms the data by aggregating and grouping it. These +// objects define the group by fields +// and the aggregation to reduce the data. +// API name: pivot +func (r *PutTransform) Pivot(pivot *types.Pivot) *PutTransform { + + r.req.Pivot = pivot + + return r +} + +// RetentionPolicy Defines a retention policy for the transform. Data that meets the defined +// criteria is deleted from the +// destination index. +// API name: retention_policy +func (r *PutTransform) RetentionPolicy(retentionpolicy *types.RetentionPolicyContainer) *PutTransform { + + r.req.RetentionPolicy = retentionpolicy + + return r +} + +// Settings Defines optional transform settings. +// API name: settings +func (r *PutTransform) Settings(settings *types.Settings) *PutTransform { + + r.req.Settings = settings + + return r +} + +// Source The source of the data for the transform. +// API name: source +func (r *PutTransform) Source(source *types.TransformSource) *PutTransform { + + r.req.Source = *source + + return r +} + +// Sync Defines the properties transforms require to run continuously. 
+// API name: sync +func (r *PutTransform) Sync(sync *types.SyncContainer) *PutTransform { + + r.req.Sync = sync return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/puttransform/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/puttransform/request.go index 3cbd8727b..a1a780dea 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/puttransform/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/puttransform/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package puttransform @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package puttransform // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/put_transform/PutTransformRequest.ts#L33-L122 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/put_transform/PutTransformRequest.ts#L33-L122 type Request struct { // Description Free text description of the transform. @@ -46,7 +46,7 @@ type Request struct { // unique key. Latest *types.Latest `json:"latest,omitempty"` // Meta_ Defines optional transform metadata. - Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` + Meta_ types.Metadata `json:"_meta,omitempty"` // Pivot The pivot method transforms the data by aggregating and grouping it. These // objects define the group by fields // and the aggregation to reduce the data. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/puttransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/puttransform/response.go index d4ef1772a..9e7c5dee4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/puttransform/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/puttransform/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package puttransform // Response holds the response body struct for the package puttransform // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/put_transform/PutTransformResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/put_transform/PutTransformResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/resettransform/reset_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/resettransform/reset_transform.go index b74eddd61..29609d730 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/resettransform/reset_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/resettransform/reset_transform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Resets an existing transform. package resettransform @@ -68,7 +68,7 @@ func NewResetTransformFunc(tp elastictransport.Interface) NewResetTransform { return func(transformid string) *ResetTransform { n := New(tp) - n.TransformId(transformid) + n._transformid(transformid) return n } @@ -171,7 +171,6 @@ func (r ResetTransform) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -180,6 +179,10 @@ func (r ResetTransform) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -216,9 +219,9 @@ func (r *ResetTransform) Header(key, value string) *ResetTransform { // hyphens, and underscores. It has a 64 character limit and must start and end // with alphanumeric characters. // API Name: transformid -func (r *ResetTransform) TransformId(v string) *ResetTransform { +func (r *ResetTransform) _transformid(transformid string) *ResetTransform { r.paramSet |= transformidMask - r.transformid = v + r.transformid = transformid return r } @@ -227,8 +230,8 @@ func (r *ResetTransform) TransformId(v string) *ResetTransform { // state. If it's `false`, the transform // must be stopped before it can be reset. 
// API name: force -func (r *ResetTransform) Force(b bool) *ResetTransform { - r.values.Set("force", strconv.FormatBool(b)) +func (r *ResetTransform) Force(force bool) *ResetTransform { + r.values.Set("force", strconv.FormatBool(force)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/resettransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/resettransform/response.go index 08f777596..65448a314 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/resettransform/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/resettransform/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package resettransform // Response holds the response body struct for the package resettransform // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/reset_transform/ResetTransformResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/reset_transform/ResetTransformResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/schedulenowtransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/schedulenowtransform/response.go new file mode 100644 index 000000000..77ad3b900 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/schedulenowtransform/response.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package schedulenowtransform + +// Response holds the response body struct for the package schedulenowtransform +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/schedule_now_transform/ScheduleNowTransformResponse.ts#L21-L23 + +type Response struct { + + // Acknowledged For a successful response, this value is always true. On failure, an + // exception is returned instead. + Acknowledged bool `json:"acknowledged"` +} + +// NewResponse returns a Response +func NewResponse() *Response { + r := &Response{} + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/schedulenowtransform/schedule_now_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/schedulenowtransform/schedule_now_transform.go new file mode 100644 index 000000000..b162ce2e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/schedulenowtransform/schedule_now_transform.go @@ -0,0 +1,237 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Schedules now a transform. +package schedulenowtransform + +import ( + gobytes "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" + "github.com/elastic/go-elasticsearch/v8/typedapi/types" +) + +const ( + transformidMask = iota + 1 +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type ScheduleNowTransform struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int + + transformid string +} + +// NewScheduleNowTransform type alias for index. +type NewScheduleNowTransform func(transformid string) *ScheduleNowTransform + +// NewScheduleNowTransformFunc returns a new instance of ScheduleNowTransform with the provided transport. 
+// Used in the index of the library this allows to retrieve every apis in once place. +func NewScheduleNowTransformFunc(tp elastictransport.Interface) NewScheduleNowTransform { + return func(transformid string) *ScheduleNowTransform { + n := New(tp) + + n._transformid(transformid) + + return n + } +} + +// Schedules now a transform. +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/schedule-now-transform.html +func New(tp elastictransport.Interface) *ScheduleNowTransform { + r := &ScheduleNowTransform{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. +func (r *ScheduleNowTransform) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == transformidMask: + path.WriteString("/") + path.WriteString("_transform") + path.WriteString("/") + + path.WriteString(r.transformid) + path.WriteString("/") + path.WriteString("_schedule_now") + + method = http.MethodPost + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through 
the provided transport and returns an http.Response. +func (r ScheduleNowTransform) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the ScheduleNowTransform query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a schedulenowtransform.Response +func (r ScheduleNowTransform) Do(ctx context.Context) (*Response, error) { + + response := NewResponse() + + res, err := r.Perform(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + + if res.StatusCode < 299 { + err = json.NewDecoder(res.Body).Decode(response) + if err != nil { + return nil, err + } + + return response, nil + } + + errorResponse := types.NewElasticsearchError() + err = json.NewDecoder(res.Body).Decode(errorResponse) + if err != nil { + return nil, err + } + + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + + return nil, errorResponse +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. +func (r ScheduleNowTransform) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the ScheduleNowTransform headers map. +func (r *ScheduleNowTransform) Header(key, value string) *ScheduleNowTransform { + r.headers.Set(key, value) + + return r +} + +// TransformId Identifier for the transform. 
+// API Name: transformid +func (r *ScheduleNowTransform) _transformid(transformid string) *ScheduleNowTransform { + r.paramSet |= transformidMask + r.transformid = transformid + + return r +} + +// Timeout Controls the time to wait for the scheduling to take place +// API name: timeout +func (r *ScheduleNowTransform) Timeout(duration string) *ScheduleNowTransform { + r.values.Set("timeout", duration) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/starttransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/starttransform/response.go index ee0fe94a5..9b96d207b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/starttransform/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/starttransform/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package starttransform // Response holds the response body struct for the package starttransform // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/start_transform/StartTransformResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/start_transform/StartTransformResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/starttransform/start_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/starttransform/start_transform.go index 8954a07cc..31ffe4f52 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/starttransform/start_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/starttransform/start_transform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Starts one or more transforms. package starttransform @@ -67,7 +67,7 @@ func NewStartTransformFunc(tp elastictransport.Interface) NewStartTransform { return func(transformid string) *StartTransform { n := New(tp) - n.TransformId(transformid) + n._transformid(transformid) return n } @@ -170,7 +170,6 @@ func (r StartTransform) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r StartTransform) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,9 +215,9 @@ func (r *StartTransform) Header(key, value string) *StartTransform { // TransformId Identifier for the transform. // API Name: transformid -func (r *StartTransform) TransformId(v string) *StartTransform { +func (r *StartTransform) _transformid(transformid string) *StartTransform { r.paramSet |= transformidMask - r.transformid = v + r.transformid = transformid return r } @@ -222,8 +225,18 @@ func (r *StartTransform) TransformId(v string) *StartTransform { // Timeout Period to wait for a response. If no response is received before the timeout // expires, the request fails and returns an error. 
// API name: timeout -func (r *StartTransform) Timeout(v string) *StartTransform { - r.values.Set("timeout", v) +func (r *StartTransform) Timeout(duration string) *StartTransform { + r.values.Set("timeout", duration) + + return r +} + +// From Restricts the set of transformed entities to those changed after this time. +// Relative times like now-30d are supported. Only applicable for continuous +// transforms. +// API name: from +func (r *StartTransform) From(from string) *StartTransform { + r.values.Set("from", from) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/stoptransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/stoptransform/response.go index e7c6c6dd0..b98b1f6df 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/stoptransform/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/stoptransform/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package stoptransform // Response holds the response body struct for the package stoptransform // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/stop_transform/StopTransformResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/stop_transform/StopTransformResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/stoptransform/stop_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/stoptransform/stop_transform.go index b603c0892..cf4baa141 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/stoptransform/stop_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/stoptransform/stop_transform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Stops one or more transforms. 
package stoptransform @@ -68,7 +68,7 @@ func NewStopTransformFunc(tp elastictransport.Interface) NewStopTransform { return func(transformid string) *StopTransform { n := New(tp) - n.TransformId(transformid) + n._transformid(transformid) return n } @@ -171,7 +171,6 @@ func (r StopTransform) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -180,6 +179,10 @@ func (r StopTransform) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -215,9 +218,9 @@ func (r *StopTransform) Header(key, value string) *StopTransform { // comma-separated list or a wildcard expression. // To stop all transforms, use `_all` or `*` as the identifier. // API Name: transformid -func (r *StopTransform) TransformId(v string) *StopTransform { +func (r *StopTransform) _transformid(transformid string) *StopTransform { r.paramSet |= transformidMask - r.transformid = v + r.transformid = transformid return r } @@ -235,16 +238,16 @@ func (r *StopTransform) TransformId(v string) *StopTransform { // If it is false, the request returns a 404 status code when there are no // matches or only partial matches. // API name: allow_no_match -func (r *StopTransform) AllowNoMatch(b bool) *StopTransform { - r.values.Set("allow_no_match", strconv.FormatBool(b)) +func (r *StopTransform) AllowNoMatch(allownomatch bool) *StopTransform { + r.values.Set("allow_no_match", strconv.FormatBool(allownomatch)) return r } // Force If it is true, the API forcefully stops the transforms. 
// API name: force -func (r *StopTransform) Force(b bool) *StopTransform { - r.values.Set("force", strconv.FormatBool(b)) +func (r *StopTransform) Force(force bool) *StopTransform { + r.values.Set("force", strconv.FormatBool(force)) return r } @@ -255,8 +258,8 @@ func (r *StopTransform) Force(b bool) *StopTransform { // request continues processing and // eventually moves the transform to a STOPPED state. // API name: timeout -func (r *StopTransform) Timeout(v string) *StopTransform { - r.values.Set("timeout", v) +func (r *StopTransform) Timeout(duration string) *StopTransform { + r.values.Set("timeout", duration) return r } @@ -265,8 +268,8 @@ func (r *StopTransform) Timeout(v string) *StopTransform { // checkpoint is completed. If it is false, // the transform stops as soon as possible. // API name: wait_for_checkpoint -func (r *StopTransform) WaitForCheckpoint(b bool) *StopTransform { - r.values.Set("wait_for_checkpoint", strconv.FormatBool(b)) +func (r *StopTransform) WaitForCheckpoint(waitforcheckpoint bool) *StopTransform { + r.values.Set("wait_for_checkpoint", strconv.FormatBool(waitforcheckpoint)) return r } @@ -275,8 +278,8 @@ func (r *StopTransform) WaitForCheckpoint(b bool) *StopTransform { // is false, the API returns // immediately and the indexer is stopped asynchronously in the background. 
// API name: wait_for_completion -func (r *StopTransform) WaitForCompletion(b bool) *StopTransform { - r.values.Set("wait_for_completion", strconv.FormatBool(b)) +func (r *StopTransform) WaitForCompletion(waitforcompletion bool) *StopTransform { + r.values.Set("wait_for_completion", strconv.FormatBool(waitforcompletion)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/updatetransform/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/updatetransform/request.go index 9dbd55a83..bd552ba0b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/updatetransform/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/updatetransform/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatetransform @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package updatetransform // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/update_transform/UpdateTransformRequest.ts#L31-L105 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/update_transform/UpdateTransformRequest.ts#L31-L105 type Request struct { // Description Free text description of the transform. @@ -42,7 +42,7 @@ type Request struct { // indexing. The minimum value is 1s and the maximum is 1h. Frequency types.Duration `json:"frequency,omitempty"` // Meta_ Defines optional transform metadata. 
- Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` + Meta_ types.Metadata `json:"_meta,omitempty"` // RetentionPolicy Defines a retention policy for the transform. Data that meets the defined // criteria is deleted from the destination index. RetentionPolicy types.RetentionPolicyContainer `json:"retention_policy,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/updatetransform/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/updatetransform/response.go index 2a8ba1769..6b4c1d9c7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/updatetransform/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/updatetransform/response.go @@ -16,19 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package updatetransform import ( - "encoding/json" - "github.com/elastic/go-elasticsearch/v8/typedapi/types" ) // Response holds the response body struct for the package updatetransform // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/update_transform/UpdateTransformResponse.ts#L33-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/update_transform/UpdateTransformResponse.ts#L33-L51 type Response struct { Authorization *types.TransformAuthorization `json:"authorization,omitempty"` @@ -38,7 +36,7 @@ type Response struct { Frequency types.Duration `json:"frequency,omitempty"` Id string `json:"id"` Latest *types.Latest `json:"latest,omitempty"` - Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` + Meta_ types.Metadata 
`json:"_meta,omitempty"` Pivot *types.Pivot `json:"pivot,omitempty"` RetentionPolicy *types.RetentionPolicyContainer `json:"retention_policy,omitempty"` Settings types.Settings `json:"settings"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/updatetransform/update_transform.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/updatetransform/update_transform.go index 813eae53c..625daaad0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/updatetransform/update_transform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/updatetransform/update_transform.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Updates certain properties of a transform. 
package updatetransform @@ -53,8 +53,9 @@ type UpdateTransform struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +71,7 @@ func NewUpdateTransformFunc(tp elastictransport.Interface) NewUpdateTransform { return func(transformid string) *UpdateTransform { n := New(tp) - n.TransformId(transformid) + n._transformid(transformid) return n } @@ -85,6 +86,8 @@ func New(tp elastictransport.Interface) *UpdateTransform { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +117,19 @@ func (r *UpdateTransform) HttpRequest(ctx context.Context) (*http.Request, error var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +137,7 @@ func (r *UpdateTransform) HttpRequest(ctx context.Context) (*http.Request, error } r.buf.Write(data) + } r.path.Scheme = "http" @@ -206,7 +220,6 @@ func (r UpdateTransform) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -215,6 +228,10 @@ func (r UpdateTransform) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -227,9 +244,9 @@ func (r *UpdateTransform) Header(key, value string) *UpdateTransform { // TransformId Identifier for the transform. 
// API Name: transformid -func (r *UpdateTransform) TransformId(v string) *UpdateTransform { +func (r *UpdateTransform) _transformid(transformid string) *UpdateTransform { r.paramSet |= transformidMask - r.transformid = v + r.transformid = transformid return r } @@ -238,8 +255,8 @@ func (r *UpdateTransform) TransformId(v string) *UpdateTransform { // desired if the source index does not exist until after the transform is // created. // API name: defer_validation -func (r *UpdateTransform) DeferValidation(b bool) *UpdateTransform { - r.values.Set("defer_validation", strconv.FormatBool(b)) +func (r *UpdateTransform) DeferValidation(defervalidation bool) *UpdateTransform { + r.values.Set("defer_validation", strconv.FormatBool(defervalidation)) return r } @@ -247,8 +264,81 @@ func (r *UpdateTransform) DeferValidation(b bool) *UpdateTransform { // Timeout Period to wait for a response. If no response is received before the // timeout expires, the request fails and returns an error. // API name: timeout -func (r *UpdateTransform) Timeout(v string) *UpdateTransform { - r.values.Set("timeout", v) +func (r *UpdateTransform) Timeout(duration string) *UpdateTransform { + r.values.Set("timeout", duration) + + return r +} + +// Description Free text description of the transform. +// API name: description +func (r *UpdateTransform) Description(description string) *UpdateTransform { + + r.req.Description = &description + + return r +} + +// Dest The destination for the transform. +// API name: dest +func (r *UpdateTransform) Dest(dest *types.TransformDestination) *UpdateTransform { + + r.req.Dest = dest + + return r +} + +// Frequency The interval between checks for changes in the source indices when the +// transform is running continuously. Also determines the retry interval in +// the event of transient failures while the transform is searching or +// indexing. The minimum value is 1s and the maximum is 1h. 
+// API name: frequency +func (r *UpdateTransform) Frequency(duration types.Duration) *UpdateTransform { + r.req.Frequency = duration + + return r +} + +// Meta_ Defines optional transform metadata. +// API name: _meta +func (r *UpdateTransform) Meta_(metadata types.Metadata) *UpdateTransform { + r.req.Meta_ = metadata + + return r +} + +// RetentionPolicy Defines a retention policy for the transform. Data that meets the defined +// criteria is deleted from the destination index. +// API name: retention_policy +func (r *UpdateTransform) RetentionPolicy(retentionpolicy types.RetentionPolicyContainer) *UpdateTransform { + r.req.RetentionPolicy = retentionpolicy + + return r +} + +// Settings Defines optional transform settings. +// API name: settings +func (r *UpdateTransform) Settings(settings *types.Settings) *UpdateTransform { + + r.req.Settings = settings + + return r +} + +// Source The source of the data for the transform. +// API name: source +func (r *UpdateTransform) Source(source *types.TransformSource) *UpdateTransform { + + r.req.Source = source + + return r +} + +// Sync Defines the properties transforms require to run continuously. +// API name: sync +func (r *UpdateTransform) Sync(sync *types.SyncContainer) *UpdateTransform { + + r.req.Sync = sync return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/upgradetransforms/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/upgradetransforms/response.go index d2f952d58..6864306ac 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/upgradetransforms/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/upgradetransforms/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package upgradetransforms // Response holds the response body struct for the package upgradetransforms // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/upgrade_transforms/UpgradeTransformsResponse.ts#L25-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/upgrade_transforms/UpgradeTransformsResponse.ts#L25-L34 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/upgradetransforms/upgrade_transforms.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/upgradetransforms/upgrade_transforms.go index 67c963860..26ba7e62d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/upgradetransforms/upgrade_transforms.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/transform/upgradetransforms/upgrade_transforms.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Upgrades all transforms. 
package upgradetransforms @@ -166,7 +166,6 @@ func (r UpgradeTransforms) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -175,6 +174,10 @@ func (r UpgradeTransforms) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -208,8 +211,8 @@ func (r *UpgradeTransforms) Header(key, value string) *UpgradeTransforms { // DryRun When true, the request checks for updates but does not run them. // API name: dry_run -func (r *UpgradeTransforms) DryRun(b bool) *UpgradeTransforms { - r.values.Set("dry_run", strconv.FormatBool(b)) +func (r *UpgradeTransforms) DryRun(dryrun bool) *UpgradeTransforms { + r.values.Set("dry_run", strconv.FormatBool(dryrun)) return r } @@ -218,8 +221,8 @@ func (r *UpgradeTransforms) DryRun(b bool) *UpgradeTransforms { // expires, the request fails and // returns an error. // API name: timeout -func (r *UpgradeTransforms) Timeout(v string) *UpgradeTransforms { - r.values.Set("timeout", v) +func (r *UpgradeTransforms) Timeout(duration string) *UpgradeTransforms { + r.values.Set("timeout", duration) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/acknowledgement.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/acknowledgement.go index 8556ea96f..b5bb7b170 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/acknowledgement.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/acknowledgement.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Acknowledgement type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/license/post/types.ts#L20-L23 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/license/post/types.ts#L20-L23 type Acknowledgement struct { License []string `json:"license"` Message string `json:"message"` } +func (s *Acknowledgement) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "license": + if err := dec.Decode(&s.License); err != nil { + return err + } + + case "message": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Message = o + + } + } + return nil +} + // NewAcknowledgement returns a Acknowledgement. func NewAcknowledgement() *Acknowledgement { r := &Acknowledgement{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/acknowledgestate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/acknowledgestate.go index 5df9a9405..4ab39e953 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/acknowledgestate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/acknowledgestate.go @@ -16,22 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/acknowledgementoptions" ) // AcknowledgeState type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Action.ts#L112-L115 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Action.ts#L115-L118 type AcknowledgeState struct { State acknowledgementoptions.AcknowledgementOptions `json:"state"` Timestamp DateTime `json:"timestamp"` } +func (s *AcknowledgeState) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "state": + if err := dec.Decode(&s.State); err != nil { + return err + } + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + } + } + return nil +} + // NewAcknowledgeState returns a AcknowledgeState. func NewAcknowledgeState() *AcknowledgeState { r := &AcknowledgeState{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/actionstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/actionstatus.go index ec5ad8bf8..4f65752b2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/actionstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/actionstatus.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ActionStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Action.ts#L128-L133 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Action.ts#L131-L136 type ActionStatus struct { Ack AcknowledgeState `json:"ack"` LastExecution *ExecutionState `json:"last_execution,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/activationstate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/activationstate.go index 12dd64ee7..3dd86e25f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/activationstate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/activationstate.go @@ -16,18 +16,65 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ActivationState type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Activation.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Activation.ts#L24-L27 type ActivationState struct { Active bool `json:"active"` Timestamp DateTime `json:"timestamp"` } +func (s *ActivationState) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Active = value + case bool: + s.Active = v + } + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + } + } + return nil +} + // NewActivationState returns a ActivationState. func NewActivationState() *ActivationState { r := &ActivationState{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/activationstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/activationstatus.go index 15a049743..1cb537cb7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/activationstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/activationstatus.go @@ -16,19 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ActivationStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Activation.ts#L29-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Activation.ts#L29-L33 type ActivationStatus struct { Actions WatcherStatusActions `json:"actions"` State ActivationState `json:"state"` Version int64 `json:"version"` } +func (s *ActivationStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return err + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewActivationStatus returns a ActivationStatus. func NewActivationStatus() *ActivationStatus { r := &ActivationStatus{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/adaptiveselection.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/adaptiveselection.go index f38fe17b5..cb3d15b23 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/adaptiveselection.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/adaptiveselection.go @@ -16,21 +16,144 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AdaptiveSelection type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L169-L177 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L403-L432 type AdaptiveSelection struct { - AvgQueueSize *int64 `json:"avg_queue_size,omitempty"` - AvgResponseTime Duration `json:"avg_response_time,omitempty"` - AvgResponseTimeNs *int64 `json:"avg_response_time_ns,omitempty"` - AvgServiceTime Duration `json:"avg_service_time,omitempty"` - AvgServiceTimeNs *int64 `json:"avg_service_time_ns,omitempty"` - OutgoingSearches *int64 `json:"outgoing_searches,omitempty"` - Rank *string `json:"rank,omitempty"` + // AvgQueueSize The exponentially weighted moving average queue size of search requests on + // the keyed node. + AvgQueueSize *int64 `json:"avg_queue_size,omitempty"` + // AvgResponseTime The exponentially weighted moving average response time of search requests on + // the keyed node. + AvgResponseTime Duration `json:"avg_response_time,omitempty"` + // AvgResponseTimeNs The exponentially weighted moving average response time, in nanoseconds, of + // search requests on the keyed node. + AvgResponseTimeNs *int64 `json:"avg_response_time_ns,omitempty"` + // AvgServiceTime The exponentially weighted moving average service time of search requests on + // the keyed node. + AvgServiceTime Duration `json:"avg_service_time,omitempty"` + // AvgServiceTimeNs The exponentially weighted moving average service time, in nanoseconds, of + // search requests on the keyed node. + AvgServiceTimeNs *int64 `json:"avg_service_time_ns,omitempty"` + // OutgoingSearches The number of outstanding search requests to the keyed node from the node + // these stats are for. + OutgoingSearches *int64 `json:"outgoing_searches,omitempty"` + // Rank The rank of this node; used for shard selection when routing search requests. 
+ Rank *string `json:"rank,omitempty"` +} + +func (s *AdaptiveSelection) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg_queue_size": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.AvgQueueSize = &value + case float64: + f := int64(v) + s.AvgQueueSize = &f + } + + case "avg_response_time": + if err := dec.Decode(&s.AvgResponseTime); err != nil { + return err + } + + case "avg_response_time_ns": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.AvgResponseTimeNs = &value + case float64: + f := int64(v) + s.AvgResponseTimeNs = &f + } + + case "avg_service_time": + if err := dec.Decode(&s.AvgServiceTime); err != nil { + return err + } + + case "avg_service_time_ns": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.AvgServiceTimeNs = &value + case float64: + f := int64(v) + s.AvgServiceTimeNs = &f + } + + case "outgoing_searches": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.OutgoingSearches = &value + case float64: + f := int64(v) + s.OutgoingSearches = &f + } + + case "rank": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Rank = &o + + } + } + return nil } // NewAdaptiveSelection returns a AdaptiveSelection. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/addaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/addaction.go index cd485272e..12aae3e6a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/addaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/addaction.go @@ -16,25 +16,177 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AddAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/update_aliases/types.ts#L30-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/update_aliases/types.ts#L41-L95 type AddAction struct { - Alias *string `json:"alias,omitempty"` - Aliases []string `json:"aliases,omitempty"` - Filter *Query `json:"filter,omitempty"` - Index *string `json:"index,omitempty"` - IndexRouting *string `json:"index_routing,omitempty"` - Indices []string `json:"indices,omitempty"` - IsHidden *bool `json:"is_hidden,omitempty"` - IsWriteIndex *bool `json:"is_write_index,omitempty"` - MustExist *bool `json:"must_exist,omitempty"` - Routing *string `json:"routing,omitempty"` - SearchRouting *string `json:"search_routing,omitempty"` + // Alias Alias for the action. + // Index alias names support date math. + Alias *string `json:"alias,omitempty"` + // Aliases Aliases for the action. + // Index alias names support date math. + Aliases []string `json:"aliases,omitempty"` + // Filter Query used to limit documents the alias can access. 
+ Filter *Query `json:"filter,omitempty"` + // Index Data stream or index for the action. + // Supports wildcards (`*`). + Index *string `json:"index,omitempty"` + // IndexRouting Value used to route indexing operations to a specific shard. + // If specified, this overwrites the `routing` value for indexing operations. + // Data stream aliases don’t support this parameter. + IndexRouting *string `json:"index_routing,omitempty"` + // Indices Data streams or indices for the action. + // Supports wildcards (`*`). + Indices []string `json:"indices,omitempty"` + // IsHidden If `true`, the alias is hidden. + IsHidden *bool `json:"is_hidden,omitempty"` + // IsWriteIndex If `true`, sets the write index or data stream for the alias. + IsWriteIndex *bool `json:"is_write_index,omitempty"` + // MustExist If `true`, the alias must exist to perform the action. + MustExist *bool `json:"must_exist,omitempty"` + // Routing Value used to route indexing and search operations to a specific shard. + // Data stream aliases don’t support this parameter. + Routing *string `json:"routing,omitempty"` + // SearchRouting Value used to route search operations to a specific shard. + // If specified, this overwrites the `routing` value for search operations. + // Data stream aliases don’t support this parameter. 
+ SearchRouting *string `json:"search_routing,omitempty"` +} + +func (s *AddAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alias": + if err := dec.Decode(&s.Alias); err != nil { + return err + } + + case "aliases": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Aliases = append(s.Aliases, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Aliases); err != nil { + return err + } + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return err + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "index_routing": + if err := dec.Decode(&s.IndexRouting); err != nil { + return err + } + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return err + } + } + + case "is_hidden": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsHidden = &value + case bool: + s.IsHidden = &v + } + + case "is_write_index": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsWriteIndex = &value + case bool: + s.IsWriteIndex = &v + } + + case "must_exist": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case 
string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.MustExist = &value + case bool: + s.MustExist = &v + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + case "search_routing": + if err := dec.Decode(&s.SearchRouting); err != nil { + return err + } + + } + } + return nil } // NewAddAction returns a AddAction. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/adjacencymatrixaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/adjacencymatrixaggregate.go index 92fa1680f..0b2d15597 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/adjacencymatrixaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/adjacencymatrixaggregate.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // AdjacencyMatrixAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L572-L574 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L573-L575 type AdjacencyMatrixAggregate struct { Buckets BucketsAdjacencyMatrixBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *AdjacencyMatrixAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *AdjacencyMatrixAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]AdjacencyMatrixBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []AdjacencyMatrixBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/adjacencymatrixaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/adjacencymatrixaggregation.go index b236bbf77..0f10d0786 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/adjacencymatrixaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/adjacencymatrixaggregation.go @@ -16,21 +16,72 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // AdjacencyMatrixAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L48-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L57-L63 type AdjacencyMatrixAggregation struct { - Filters map[string]Query `json:"filters,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` + // Filters Filters used to create buckets. + // At least one filter is required. + Filters map[string]Query `json:"filters,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` +} + +func (s *AdjacencyMatrixAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filters": + if s.Filters == nil { + s.Filters = make(map[string]Query, 0) + } + if err := dec.Decode(&s.Filters); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + } + } + return nil } // NewAdjacencyMatrixAggregation returns a AdjacencyMatrixAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/adjacencymatrixbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/adjacencymatrixbucket.go index 1b74199ef..2c3bd7c7d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/adjacencymatrixbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/adjacencymatrixbucket.go @@ -16,25 +16,23 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // AdjacencyMatrixBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L576-L578 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L577-L579 type AdjacencyMatrixBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -42,6 +40,7 @@ type AdjacencyMatrixBucket struct { } func (s *AdjacencyMatrixBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -55,456 +54,544 @@ func (s *AdjacencyMatrixBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - 
return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - 
o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - 
if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - 
if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { + case "key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = o - case "key": - if err := dec.Decode(&s.Key); err != nil { - return err + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err 
+ } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := 
NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if 
err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } } } @@ -531,6 +618,7 @@ func (s AdjacencyMatrixBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregate.go index d0fd00c18..fc3a98cc6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregate.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -79,6 +79,7 @@ package types // SignificantStringTermsAggregate // UnmappedSignificantTermsAggregate // CompositeAggregate +// FrequentItemSetsAggregate // ScriptedMetricAggregate // TopHitsAggregate // InferenceAggregate @@ -91,5 +92,5 @@ package types // MatrixStatsAggregate // GeoLineAggregate // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L38-L122 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L38-L123 type Aggregate interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregatemetricdoubleproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregatemetricdoubleproperty.go index ad38c8e26..14644c8b4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregatemetricdoubleproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregatemetricdoubleproperty.go @@ -16,24 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // AggregateMetricDoubleProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/complex.ts#L59-L64 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/complex.ts#L59-L64 type AggregateMetricDoubleProperty struct { DefaultMetric string `json:"default_metric"` Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` @@ -48,6 +48,7 @@ type AggregateMetricDoubleProperty struct { } func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -62,9 +63,16 @@ func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { switch t { case "default_metric": - if err := dec.Decode(&s.DefaultMetric); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DefaultMetric = o case "dynamic": if err := dec.Decode(&s.Dynamic); err != nil { @@ -72,6 +80,9 @@ func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) 
dec.Decode(&refs) for key, message := range refs { @@ -80,7 +91,9 @@ func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -359,18 +372,34 @@ func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } @@ -381,6 +410,9 @@ func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -389,7 +421,9 @@ func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -668,9 +702,11 @@ func (s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -689,6 +725,26 @@ func 
(s *AggregateMetricDoubleProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s AggregateMetricDoubleProperty) MarshalJSON() ([]byte, error) { + type innerAggregateMetricDoubleProperty AggregateMetricDoubleProperty + tmp := innerAggregateMetricDoubleProperty{ + DefaultMetric: s.DefaultMetric, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Metrics: s.Metrics, + Properties: s.Properties, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "aggregate_metric_double" + + return json.Marshal(tmp) +} + // NewAggregateMetricDoubleProperty returns a AggregateMetricDoubleProperty. func NewAggregateMetricDoubleProperty() *AggregateMetricDoubleProperty { r := &AggregateMetricDoubleProperty{ @@ -697,7 +753,5 @@ func NewAggregateMetricDoubleProperty() *AggregateMetricDoubleProperty { Properties: make(map[string]Property, 0), } - r.Type = "aggregate_metric_double" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregateorder.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregateorder.go index b4302be09..69e59770b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregateorder.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregateorder.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]sortorder.SortOrder // []map[string]sortorder.SortOrder // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L403-L405 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L976-L978 type AggregateOrder interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregateoutput.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregateoutput.go index 7a3a228ef..712c10cd0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregateoutput.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregateoutput.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // AggregateOutput type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model/types.ts#L101-L106 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model/types.ts#L101-L106 type AggregateOutput struct { Exponent *Weights `json:"exponent,omitempty"` LogisticRegression *Weights `json:"logistic_regression,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregation.go index 6dbc3b4fc..e73f07dfc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregation.go @@ -16,20 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // Aggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregation.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregation.ts#L22-L25 type Aggregation struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` +} + +func (s *Aggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + } + } + return nil } // NewAggregation returns a Aggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationbreakdown.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationbreakdown.go index 4fbae910a..a76e74873 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationbreakdown.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationbreakdown.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AggregationBreakdown type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/profile.ts#L23-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/profile.ts#L23-L36 type AggregationBreakdown struct { BuildAggregation int64 `json:"build_aggregation"` BuildAggregationCount int64 `json:"build_aggregation_count"` @@ -38,6 +46,206 @@ type AggregationBreakdown struct { ReduceCount int64 `json:"reduce_count"` } +func (s *AggregationBreakdown) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "build_aggregation": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BuildAggregation = value + case float64: + f := int64(v) + s.BuildAggregation = f + } + + case "build_aggregation_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BuildAggregationCount = value + case float64: + f := int64(v) + s.BuildAggregationCount = f + } + + case "build_leaf_collector": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BuildLeafCollector = value + case float64: + f := int64(v) + s.BuildLeafCollector = f + } + + case "build_leaf_collector_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BuildLeafCollectorCount = value + case float64: + f := int64(v) + s.BuildLeafCollectorCount = f + } + + case 
"collect": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Collect = value + case float64: + f := int64(v) + s.Collect = f + } + + case "collect_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CollectCount = value + case float64: + f := int64(v) + s.CollectCount = f + } + + case "initialize": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Initialize = value + case float64: + f := int64(v) + s.Initialize = f + } + + case "initialize_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.InitializeCount = value + case float64: + f := int64(v) + s.InitializeCount = f + } + + case "post_collection": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PostCollection = &value + case float64: + f := int64(v) + s.PostCollection = &f + } + + case "post_collection_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PostCollectionCount = &value + case float64: + f := int64(v) + s.PostCollectionCount = &f + } + + case "reduce": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Reduce = value + case float64: + f := int64(v) + s.Reduce = f + } + + case "reduce_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, 
err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ReduceCount = value + case float64: + f := int64(v) + s.ReduceCount = f + } + + } + } + return nil +} + // NewAggregationBreakdown returns a AggregationBreakdown. func NewAggregationBreakdown() *AggregationBreakdown { r := &AggregationBreakdown{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationprofile.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationprofile.go index ab17ef2d5..d85476f54 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationprofile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationprofile.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AggregationProfile type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/profile.ts#L77-L84 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/profile.ts#L77-L84 type AggregationProfile struct { Breakdown AggregationBreakdown `json:"breakdown"` Children []AggregationProfile `json:"children,omitempty"` @@ -32,6 +40,70 @@ type AggregationProfile struct { Type string `json:"type"` } +func (s *AggregationProfile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "breakdown": + if err := dec.Decode(&s.Breakdown); err != nil { + return err + } + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return err + } + + case "debug": + if err := dec.Decode(&s.Debug); err != nil { + return err + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "time_in_nanos": + if err := dec.Decode(&s.TimeInNanos); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewAggregationProfile returns a AggregationProfile. 
func NewAggregationProfile() *AggregationProfile { r := &AggregationProfile{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationprofiledebug.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationprofiledebug.go index f4f0522dd..7c898bcad 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationprofiledebug.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationprofiledebug.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AggregationProfileDebug type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/profile.ts#L39-L68 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/profile.ts#L39-L68 type AggregationProfileDebug struct { BuiltBuckets *int `json:"built_buckets,omitempty"` CharsFetched *int `json:"chars_fetched,omitempty"` @@ -54,6 +62,423 @@ type AggregationProfileDebug struct { ValuesFetched *int `json:"values_fetched,omitempty"` } +func (s *AggregationProfileDebug) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "built_buckets": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.BuiltBuckets = &value + case float64: + f := int(v) + 
s.BuiltBuckets = &f + } + + case "chars_fetched": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CharsFetched = &value + case float64: + f := int(v) + s.CharsFetched = &f + } + + case "collect_analyzed_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CollectAnalyzedCount = &value + case float64: + f := int(v) + s.CollectAnalyzedCount = &f + } + + case "collect_analyzed_ns": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CollectAnalyzedNs = &value + case float64: + f := int(v) + s.CollectAnalyzedNs = &f + } + + case "collection_strategy": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CollectionStrategy = &o + + case "deferred_aggregators": + if err := dec.Decode(&s.DeferredAggregators); err != nil { + return err + } + + case "delegate": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Delegate = &o + + case "delegate_debug": + if err := dec.Decode(&s.DelegateDebug); err != nil { + return err + } + + case "empty_collectors_used": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.EmptyCollectorsUsed = &value + case float64: + f := int(v) + s.EmptyCollectorsUsed = &f + } + + case "extract_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ExtractCount = 
&value + case float64: + f := int(v) + s.ExtractCount = &f + } + + case "extract_ns": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ExtractNs = &value + case float64: + f := int(v) + s.ExtractNs = &f + } + + case "filters": + if err := dec.Decode(&s.Filters); err != nil { + return err + } + + case "has_filter": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.HasFilter = &value + case bool: + s.HasFilter = &v + } + + case "map_reducer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MapReducer = &o + + case "numeric_collectors_used": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumericCollectorsUsed = &value + case float64: + f := int(v) + s.NumericCollectorsUsed = &f + } + + case "ordinals_collectors_overhead_too_high": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.OrdinalsCollectorsOverheadTooHigh = &value + case float64: + f := int(v) + s.OrdinalsCollectorsOverheadTooHigh = &f + } + + case "ordinals_collectors_used": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.OrdinalsCollectorsUsed = &value + case float64: + f := int(v) + s.OrdinalsCollectorsUsed = &f + } + + case "result_strategy": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultStrategy = &o 
+ + case "segments_collected": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SegmentsCollected = &value + case float64: + f := int(v) + s.SegmentsCollected = &f + } + + case "segments_counted": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SegmentsCounted = &value + case float64: + f := int(v) + s.SegmentsCounted = &f + } + + case "segments_with_deleted_docs": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SegmentsWithDeletedDocs = &value + case float64: + f := int(v) + s.SegmentsWithDeletedDocs = &f + } + + case "segments_with_doc_count_field": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SegmentsWithDocCountField = &value + case float64: + f := int(v) + s.SegmentsWithDocCountField = &f + } + + case "segments_with_multi_valued_ords": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SegmentsWithMultiValuedOrds = &value + case float64: + f := int(v) + s.SegmentsWithMultiValuedOrds = &f + } + + case "segments_with_single_valued_ords": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SegmentsWithSingleValuedOrds = &value + case float64: + f := int(v) + s.SegmentsWithSingleValuedOrds = &f + } + + case "string_hashing_collectors_used": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.StringHashingCollectorsUsed = &value + 
case float64: + f := int(v) + s.StringHashingCollectorsUsed = &f + } + + case "surviving_buckets": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SurvivingBuckets = &value + case float64: + f := int(v) + s.SurvivingBuckets = &f + } + + case "total_buckets": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TotalBuckets = &value + case float64: + f := int(v) + s.TotalBuckets = &f + } + + case "values_fetched": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ValuesFetched = &value + case float64: + f := int(v) + s.ValuesFetched = &f + } + + } + } + return nil +} + // NewAggregationProfileDebug returns a AggregationProfileDebug. func NewAggregationProfileDebug() *AggregationProfileDebug { r := &AggregationProfileDebug{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationprofiledelegatedebugfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationprofiledelegatedebugfilter.go index 3775b8a66..e492e6012 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationprofiledelegatedebugfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationprofiledelegatedebugfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AggregationProfileDelegateDebugFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/profile.ts#L70-L75 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/profile.ts#L70-L75 type AggregationProfileDelegateDebugFilter struct { Query *string `json:"query,omitempty"` ResultsFromMetadata *int `json:"results_from_metadata,omitempty"` @@ -30,6 +38,82 @@ type AggregationProfileDelegateDebugFilter struct { SpecializedFor *string `json:"specialized_for,omitempty"` } +func (s *AggregationProfileDelegateDebugFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = &o + + case "results_from_metadata": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ResultsFromMetadata = &value + case float64: + f := int(v) + s.ResultsFromMetadata = &f + } + + case "segments_counted_in_constant_time": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SegmentsCountedInConstantTime = &value + case float64: + f := int(v) + s.SegmentsCountedInConstantTime = &f + } + + case "specialized_for": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SpecializedFor = &o + + } + } + return nil +} + // NewAggregationProfileDelegateDebugFilter returns a 
AggregationProfileDelegateDebugFilter. func NewAggregationProfileDelegateDebugFilter() *AggregationProfileDelegateDebugFilter { r := &AggregationProfileDelegateDebugFilter{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationrange.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationrange.go index 568d146a9..3164e9f26 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationrange.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregationrange.go @@ -16,17 +16,84 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AggregationRange type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L298-L302 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L672-L685 type AggregationRange struct { - From string `json:"from,omitempty"` - Key *string `json:"key,omitempty"` - To string `json:"to,omitempty"` + // From Start of the range (inclusive). + From string `json:"from,omitempty"` + // Key Custom key to return the range with. + Key *string `json:"key,omitempty"` + // To End of the range (exclusive). 
+ To string `json:"to,omitempty"` +} + +func (s *AggregationRange) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "from": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.From = o + + case "key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = &o + + case "to": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.To = o + + } + } + return nil } // NewAggregationRange returns a AggregationRange. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregations.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregations.go index 8f52e2095..f1c2e9f8b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregations.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aggregations.go @@ -16,103 +16,278 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // Aggregations type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/AggregationContainer.ts#L105-L209 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/AggregationContainer.ts#L106-L515 type Aggregations struct { + // AdjacencyMatrix A bucket aggregation returning a form of adjacency matrix. + // The request provides a collection of named filter expressions, similar to the + // `filters` aggregation. + // Each bucket in the response represents a non-empty cell in the matrix of + // intersecting filters. AdjacencyMatrix *AdjacencyMatrixAggregation `json:"adjacency_matrix,omitempty"` - // Aggregations Sub-aggregations for this aggregation. Only applies to bucket aggregations. - Aggregations map[string]Aggregations `json:"aggregations,omitempty"` - AutoDateHistogram *AutoDateHistogramAggregation `json:"auto_date_histogram,omitempty"` - Avg *AverageAggregation `json:"avg,omitempty"` - AvgBucket *AverageBucketAggregation `json:"avg_bucket,omitempty"` - Boxplot *BoxplotAggregation `json:"boxplot,omitempty"` - BucketCorrelation *BucketCorrelationAggregation `json:"bucket_correlation,omitempty"` - BucketCountKsTest *BucketKsAggregation `json:"bucket_count_ks_test,omitempty"` - BucketScript *BucketScriptAggregation `json:"bucket_script,omitempty"` - BucketSelector *BucketSelectorAggregation `json:"bucket_selector,omitempty"` - BucketSort *BucketSortAggregation `json:"bucket_sort,omitempty"` - Cardinality *CardinalityAggregation `json:"cardinality,omitempty"` - CategorizeText *CategorizeTextAggregation `json:"categorize_text,omitempty"` - Children *ChildrenAggregation `json:"children,omitempty"` - Composite *CompositeAggregation `json:"composite,omitempty"` - CumulativeCardinality *CumulativeCardinalityAggregation `json:"cumulative_cardinality,omitempty"` - CumulativeSum *CumulativeSumAggregation 
`json:"cumulative_sum,omitempty"` - DateHistogram *DateHistogramAggregation `json:"date_histogram,omitempty"` - DateRange *DateRangeAggregation `json:"date_range,omitempty"` - Derivative *DerivativeAggregation `json:"derivative,omitempty"` - DiversifiedSampler *DiversifiedSamplerAggregation `json:"diversified_sampler,omitempty"` - ExtendedStats *ExtendedStatsAggregation `json:"extended_stats,omitempty"` - ExtendedStatsBucket *ExtendedStatsBucketAggregation `json:"extended_stats_bucket,omitempty"` - Filter *Query `json:"filter,omitempty"` - Filters *FiltersAggregation `json:"filters,omitempty"` - GeoBounds *GeoBoundsAggregation `json:"geo_bounds,omitempty"` - GeoCentroid *GeoCentroidAggregation `json:"geo_centroid,omitempty"` - GeoDistance *GeoDistanceAggregation `json:"geo_distance,omitempty"` - GeoLine *GeoLineAggregation `json:"geo_line,omitempty"` - GeohashGrid *GeoHashGridAggregation `json:"geohash_grid,omitempty"` - GeohexGrid *GeohexGridAggregation `json:"geohex_grid,omitempty"` - GeotileGrid *GeoTileGridAggregation `json:"geotile_grid,omitempty"` - Global *GlobalAggregation `json:"global,omitempty"` - Histogram *HistogramAggregation `json:"histogram,omitempty"` - Inference *InferenceAggregation `json:"inference,omitempty"` - IpPrefix *IpPrefixAggregation `json:"ip_prefix,omitempty"` - IpRange *IpRangeAggregation `json:"ip_range,omitempty"` - Line *GeoLineAggregation `json:"line,omitempty"` - MatrixStats *MatrixStatsAggregation `json:"matrix_stats,omitempty"` - Max *MaxAggregation `json:"max,omitempty"` - MaxBucket *MaxBucketAggregation `json:"max_bucket,omitempty"` + // Aggregations Sub-aggregations for this aggregation. + // Only applies to bucket aggregations. + Aggregations map[string]Aggregations `json:"aggregations,omitempty"` + // AutoDateHistogram A multi-bucket aggregation similar to the date histogram, except instead of + // providing an interval to use as the width of each bucket, a target number of + // buckets is provided. 
+ AutoDateHistogram *AutoDateHistogramAggregation `json:"auto_date_histogram,omitempty"` + // Avg A single-value metrics aggregation that computes the average of numeric + // values that are extracted from the aggregated documents. + Avg *AverageAggregation `json:"avg,omitempty"` + // AvgBucket A sibling pipeline aggregation which calculates the mean value of a specified + // metric in a sibling aggregation. + // The specified metric must be numeric and the sibling aggregation must be a + // multi-bucket aggregation. + AvgBucket *AverageBucketAggregation `json:"avg_bucket,omitempty"` + // Boxplot A metrics aggregation that computes a box plot of numeric values extracted + // from the aggregated documents. + Boxplot *BoxplotAggregation `json:"boxplot,omitempty"` + // BucketCorrelation A sibling pipeline aggregation which runs a correlation function on the + // configured sibling multi-bucket aggregation. + BucketCorrelation *BucketCorrelationAggregation `json:"bucket_correlation,omitempty"` + // BucketCountKsTest A sibling pipeline aggregation which runs a two sample Kolmogorov–Smirnov + // test ("K-S test") against a provided distribution and the distribution + // implied by the documents counts in the configured sibling aggregation. + BucketCountKsTest *BucketKsAggregation `json:"bucket_count_ks_test,omitempty"` + // BucketScript A parent pipeline aggregation which runs a script which can perform per + // bucket computations on metrics in the parent multi-bucket aggregation. + BucketScript *BucketScriptAggregation `json:"bucket_script,omitempty"` + // BucketSelector A parent pipeline aggregation which runs a script to determine whether the + // current bucket will be retained in the parent multi-bucket aggregation. + BucketSelector *BucketSelectorAggregation `json:"bucket_selector,omitempty"` + // BucketSort A parent pipeline aggregation which sorts the buckets of its parent + // multi-bucket aggregation. 
+ BucketSort *BucketSortAggregation `json:"bucket_sort,omitempty"` + // Cardinality A single-value metrics aggregation that calculates an approximate count of + // distinct values. + Cardinality *CardinalityAggregation `json:"cardinality,omitempty"` + // CategorizeText A multi-bucket aggregation that groups semi-structured text into buckets. + CategorizeText *CategorizeTextAggregation `json:"categorize_text,omitempty"` + // Children A single bucket aggregation that selects child documents that have the + // specified type, as defined in a `join` field. + Children *ChildrenAggregation `json:"children,omitempty"` + // Composite A multi-bucket aggregation that creates composite buckets from different + // sources. + // Unlike the other multi-bucket aggregations, you can use the `composite` + // aggregation to paginate *all* buckets from a multi-level aggregation + // efficiently. + Composite *CompositeAggregation `json:"composite,omitempty"` + // CumulativeCardinality A parent pipeline aggregation which calculates the cumulative cardinality in + // a parent `histogram` or `date_histogram` aggregation. + CumulativeCardinality *CumulativeCardinalityAggregation `json:"cumulative_cardinality,omitempty"` + // CumulativeSum A parent pipeline aggregation which calculates the cumulative sum of a + // specified metric in a parent `histogram` or `date_histogram` aggregation. + CumulativeSum *CumulativeSumAggregation `json:"cumulative_sum,omitempty"` + // DateHistogram A multi-bucket values source based aggregation that can be applied on date + // values or date range values extracted from the documents. + // It dynamically builds fixed size (interval) buckets over the values. + DateHistogram *DateHistogramAggregation `json:"date_histogram,omitempty"` + // DateRange A multi-bucket value source based aggregation that enables the user to define + // a set of date ranges - each representing a bucket. 
+ DateRange *DateRangeAggregation `json:"date_range,omitempty"` + // Derivative A parent pipeline aggregation which calculates the derivative of a specified + // metric in a parent `histogram` or `date_histogram` aggregation. + Derivative *DerivativeAggregation `json:"derivative,omitempty"` + // DiversifiedSampler A filtering aggregation used to limit any sub aggregations' processing to a + // sample of the top-scoring documents. + // Similar to the `sampler` aggregation, but adds the ability to limit the + // number of matches that share a common value. + DiversifiedSampler *DiversifiedSamplerAggregation `json:"diversified_sampler,omitempty"` + // ExtendedStats A multi-value metrics aggregation that computes stats over numeric values + // extracted from the aggregated documents. + ExtendedStats *ExtendedStatsAggregation `json:"extended_stats,omitempty"` + // ExtendedStatsBucket A sibling pipeline aggregation which calculates a variety of stats across all + // bucket of a specified metric in a sibling aggregation. + ExtendedStatsBucket *ExtendedStatsBucketAggregation `json:"extended_stats_bucket,omitempty"` + // Filter A single bucket aggregation that narrows the set of documents to those that + // match a query. + Filter *Query `json:"filter,omitempty"` + // Filters A multi-bucket aggregation where each bucket contains the documents that + // match a query. + Filters *FiltersAggregation `json:"filters,omitempty"` + // FrequentItemSets A bucket aggregation which finds frequent item sets, a form of association + // rules mining that identifies items that often occur together. + FrequentItemSets *FrequentItemSetsAggregation `json:"frequent_item_sets,omitempty"` + // GeoBounds A metric aggregation that computes the geographic bounding box containing all + // values for a Geopoint or Geoshape field. 
+ GeoBounds *GeoBoundsAggregation `json:"geo_bounds,omitempty"` + // GeoCentroid A metric aggregation that computes the weighted centroid from all coordinate + // values for geo fields. + GeoCentroid *GeoCentroidAggregation `json:"geo_centroid,omitempty"` + // GeoDistance A multi-bucket aggregation that works on `geo_point` fields. + // Evaluates the distance of each document value from an origin point and + // determines the buckets it belongs to, based on ranges defined in the request. + GeoDistance *GeoDistanceAggregation `json:"geo_distance,omitempty"` + // GeoLine Aggregates all `geo_point` values within a bucket into a `LineString` ordered + // by the chosen sort field. + GeoLine *GeoLineAggregation `json:"geo_line,omitempty"` + // GeohashGrid A multi-bucket aggregation that groups `geo_point` and `geo_shape` values + // into buckets that represent a grid. + // Each cell is labeled using a geohash which is of user-definable precision. + GeohashGrid *GeoHashGridAggregation `json:"geohash_grid,omitempty"` + // GeohexGrid A multi-bucket aggregation that groups `geo_point` and `geo_shape` values + // into buckets that represent a grid. + // Each cell corresponds to a H3 cell index and is labeled using the H3Index + // representation. + GeohexGrid *GeohexGridAggregation `json:"geohex_grid,omitempty"` + // GeotileGrid A multi-bucket aggregation that groups `geo_point` and `geo_shape` values + // into buckets that represent a grid. + // Each cell corresponds to a map tile as used by many online map sites. + GeotileGrid *GeoTileGridAggregation `json:"geotile_grid,omitempty"` + // Global Defines a single bucket of all the documents within the search execution + // context. + // This context is defined by the indices and the document types you’re + // searching on, but is not influenced by the search query itself. 
+ Global *GlobalAggregation `json:"global,omitempty"` + // Histogram A multi-bucket values source based aggregation that can be applied on numeric + // values or numeric range values extracted from the documents. + // It dynamically builds fixed size (interval) buckets over the values. + Histogram *HistogramAggregation `json:"histogram,omitempty"` + // Inference A parent pipeline aggregation which loads a pre-trained model and performs + // inference on the collated result fields from the parent bucket aggregation. + Inference *InferenceAggregation `json:"inference,omitempty"` + // IpPrefix A bucket aggregation that groups documents based on the network or + // sub-network of an IP address. + IpPrefix *IpPrefixAggregation `json:"ip_prefix,omitempty"` + // IpRange A multi-bucket value source based aggregation that enables the user to define + // a set of IP ranges - each representing a bucket. + IpRange *IpRangeAggregation `json:"ip_range,omitempty"` + Line *GeoLineAggregation `json:"line,omitempty"` + // MatrixStats A numeric aggregation that computes the following statistics over a set of + // document fields: `count`, `mean`, `variance`, `skewness`, `kurtosis`, + // `covariance`, and `covariance`. + MatrixStats *MatrixStatsAggregation `json:"matrix_stats,omitempty"` + // Max A single-value metrics aggregation that returns the maximum value among the + // numeric values extracted from the aggregated documents. + Max *MaxAggregation `json:"max,omitempty"` + // MaxBucket A sibling pipeline aggregation which identifies the bucket(s) with the + // maximum value of a specified metric in a sibling aggregation and outputs both + // the value and the key(s) of the bucket(s). + MaxBucket *MaxBucketAggregation `json:"max_bucket,omitempty"` + // MedianAbsoluteDeviation A single-value aggregation that approximates the median absolute deviation of + // its search results. 
MedianAbsoluteDeviation *MedianAbsoluteDeviationAggregation `json:"median_absolute_deviation,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Min *MinAggregation `json:"min,omitempty"` - MinBucket *MinBucketAggregation `json:"min_bucket,omitempty"` - Missing *MissingAggregation `json:"missing,omitempty"` - MovingAvg MovingAverageAggregation `json:"moving_avg,omitempty"` - MovingFn *MovingFunctionAggregation `json:"moving_fn,omitempty"` - MovingPercentiles *MovingPercentilesAggregation `json:"moving_percentiles,omitempty"` - MultiTerms *MultiTermsAggregation `json:"multi_terms,omitempty"` - Nested *NestedAggregation `json:"nested,omitempty"` - Normalize *NormalizeAggregation `json:"normalize,omitempty"` - Parent *ParentAggregation `json:"parent,omitempty"` - PercentileRanks *PercentileRanksAggregation `json:"percentile_ranks,omitempty"` - Percentiles *PercentilesAggregation `json:"percentiles,omitempty"` - PercentilesBucket *PercentilesBucketAggregation `json:"percentiles_bucket,omitempty"` - Range *RangeAggregation `json:"range,omitempty"` - RareTerms *RareTermsAggregation `json:"rare_terms,omitempty"` - Rate *RateAggregation `json:"rate,omitempty"` - ReverseNested *ReverseNestedAggregation `json:"reverse_nested,omitempty"` - Sampler *SamplerAggregation `json:"sampler,omitempty"` - ScriptedMetric *ScriptedMetricAggregation `json:"scripted_metric,omitempty"` - SerialDiff *SerialDifferencingAggregation `json:"serial_diff,omitempty"` - SignificantTerms *SignificantTermsAggregation `json:"significant_terms,omitempty"` - SignificantText *SignificantTextAggregation `json:"significant_text,omitempty"` - Stats *StatsAggregation `json:"stats,omitempty"` - StatsBucket *StatsBucketAggregation `json:"stats_bucket,omitempty"` - StringStats *StringStatsAggregation `json:"string_stats,omitempty"` - Sum *SumAggregation `json:"sum,omitempty"` - SumBucket *SumBucketAggregation `json:"sum_bucket,omitempty"` - TTest *TTestAggregation `json:"t_test,omitempty"` - 
Terms *TermsAggregation `json:"terms,omitempty"` - TopHits *TopHitsAggregation `json:"top_hits,omitempty"` - TopMetrics *TopMetricsAggregation `json:"top_metrics,omitempty"` - ValueCount *ValueCountAggregation `json:"value_count,omitempty"` - VariableWidthHistogram *VariableWidthHistogramAggregation `json:"variable_width_histogram,omitempty"` - WeightedAvg *WeightedAverageAggregation `json:"weighted_avg,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // Min A single-value metrics aggregation that returns the minimum value among + // numeric values extracted from the aggregated documents. + Min *MinAggregation `json:"min,omitempty"` + // MinBucket A sibling pipeline aggregation which identifies the bucket(s) with the + // minimum value of a specified metric in a sibling aggregation and outputs both + // the value and the key(s) of the bucket(s). + MinBucket *MinBucketAggregation `json:"min_bucket,omitempty"` + // Missing A field data based single bucket aggregation, that creates a bucket of all + // documents in the current document set context that are missing a field value + // (effectively, missing a field or having the configured NULL value set). + Missing *MissingAggregation `json:"missing,omitempty"` + MovingAvg MovingAverageAggregation `json:"moving_avg,omitempty"` + // MovingFn Given an ordered series of data, "slides" a window across the data and runs a + // custom script on each window of data. + // For convenience, a number of common functions are predefined such as `min`, + // `max`, and moving averages. + MovingFn *MovingFunctionAggregation `json:"moving_fn,omitempty"` + // MovingPercentiles Given an ordered series of percentiles, "slides" a window across those + // percentiles and computes cumulative percentiles. + MovingPercentiles *MovingPercentilesAggregation `json:"moving_percentiles,omitempty"` + // MultiTerms A multi-bucket value source based aggregation where buckets are dynamically + // built - one per unique set of values. 
+ MultiTerms *MultiTermsAggregation `json:"multi_terms,omitempty"` + // Nested A special single bucket aggregation that enables aggregating nested + // documents. + Nested *NestedAggregation `json:"nested,omitempty"` + // Normalize A parent pipeline aggregation which calculates the specific + // normalized/rescaled value for a specific bucket value. + Normalize *NormalizeAggregation `json:"normalize,omitempty"` + // Parent A special single bucket aggregation that selects parent documents that have + // the specified type, as defined in a `join` field. + Parent *ParentAggregation `json:"parent,omitempty"` + // PercentileRanks A multi-value metrics aggregation that calculates one or more percentile + // ranks over numeric values extracted from the aggregated documents. + PercentileRanks *PercentileRanksAggregation `json:"percentile_ranks,omitempty"` + // Percentiles A multi-value metrics aggregation that calculates one or more percentiles + // over numeric values extracted from the aggregated documents. + Percentiles *PercentilesAggregation `json:"percentiles,omitempty"` + // PercentilesBucket A sibling pipeline aggregation which calculates percentiles across all bucket + // of a specified metric in a sibling aggregation. + PercentilesBucket *PercentilesBucketAggregation `json:"percentiles_bucket,omitempty"` + // Range A multi-bucket value source based aggregation that enables the user to define + // a set of ranges - each representing a bucket. + Range *RangeAggregation `json:"range,omitempty"` + // RareTerms A multi-bucket value source based aggregation which finds "rare" terms — + // terms that are at the long-tail of the distribution and are not frequent. + RareTerms *RareTermsAggregation `json:"rare_terms,omitempty"` + // Rate Calculates a rate of documents or a field in each bucket. + // Can only be used inside a `date_histogram` or `composite` aggregation. 
+ Rate *RateAggregation `json:"rate,omitempty"` + // ReverseNested A special single bucket aggregation that enables aggregating on parent + // documents from nested documents. + // Should only be defined inside a `nested` aggregation. + ReverseNested *ReverseNestedAggregation `json:"reverse_nested,omitempty"` + // Sampler A filtering aggregation used to limit any sub aggregations' processing to a + // sample of the top-scoring documents. + Sampler *SamplerAggregation `json:"sampler,omitempty"` + // ScriptedMetric A metric aggregation that uses scripts to provide a metric output. + ScriptedMetric *ScriptedMetricAggregation `json:"scripted_metric,omitempty"` + // SerialDiff An aggregation that subtracts values in a time series from themselves at + // different time lags or periods. + SerialDiff *SerialDifferencingAggregation `json:"serial_diff,omitempty"` + // SignificantTerms Returns interesting or unusual occurrences of terms in a set. + SignificantTerms *SignificantTermsAggregation `json:"significant_terms,omitempty"` + // SignificantText Returns interesting or unusual occurrences of free-text terms in a set. + SignificantText *SignificantTextAggregation `json:"significant_text,omitempty"` + // Stats A multi-value metrics aggregation that computes stats over numeric values + // extracted from the aggregated documents. + Stats *StatsAggregation `json:"stats,omitempty"` + // StatsBucket A sibling pipeline aggregation which calculates a variety of stats across all + // bucket of a specified metric in a sibling aggregation. + StatsBucket *StatsBucketAggregation `json:"stats_bucket,omitempty"` + // StringStats A multi-value metrics aggregation that computes statistics over string values + // extracted from the aggregated documents. + StringStats *StringStatsAggregation `json:"string_stats,omitempty"` + // Sum A single-value metrics aggregation that sums numeric values that are + // extracted from the aggregated documents. 
+ Sum *SumAggregation `json:"sum,omitempty"` + // SumBucket A sibling pipeline aggregation which calculates the sum of a specified metric + // across all buckets in a sibling aggregation. + SumBucket *SumBucketAggregation `json:"sum_bucket,omitempty"` + // TTest A metrics aggregation that performs a statistical hypothesis test in which + // the test statistic follows a Student’s t-distribution under the null + // hypothesis on numeric values extracted from the aggregated documents. + TTest *TTestAggregation `json:"t_test,omitempty"` + // Terms A multi-bucket value source based aggregation where buckets are dynamically + // built - one per unique value. + Terms *TermsAggregation `json:"terms,omitempty"` + // TopHits A metric aggregation that returns the top matching documents per bucket. + TopHits *TopHitsAggregation `json:"top_hits,omitempty"` + // TopMetrics A metric aggregation that selects metrics from the document with the largest + // or smallest sort value. + TopMetrics *TopMetricsAggregation `json:"top_metrics,omitempty"` + // ValueCount A single-value metrics aggregation that counts the number of values that are + // extracted from the aggregated documents. + ValueCount *ValueCountAggregation `json:"value_count,omitempty"` + // VariableWidthHistogram A multi-bucket aggregation similar to the histogram, except instead of + // providing an interval to use as the width of each bucket, a target number of + // buckets is provided. + VariableWidthHistogram *VariableWidthHistogramAggregation `json:"variable_width_histogram,omitempty"` + // WeightedAvg A single-value metrics aggregation that computes the weighted average of + // numeric values that are extracted from the aggregated documents. 
+ WeightedAvg *WeightedAverageAggregation `json:"weighted_avg,omitempty"` } func (s *Aggregations) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -132,6 +307,9 @@ func (s *Aggregations) UnmarshalJSON(data []byte) error { } case "aggregations", "aggs": + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregations, 0) + } if err := dec.Decode(&s.Aggregations); err != nil { return err } @@ -251,6 +429,11 @@ func (s *Aggregations) UnmarshalJSON(data []byte) error { return err } + case "frequent_item_sets": + if err := dec.Decode(&s.FrequentItemSets); err != nil { + return err + } + case "geo_bounds": if err := dec.Decode(&s.GeoBounds); err != nil { return err @@ -370,36 +553,36 @@ func (s *Aggregations) UnmarshalJSON(data []byte) error { case "linear": o := NewLinearMovingAverageAggregation() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.MovingAvg = *o case "simple": o := NewSimpleMovingAverageAggregation() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.MovingAvg = *o case "ewma": o := NewEwmaMovingAverageAggregation() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.MovingAvg = *o case "holt": o := NewHoltMovingAverageAggregation() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.MovingAvg = *o case "holt_winters": o := NewHoltWintersMovingAverageAggregation() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.MovingAvg = *o default: - if err := dec.Decode(&s.MovingAvg); err != nil { + if err := localDec.Decode(&s.MovingAvg); err != nil { return err } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/alias.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/alias.go index 
73731cf47..fc70c17dd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/alias.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/alias.go @@ -16,22 +16,107 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Alias type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/Alias.ts#L23-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/Alias.ts#L23-L53 type Alias struct { - Filter *Query `json:"filter,omitempty"` - IndexRouting *string `json:"index_routing,omitempty"` - IsHidden *bool `json:"is_hidden,omitempty"` - IsWriteIndex *bool `json:"is_write_index,omitempty"` - Routing *string `json:"routing,omitempty"` + // Filter Query used to limit documents the alias can access. + Filter *Query `json:"filter,omitempty"` + // IndexRouting Value used to route indexing operations to a specific shard. + // If specified, this overwrites the `routing` value for indexing operations. + IndexRouting *string `json:"index_routing,omitempty"` + // IsHidden If `true`, the alias is hidden. + // All indices for the alias must have the same `is_hidden` value. + IsHidden *bool `json:"is_hidden,omitempty"` + // IsWriteIndex If `true`, the index is the write index for the alias. + IsWriteIndex *bool `json:"is_write_index,omitempty"` + // Routing Value used to route indexing and search operations to a specific shard. + Routing *string `json:"routing,omitempty"` + // SearchRouting Value used to route search operations to a specific shard. 
+ // If specified, this overwrites the `routing` value for search operations. SearchRouting *string `json:"search_routing,omitempty"` } +func (s *Alias) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return err + } + + case "index_routing": + if err := dec.Decode(&s.IndexRouting); err != nil { + return err + } + + case "is_hidden": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsHidden = &value + case bool: + s.IsHidden = &v + } + + case "is_write_index": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsWriteIndex = &value + case bool: + s.IsWriteIndex = &v + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + case "search_routing": + if err := dec.Decode(&s.SearchRouting); err != nil { + return err + } + + } + } + return nil +} + // NewAlias returns a Alias. func NewAlias() *Alias { r := &Alias{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aliasdefinition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aliasdefinition.go index b6197d2f2..6b2647913 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aliasdefinition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aliasdefinition.go @@ -16,22 +16,128 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AliasDefinition type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/AliasDefinition.ts#L22-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/AliasDefinition.ts#L22-L54 type AliasDefinition struct { - Filter *Query `json:"filter,omitempty"` - IndexRouting *string `json:"index_routing,omitempty"` - IsHidden *bool `json:"is_hidden,omitempty"` - IsWriteIndex *bool `json:"is_write_index,omitempty"` - Routing *string `json:"routing,omitempty"` + // Filter Query used to limit documents the alias can access. + Filter *Query `json:"filter,omitempty"` + // IndexRouting Value used to route indexing operations to a specific shard. + // If specified, this overwrites the `routing` value for indexing operations. + IndexRouting *string `json:"index_routing,omitempty"` + // IsHidden If `true`, the alias is hidden. + // All indices for the alias must have the same `is_hidden` value. + IsHidden *bool `json:"is_hidden,omitempty"` + // IsWriteIndex If `true`, the index is the write index for the alias. + IsWriteIndex *bool `json:"is_write_index,omitempty"` + // Routing Value used to route indexing and search operations to a specific shard. + Routing *string `json:"routing,omitempty"` + // SearchRouting Value used to route search operations to a specific shard. + // If specified, this overwrites the `routing` value for search operations. 
SearchRouting *string `json:"search_routing,omitempty"` } +func (s *AliasDefinition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return err + } + + case "index_routing": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexRouting = &o + + case "is_hidden": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsHidden = &value + case bool: + s.IsHidden = &v + } + + case "is_write_index": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsWriteIndex = &value + case bool: + s.IsWriteIndex = &v + } + + case "routing": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Routing = &o + + case "search_routing": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchRouting = &o + + } + } + return nil +} + // NewAliasDefinition returns a AliasDefinition. 
func NewAliasDefinition() *AliasDefinition { r := &AliasDefinition{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aliasesrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aliasesrecord.go index 539349e51..cce86e5f6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aliasesrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/aliasesrecord.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AliasesRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/aliases/types.ts#L22-L53 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/aliases/types.ts#L22-L53 type AliasesRecord struct { // Alias alias name Alias *string `json:"alias,omitempty"` @@ -38,6 +46,91 @@ type AliasesRecord struct { RoutingSearch *string `json:"routing.search,omitempty"` } +func (s *AliasesRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alias", "a": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Alias = &o + + case "filter", "f", "fi": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != 
nil { + o = string(tmp[:]) + } + s.Filter = &o + + case "index", "i", "idx": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "is_write_index", "w", "isWriteIndex": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IsWriteIndex = &o + + case "routing.index", "ri", "routingIndex": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RoutingIndex = &o + + case "routing.search", "rs", "routingSearch": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RoutingSearch = &o + + } + } + return nil +} + // NewAliasesRecord returns a AliasesRecord. func NewAliasesRecord() *AliasesRecord { r := &AliasesRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/allfield.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/allfield.go index 2fd98054a..c3fa43ffc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/allfield.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/allfield.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AllField type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/meta-fields.ts#L29-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/meta-fields.ts#L29-L40 type AllField struct { Analyzer string `json:"analyzer"` Enabled bool `json:"enabled"` @@ -36,6 +44,160 @@ type AllField struct { StoreTermVectors bool `json:"store_term_vectors"` } +func (s *AllField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = o + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "omit_norms": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.OmitNorms = value + case bool: + s.OmitNorms = v + } + + case "search_analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchAnalyzer = o + + case "similarity": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = o + + case "store": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = value + case bool: + s.Store = v + } + + case "store_term_vector_offsets": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.StoreTermVectorOffsets = value + case bool: + s.StoreTermVectorOffsets = v + } + + case "store_term_vector_payloads": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.StoreTermVectorPayloads = value + case bool: + s.StoreTermVectorPayloads = v + } + + case "store_term_vector_positions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.StoreTermVectorPositions = value + case bool: + s.StoreTermVectorPositions = v + } + + case "store_term_vectors": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.StoreTermVectors = value + case bool: + s.StoreTermVectors = v + } + + } + } + return nil +} + // NewAllField returns a AllField. func NewAllField() *AllField { r := &AllField{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/allocationdecision.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/allocationdecision.go index 4b2043053..e92ecfdaf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/allocationdecision.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/allocationdecision.go @@ -16,23 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/allocationexplaindecision" ) // AllocationDecision type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/allocation_explain/types.ts#L26-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/allocation_explain/types.ts#L26-L30 type AllocationDecision struct { Decider string `json:"decider"` Decision allocationexplaindecision.AllocationExplainDecision `json:"decision"` Explanation string `json:"explanation"` } +func (s *AllocationDecision) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decider": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Decider = o + + case "decision": + if err := dec.Decode(&s.Decision); err != nil { + return err + } + + case "explanation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Explanation = o + + } + } + return nil +} + // NewAllocationDecision returns a AllocationDecision. 
func NewAllocationDecision() *AllocationDecision { r := &AllocationDecision{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/allocationrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/allocationrecord.go index 97d3ab224..1a8e93b87 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/allocationrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/allocationrecord.go @@ -16,34 +16,134 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AllocationRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/allocation/types.ts#L24-L69 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/allocation/types.ts#L24-L75 type AllocationRecord struct { - // DiskAvail disk available + // DiskAvail Free disk space available to Elasticsearch. + // Elasticsearch retrieves this metric from the node’s operating system. + // Disk-based shard allocation uses this metric to assign shards to nodes based + // on available disk space. DiskAvail ByteSize `json:"disk.avail,omitempty"` - // DiskIndices disk used by ES indices + // DiskIndices Disk space used by the node’s shards. Does not include disk space for the + // translog or unassigned shards. + // IMPORTANT: This metric double-counts disk space for hard-linked files, such + // as those created when shrinking, splitting, or cloning an index. 
DiskIndices ByteSize `json:"disk.indices,omitempty"` - // DiskPercent percent disk used + // DiskPercent Total percentage of disk space in use. Calculated as `disk.used / + // disk.total`. DiskPercent Percentage `json:"disk.percent,omitempty"` - // DiskTotal total capacity of all volumes + // DiskTotal Total disk space for the node, including in-use and available space. DiskTotal ByteSize `json:"disk.total,omitempty"` - // DiskUsed disk used (total, not just ES) + // DiskUsed Total disk space in use. + // Elasticsearch retrieves this metric from the node’s operating system (OS). + // The metric includes disk space for: Elasticsearch, including the translog and + // unassigned shards; the node’s operating system; any other applications or + // files on the node. + // Unlike `disk.indices`, this metric does not double-count disk space for + // hard-linked files. DiskUsed ByteSize `json:"disk.used,omitempty"` - // Host host of node + // Host Network host for the node. Set using the `network.host` setting. Host string `json:"host,omitempty"` - // Ip ip of node + // Ip IP address and port for the node. Ip string `json:"ip,omitempty"` - // Node name of node + // Node Name for the node. Set using the `node.name` setting. Node *string `json:"node,omitempty"` - // Shards number of shards on node + // Shards Number of primary and replica shards assigned to the node. 
Shards *string `json:"shards,omitempty"` } +func (s *AllocationRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "disk.avail", "da", "diskAvail": + if err := dec.Decode(&s.DiskAvail); err != nil { + return err + } + + case "disk.indices", "di", "diskIndices": + if err := dec.Decode(&s.DiskIndices); err != nil { + return err + } + + case "disk.percent", "dp", "diskPercent": + if err := dec.Decode(&s.DiskPercent); err != nil { + return err + } + + case "disk.total", "dt", "diskTotal": + if err := dec.Decode(&s.DiskTotal); err != nil { + return err + } + + case "disk.used", "du", "diskUsed": + if err := dec.Decode(&s.DiskUsed); err != nil { + return err + } + + case "host", "h": + if err := dec.Decode(&s.Host); err != nil { + return err + } + + case "ip": + if err := dec.Decode(&s.Ip); err != nil { + return err + } + + case "node", "n": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = &o + + case "shards", "s": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Shards = &o + + } + } + return nil +} + // NewAllocationRecord returns a AllocationRecord. 
func NewAllocationRecord() *AllocationRecord { r := &AllocationRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/allocationstore.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/allocationstore.go index 7024e7433..642486a8f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/allocationstore.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/allocationstore.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AllocationStore type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/allocation_explain/types.ts#L39-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/allocation_explain/types.ts#L39-L46 type AllocationStore struct { AllocationId string `json:"allocation_id"` Found bool `json:"found"` @@ -32,6 +40,107 @@ type AllocationStore struct { StoreException string `json:"store_exception"` } +func (s *AllocationStore) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allocation_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AllocationId = o + + case "found": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseBool(v) + if err != nil { + return err + } + s.Found = value + case bool: + s.Found = v + } + + case "in_sync": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.InSync = value + case bool: + s.InSync = v + } + + case "matching_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MatchingSizeInBytes = value + case float64: + f := int64(v) + s.MatchingSizeInBytes = f + } + + case "matching_sync_id": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.MatchingSyncId = value + case bool: + s.MatchingSyncId = v + } + + case "store_exception": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StoreException = o + + } + } + return nil +} + // NewAllocationStore returns a AllocationStore. func NewAllocationStore() *AllocationStore { r := &AllocationStore{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/alwayscondition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/alwayscondition.go index 0ba2f9516..4b196d2c2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/alwayscondition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/alwayscondition.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // AlwaysCondition type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Conditions.ts#L25-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Conditions.ts#L25-L25 type AlwaysCondition struct { } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analysisconfig.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analysisconfig.go index 9951b25ff..c9c39182f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analysisconfig.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analysisconfig.go @@ -16,21 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // AnalysisConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Analysis.ts#L29-L77 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Analysis.ts#L29-L77 type AnalysisConfig struct { // BucketSpan The size of the interval that the analysis is aggregated into, typically // between `5m` and `1h`. 
This value should be either a whole number of days or @@ -113,6 +113,7 @@ type AnalysisConfig struct { } func (s *AnalysisConfig) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -132,6 +133,7 @@ func (s *AnalysisConfig) UnmarshalJSON(data []byte) error { } case "categorization_analyzer": + rawMsg := json.RawMessage{} dec.Decode(&rawMsg) source := bytes.NewReader(rawMsg) @@ -181,8 +183,17 @@ func (s *AnalysisConfig) UnmarshalJSON(data []byte) error { } case "multivariate_by_fields": - if err := dec.Decode(&s.MultivariateByFields); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.MultivariateByFields = &value + case bool: + s.MultivariateByFields = &v } case "per_partition_categorization": diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analysisconfigread.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analysisconfigread.go index 6d4d8d58f..3c0e98df8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analysisconfigread.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analysisconfigread.go @@ -16,103 +16,91 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // AnalysisConfigRead type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Analysis.ts#L79-L91 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Analysis.ts#L79-L148 type AnalysisConfigRead struct { // BucketSpan The size of the interval that the analysis is aggregated into, typically - // between `5m` and `1h`. This value should be either a whole number of days or - // equate to a - // whole number of buckets in one day. If the anomaly detection job uses a - // datafeed with aggregations, this value must also be divisible by the interval - // of the date histogram aggregation. + // between `5m` and `1h`. BucketSpan Duration `json:"bucket_span"` // CategorizationAnalyzer If `categorization_field_name` is specified, you can also define the analyzer - // that is used to interpret the categorization field. This property cannot be - // used at the same time as `categorization_filters`. The categorization - // analyzer specifies how the `categorization_field` is interpreted by the - // categorization process. The `categorization_analyzer` field can be specified - // either as a string or as an object. If it is a string, it must refer to a - // built-in analyzer or one added by another plugin. + // that is used to interpret the categorization field. + // This property cannot be used at the same time as `categorization_filters`. + // The categorization analyzer specifies how the `categorization_field` is + // interpreted by the categorization process. CategorizationAnalyzer CategorizationAnalyzer `json:"categorization_analyzer,omitempty"` // CategorizationFieldName If this property is specified, the values of the specified field will be - // categorized. The resulting categories must be used in a detector by setting + // categorized. 
+ // The resulting categories must be used in a detector by setting // `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword // `mlcategory`. CategorizationFieldName *string `json:"categorization_field_name,omitempty"` // CategorizationFilters If `categorization_field_name` is specified, you can also define optional - // filters. This property expects an array of regular expressions. The - // expressions are used to filter out matching sequences from the categorization - // field values. You can use this functionality to fine tune the categorization - // by excluding sequences from consideration when categories are defined. For - // example, you can exclude SQL statements that appear in your log files. This - // property cannot be used at the same time as `categorization_analyzer`. If you - // only want to define simple regular expression filters that are applied prior - // to tokenization, setting this property is the easiest method. If you also - // want to customize the tokenizer or post-tokenization filtering, use the - // `categorization_analyzer` property instead and include the filters as - // pattern_replace character filters. The effect is exactly the same. + // filters. + // This property expects an array of regular expressions. + // The expressions are used to filter out matching sequences from the + // categorization field values. CategorizationFilters []string `json:"categorization_filters,omitempty"` - // Detectors Detector configuration objects specify which data fields a job analyzes. They - // also specify which analytical functions are used. You can specify multiple - // detectors for a job. If the detectors array does not contain at least one - // detector, no analysis can occur and an error is returned. + // Detectors An array of detector configuration objects. + // Detector configuration objects specify which data fields a job analyzes. + // They also specify which analytical functions are used. 
+ // You can specify multiple detectors for a job. Detectors []DetectorRead `json:"detectors"` - // Influencers A comma separated list of influencer field names. Typically these can be the - // by, over, or partition fields that are used in the detector configuration. + // Influencers A comma separated list of influencer field names. + // Typically these can be the by, over, or partition fields that are used in the + // detector configuration. // You might also want to use a field name that is not specifically named in a - // detector, but is available as part of the input data. When you use multiple - // detectors, the use of influencers is recommended as it aggregates results for - // each influencer entity. + // detector, but is available as part of the input data. + // When you use multiple detectors, the use of influencers is recommended as it + // aggregates results for each influencer entity. Influencers []string `json:"influencers"` - // Latency The size of the window in which to expect data that is out of time order. If - // you specify a non-zero value, it must be greater than or equal to one second. - // NOTE: Latency is applicable only when you send data by using the post data - // API. + // Latency The size of the window in which to expect data that is out of time order. + // Defaults to no latency. + // If you specify a non-zero value, it must be greater than or equal to one + // second. Latency Duration `json:"latency,omitempty"` - // ModelPruneWindow Advanced configuration option. Affects the pruning of models that have not - // been updated for the given time duration. The value must be set to a multiple - // of the `bucket_span`. If set too low, important information may be removed - // from the model. For jobs created in 8.1 and later, the default value is the - // greater of `30d` or 20 times `bucket_span`. + // ModelPruneWindow Advanced configuration option. 
+ // Affects the pruning of models that have not been updated for the given time + // duration. + // The value must be set to a multiple of the `bucket_span`. + // If set too low, important information may be removed from the model. + // Typically, set to `30d` or longer. + // If not set, model pruning only occurs if the model memory status reaches the + // soft limit or the hard limit. + // For jobs created in 8.1 and later, the default value is the greater of `30d` + // or 20 times `bucket_span`. ModelPruneWindow Duration `json:"model_prune_window,omitempty"` - // MultivariateByFields This functionality is reserved for internal use. It is not supported for use - // in customer environments and is not subject to the support SLA of official GA - // features. If set to `true`, the analysis will automatically find correlations - // between metrics for a given by field value and report anomalies when those - // correlations cease to hold. For example, suppose CPU and memory usage on host - // A is usually highly correlated with the same metrics on host B. Perhaps this - // correlation occurs because they are running a load-balanced application. If - // you enable this property, anomalies will be reported when, for example, CPU - // usage on host A is high and the value of CPU usage on host B is low. That is - // to say, you’ll see an anomaly when the CPU of host A is unusual given the CPU - // of host B. To use the `multivariate_by_fields` property, you must also - // specify `by_field_name` in your detector. + // MultivariateByFields This functionality is reserved for internal use. + // It is not supported for use in customer environments and is not subject to + // the support SLA of official GA features. + // If set to `true`, the analysis will automatically find correlations between + // metrics for a given by field value and report anomalies when those + // correlations cease to hold. 
MultivariateByFields *bool `json:"multivariate_by_fields,omitempty"` // PerPartitionCategorization Settings related to how categorization interacts with partition fields. PerPartitionCategorization *PerPartitionCategorization `json:"per_partition_categorization,omitempty"` // SummaryCountFieldName If this property is specified, the data that is fed to the job is expected to - // be pre-summarized. This property value is the name of the field that contains - // the count of raw data points that have been summarized. The same - // `summary_count_field_name` applies to all detectors in the job. NOTE: The - // `summary_count_field_name` property cannot be used with the `metric` - // function. + // be pre-summarized. + // This property value is the name of the field that contains the count of raw + // data points that have been summarized. + // The same `summary_count_field_name` applies to all detectors in the job. SummaryCountFieldName *string `json:"summary_count_field_name,omitempty"` } func (s *AnalysisConfigRead) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -132,6 +120,7 @@ func (s *AnalysisConfigRead) UnmarshalJSON(data []byte) error { } case "categorization_analyzer": + rawMsg := json.RawMessage{} dec.Decode(&rawMsg) source := bytes.NewReader(rawMsg) @@ -181,8 +170,17 @@ func (s *AnalysisConfigRead) UnmarshalJSON(data []byte) error { } case "multivariate_by_fields": - if err := dec.Decode(&s.MultivariateByFields); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.MultivariateByFields = &value + case bool: + s.MultivariateByFields = &v } case "per_partition_categorization": diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analysislimits.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analysislimits.go index 8fd313f17..ace070415 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analysislimits.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analysislimits.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AnalysisLimits type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Analysis.ts#L104-L115 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Analysis.ts#L161-L172 type AnalysisLimits struct { // CategorizationExamplesLimit The maximum number of examples stored per category in memory and in the // results data store. 
If you increase this value, more examples are available, @@ -49,6 +57,53 @@ type AnalysisLimits struct { ModelMemoryLimit *string `json:"model_memory_limit,omitempty"` } +func (s *AnalysisLimits) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "categorization_examples_limit": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CategorizationExamplesLimit = &value + case float64: + f := int64(v) + s.CategorizationExamplesLimit = &f + } + + case "model_memory_limit": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelMemoryLimit = &o + + } + } + return nil +} + // NewAnalysisLimits returns a AnalysisLimits. func NewAnalysisLimits() *AnalysisLimits { r := &AnalysisLimits{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analysismemorylimit.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analysismemorylimit.go index a74dbfc54..8ded1b481 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analysismemorylimit.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analysismemorylimit.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AnalysisMemoryLimit type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Analysis.ts#L117-L122 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Analysis.ts#L174-L179 type AnalysisMemoryLimit struct { // ModelMemoryLimit Limits can be applied for the resources required to hold the mathematical // models in memory. These limits are approximate and can be set per job. They @@ -31,6 +39,38 @@ type AnalysisMemoryLimit struct { ModelMemoryLimit string `json:"model_memory_limit"` } +func (s *AnalysisMemoryLimit) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "model_memory_limit": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelMemoryLimit = o + + } + } + return nil +} + // NewAnalysisMemoryLimit returns a AnalysisMemoryLimit. func NewAnalysisMemoryLimit() *AnalysisMemoryLimit { r := &AnalysisMemoryLimit{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analytics.go index 9ec6a5289..02576048b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analytics.go @@ -16,19 +16,80 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Analytics type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L324-L326 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L330-L332 type Analytics struct { Available bool `json:"available"` Enabled bool `json:"enabled"` Stats AnalyticsStatistics `json:"stats"` } +func (s *Analytics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + return err + } + + } + } + return nil +} + // NewAnalytics returns a Analytics. 
func NewAnalytics() *Analytics { r := &Analytics{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyticscollection.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyticscollection.go new file mode 100644 index 000000000..999e76d28 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyticscollection.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +// AnalyticsCollection type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/search_application/_types/BehavioralAnalytics.ts#L22-L27 +type AnalyticsCollection struct { + // EventDataStream Data stream for the collection. + EventDataStream EventDataStream `json:"event_data_stream"` +} + +// NewAnalyticsCollection returns a AnalyticsCollection. 
+func NewAnalyticsCollection() *AnalyticsCollection { + r := &AnalyticsCollection{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyticsstatistics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyticsstatistics.go index e62c80fa6..1704d82bb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyticsstatistics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyticsstatistics.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AnalyticsStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L61-L71 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L61-L71 type AnalyticsStatistics struct { BoxplotUsage int64 `json:"boxplot_usage"` CumulativeCardinalityUsage int64 `json:"cumulative_cardinality_usage"` @@ -35,6 +43,161 @@ type AnalyticsStatistics struct { TopMetricsUsage int64 `json:"top_metrics_usage"` } +func (s *AnalyticsStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boxplot_usage": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BoxplotUsage = value + case float64: + f := int64(v) + s.BoxplotUsage = f + } + + 
case "cumulative_cardinality_usage": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CumulativeCardinalityUsage = value + case float64: + f := int64(v) + s.CumulativeCardinalityUsage = f + } + + case "moving_percentiles_usage": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MovingPercentilesUsage = value + case float64: + f := int64(v) + s.MovingPercentilesUsage = f + } + + case "multi_terms_usage": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MultiTermsUsage = &value + case float64: + f := int64(v) + s.MultiTermsUsage = &f + } + + case "normalize_usage": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NormalizeUsage = value + case float64: + f := int64(v) + s.NormalizeUsage = f + } + + case "rate_usage": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.RateUsage = value + case float64: + f := int64(v) + s.RateUsage = f + } + + case "string_stats_usage": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.StringStatsUsage = value + case float64: + f := int64(v) + s.StringStatsUsage = f + } + + case "t_test_usage": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TTestUsage = value + case float64: + f := int64(v) + s.TTestUsage = f + } + + case 
"top_metrics_usage": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TopMetricsUsage = value + case float64: + f := int64(v) + s.TopMetricsUsage = f + } + + } + } + return nil +} + // NewAnalyticsStatistics returns a AnalyticsStatistics. func NewAnalyticsStatistics() *AnalyticsStatistics { r := &AnalyticsStatistics{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyzedetail.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyzedetail.go index 40261318d..647839f3a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyzedetail.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyzedetail.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AnalyzeDetail type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/analyze/types.ts#L24-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/analyze/types.ts#L24-L30 type AnalyzeDetail struct { Analyzer *AnalyzerDetail `json:"analyzer,omitempty"` Charfilters []CharFilterDetail `json:"charfilters,omitempty"` @@ -31,6 +39,60 @@ type AnalyzeDetail struct { Tokenizer *TokenDetail `json:"tokenizer,omitempty"` } +func (s *AnalyzeDetail) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + if err := dec.Decode(&s.Analyzer); err != nil { + return err + } + + case "charfilters": + if err := dec.Decode(&s.Charfilters); err != nil { + return err + } + + case "custom_analyzer": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.CustomAnalyzer = value + case bool: + s.CustomAnalyzer = v + } + + case "tokenfilters": + if err := dec.Decode(&s.Tokenfilters); err != nil { + return err + } + + case "tokenizer": + if err := dec.Decode(&s.Tokenizer); err != nil { + return err + } + + } + } + return nil +} + // NewAnalyzeDetail returns a AnalyzeDetail. func NewAnalyzeDetail() *AnalyzeDetail { r := &AnalyzeDetail{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyzer.go index ca6ccdb61..c4e593092 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyzer.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -37,5 +37,5 @@ package types // SnowballAnalyzer // DutchAnalyzer // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/analyzers.ts#L113-L131 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/analyzers.ts#L113-L131 type Analyzer interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyzerdetail.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyzerdetail.go index 22b39ea00..5d56ea6d0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyzerdetail.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyzerdetail.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AnalyzerDetail type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/analyze/types.ts#L32-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/analyze/types.ts#L32-L35 type AnalyzerDetail struct { Name string `json:"name"` Tokens []ExplainAnalyzeToken `json:"tokens"` } +func (s *AnalyzerDetail) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "tokens": + if err := dec.Decode(&s.Tokens); err != nil { + return err + } + + } + } + return nil +} + // NewAnalyzerDetail returns a AnalyzerDetail. func NewAnalyzerDetail() *AnalyzerDetail { r := &AnalyzerDetail{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyzetoken.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyzetoken.go index ab152a70e..3e24b5717 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyzetoken.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/analyzetoken.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AnalyzeToken type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/analyze/types.ts#L37-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/analyze/types.ts#L37-L44 type AnalyzeToken struct { EndOffset int64 `json:"end_offset"` Position int64 `json:"position"` @@ -32,6 +40,110 @@ type AnalyzeToken struct { Type string `json:"type"` } +func (s *AnalyzeToken) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "end_offset": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.EndOffset = value + case float64: + f := int64(v) + s.EndOffset = f + } + + case "position": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Position = value + case float64: + f := int64(v) + s.Position = f + } + + case "positionLength": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PositionLength = &value + case float64: + f := int64(v) + s.PositionLength = &f + } + + case "start_offset": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.StartOffset = value + case float64: + f := int64(v) + s.StartOffset = f + } + + case "token": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Token = o + + case 
"type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewAnalyzeToken returns a AnalyzeToken. func NewAnalyzeToken() *AnalyzeToken { r := &AnalyzeToken{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/anomaly.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/anomaly.go index b7f5d3049..e88fe9bb3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/anomaly.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/anomaly.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Anomaly type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Anomaly.ts#L24-L121 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Anomaly.ts#L24-L121 type Anomaly struct { // Actual The actual value for the bucket. 
Actual []Float64 `json:"actual,omitempty"` @@ -103,6 +111,276 @@ type Anomaly struct { Typical []Float64 `json:"typical,omitempty"` } +func (s *Anomaly) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actual": + if err := dec.Decode(&s.Actual); err != nil { + return err + } + + case "anomaly_score_explanation": + if err := dec.Decode(&s.AnomalyScoreExplanation); err != nil { + return err + } + + case "bucket_span": + if err := dec.Decode(&s.BucketSpan); err != nil { + return err + } + + case "by_field_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ByFieldName = &o + + case "by_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ByFieldValue = &o + + case "causes": + if err := dec.Decode(&s.Causes); err != nil { + return err + } + + case "detector_index": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DetectorIndex = value + case float64: + f := int(v) + s.DetectorIndex = f + } + + case "field_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FieldName = &o + + case "function": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Function = &o + + case "function_description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err 
!= nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FunctionDescription = &o + + case "geo_results": + if err := dec.Decode(&s.GeoResults); err != nil { + return err + } + + case "influencers": + if err := dec.Decode(&s.Influencers); err != nil { + return err + } + + case "initial_record_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.InitialRecordScore = f + case float64: + f := Float64(v) + s.InitialRecordScore = f + } + + case "is_interim": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsInterim = value + case bool: + s.IsInterim = v + } + + case "job_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.JobId = o + + case "over_field_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OverFieldName = &o + + case "over_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OverFieldValue = &o + + case "partition_field_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PartitionFieldName = &o + + case "partition_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = 
string(tmp[:]) + } + s.PartitionFieldValue = &o + + case "probability": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Probability = f + case float64: + f := Float64(v) + s.Probability = f + } + + case "record_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.RecordScore = f + case float64: + f := Float64(v) + s.RecordScore = f + } + + case "result_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultType = o + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + case "typical": + if err := dec.Decode(&s.Typical); err != nil { + return err + } + + } + } + return nil +} + // NewAnomaly returns a Anomaly. func NewAnomaly() *Anomaly { r := &Anomaly{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/anomalycause.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/anomalycause.go index 5ef7fd50f..492b0fabb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/anomalycause.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/anomalycause.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AnomalyCause type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Anomaly.ts#L123-L138 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Anomaly.ts#L123-L138 type AnomalyCause struct { Actual []Float64 `json:"actual"` ByFieldName string `json:"by_field_name"` @@ -40,6 +48,156 @@ type AnomalyCause struct { Typical []Float64 `json:"typical"` } +func (s *AnomalyCause) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actual": + if err := dec.Decode(&s.Actual); err != nil { + return err + } + + case "by_field_name": + if err := dec.Decode(&s.ByFieldName); err != nil { + return err + } + + case "by_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ByFieldValue = o + + case "correlated_by_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CorrelatedByFieldValue = o + + case "field_name": + if err := dec.Decode(&s.FieldName); err != nil { + return err + } + + case "function": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Function = o + + case "function_description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FunctionDescription = o + + case "influencers": + if err := dec.Decode(&s.Influencers); 
err != nil { + return err + } + + case "over_field_name": + if err := dec.Decode(&s.OverFieldName); err != nil { + return err + } + + case "over_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OverFieldValue = o + + case "partition_field_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PartitionFieldName = o + + case "partition_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PartitionFieldValue = o + + case "probability": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Probability = f + case float64: + f := Float64(v) + s.Probability = f + } + + case "typical": + if err := dec.Decode(&s.Typical); err != nil { + return err + } + + } + } + return nil +} + // NewAnomalyCause returns a AnomalyCause. func NewAnomalyCause() *AnomalyCause { r := &AnomalyCause{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/anomalydetectors.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/anomalydetectors.go index 31ecaf263..529e5fd4b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/anomalydetectors.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/anomalydetectors.go @@ -16,21 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // AnomalyDetectors type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/info/types.ts#L44-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/info/types.ts#L44-L50 type AnomalyDetectors struct { CategorizationAnalyzer CategorizationAnalyzer `json:"categorization_analyzer"` CategorizationExamplesLimit int `json:"categorization_examples_limit"` @@ -40,6 +40,7 @@ type AnomalyDetectors struct { } func (s *AnomalyDetectors) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -54,6 +55,7 @@ func (s *AnomalyDetectors) UnmarshalJSON(data []byte) error { switch t { case "categorization_analyzer": + rawMsg := json.RawMessage{} dec.Decode(&rawMsg) source := bytes.NewReader(rawMsg) @@ -73,23 +75,63 @@ func (s *AnomalyDetectors) UnmarshalJSON(data []byte) error { } case "categorization_examples_limit": - if err := dec.Decode(&s.CategorizationExamplesLimit); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CategorizationExamplesLimit = value + case float64: + f := int(v) + s.CategorizationExamplesLimit = f } case "daily_model_snapshot_retention_after_days": - if err := dec.Decode(&s.DailyModelSnapshotRetentionAfterDays); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DailyModelSnapshotRetentionAfterDays = value + case 
float64: + f := int(v) + s.DailyModelSnapshotRetentionAfterDays = f } case "model_memory_limit": - if err := dec.Decode(&s.ModelMemoryLimit); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelMemoryLimit = o case "model_snapshot_retention_days": - if err := dec.Decode(&s.ModelSnapshotRetentionDays); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ModelSnapshotRetentionDays = value + case float64: + f := int(v) + s.ModelSnapshotRetentionDays = f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/anomalyexplanation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/anomalyexplanation.go index d098e45a5..50f10d7aa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/anomalyexplanation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/anomalyexplanation.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AnomalyExplanation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Anomaly.ts#L156-L197 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Anomaly.ts#L156-L197 type AnomalyExplanation struct { // AnomalyCharacteristicsImpact Impact from the duration and magnitude of the detected anomaly relative to // the historical average. @@ -50,6 +58,178 @@ type AnomalyExplanation struct { UpperConfidenceBound *Float64 `json:"upper_confidence_bound,omitempty"` } +func (s *AnomalyExplanation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "anomaly_characteristics_impact": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.AnomalyCharacteristicsImpact = &value + case float64: + f := int(v) + s.AnomalyCharacteristicsImpact = &f + } + + case "anomaly_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.AnomalyLength = &value + case float64: + f := int(v) + s.AnomalyLength = &f + } + + case "anomaly_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AnomalyType = &o + + case "high_variance_penalty": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.HighVariancePenalty = &value + case bool: + s.HighVariancePenalty = &v + } + + case "incomplete_bucket_penalty": + var tmp interface{} + dec.Decode(&tmp) + switch v := 
tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IncompleteBucketPenalty = &value + case bool: + s.IncompleteBucketPenalty = &v + } + + case "lower_confidence_bound": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.LowerConfidenceBound = &f + case float64: + f := Float64(v) + s.LowerConfidenceBound = &f + } + + case "multi_bucket_impact": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MultiBucketImpact = &value + case float64: + f := int(v) + s.MultiBucketImpact = &f + } + + case "single_bucket_impact": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SingleBucketImpact = &value + case float64: + f := int(v) + s.SingleBucketImpact = &f + } + + case "typical_value": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.TypicalValue = &f + case float64: + f := Float64(v) + s.TypicalValue = &f + } + + case "upper_confidence_bound": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.UpperConfidenceBound = &f + case float64: + f := Float64(v) + s.UpperConfidenceBound = &f + } + + } + } + return nil +} + // NewAnomalyExplanation returns a AnomalyExplanation. 
func NewAnomalyExplanation() *AnomalyExplanation { r := &AnomalyExplanation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/apikey.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/apikey.go index d5f2f9149..b10bfc92f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/apikey.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/apikey.go @@ -16,29 +16,165 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // ApiKey type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/ApiKey.ts#L27-L41 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/ApiKey.ts#L27-L77 type ApiKey struct { - Creation *int64 `json:"creation,omitempty"` - Expiration *int64 `json:"expiration,omitempty"` - Id string `json:"id"` - Invalidated *bool `json:"invalidated,omitempty"` - LimitedBy []map[string]RoleDescriptor `json:"limited_by,omitempty"` - Metadata map[string]json.RawMessage `json:"metadata,omitempty"` - Name string `json:"name"` - Realm *string `json:"realm,omitempty"` - RoleDescriptors map[string]RoleDescriptor `json:"role_descriptors,omitempty"` - Sort_ []FieldValue `json:"_sort,omitempty"` - Username *string `json:"username,omitempty"` + // Creation Creation time for the API key in milliseconds. + Creation *int64 `json:"creation,omitempty"` + // Expiration Expiration time for the API key in milliseconds. 
+ Expiration *int64 `json:"expiration,omitempty"` + // Id Id for the API key + Id string `json:"id"` + // Invalidated Invalidation status for the API key. + // If the key has been invalidated, it has a value of `true`. Otherwise, it is + // `false`. + Invalidated *bool `json:"invalidated,omitempty"` + // LimitedBy The owner user’s permissions associated with the API key. + // It is a point-in-time snapshot captured at creation and subsequent updates. + // An API key’s effective permissions are an intersection of its assigned + // privileges and the owner user’s permissions. + LimitedBy []map[string]RoleDescriptor `json:"limited_by,omitempty"` + // Metadata Metadata of the API key + Metadata Metadata `json:"metadata,omitempty"` + // Name Name of the API key. + Name string `json:"name"` + // Realm Realm name of the principal for which this API key was created. + Realm *string `json:"realm,omitempty"` + // RoleDescriptors The role descriptors assigned to this API key when it was created or last + // updated. + // An empty role descriptor means the API key inherits the owner user’s + // permissions. 
+ RoleDescriptors map[string]RoleDescriptor `json:"role_descriptors,omitempty"` + Sort_ []FieldValue `json:"_sort,omitempty"` + // Username Principal for which this API key was created + Username *string `json:"username,omitempty"` +} + +func (s *ApiKey) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "creation": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Creation = &value + case float64: + f := int64(v) + s.Creation = &f + } + + case "expiration": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Expiration = &value + case float64: + f := int64(v) + s.Expiration = &f + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "invalidated": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Invalidated = &value + case bool: + s.Invalidated = &v + } + + case "limited_by": + if err := dec.Decode(&s.LimitedBy); err != nil { + return err + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "realm": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Realm = &o + + case "role_descriptors": + if s.RoleDescriptors == nil { + s.RoleDescriptors = make(map[string]RoleDescriptor, 0) + } + if err := dec.Decode(&s.RoleDescriptors); err != nil { + return err + } + + case "_sort": + if 
err := dec.Decode(&s.Sort_); err != nil { + return err + } + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return err + } + + } + } + return nil } // NewApiKey returns a ApiKey. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/apikeyauthorization.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/apikeyauthorization.go index f8bba5014..3424a9627 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/apikeyauthorization.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/apikeyauthorization.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ApiKeyAuthorization type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Authorization.ts#L20-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Authorization.ts#L20-L29 type ApiKeyAuthorization struct { // Id The identifier for the API key. 
Id string `json:"id"` @@ -30,6 +38,50 @@ type ApiKeyAuthorization struct { Name string `json:"name"` } +func (s *ApiKeyAuthorization) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + } + } + return nil +} + // NewApiKeyAuthorization returns a ApiKeyAuthorization. func NewApiKeyAuthorization() *ApiKeyAuthorization { r := &ApiKeyAuthorization{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/appendprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/appendprocessor.go index 512a8722e..985f4680b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/appendprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/appendprocessor.go @@ -16,26 +16,141 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // AppendProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L90-L94 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L273-L288 type AppendProcessor struct { - AllowDuplicates *bool `json:"allow_duplicates,omitempty"` - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Tag *string `json:"tag,omitempty"` - Value []json.RawMessage `json:"value"` + // AllowDuplicates If `false`, the processor does not append values already present in the + // field. + AllowDuplicates *bool `json:"allow_duplicates,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to be appended to. + // Supports template snippets. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // Value The value to be appended. Supports template snippets. 
+ Value []json.RawMessage `json:"value"` +} + +func (s *AppendProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_duplicates": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowDuplicates = &value + case bool: + s.AllowDuplicates = &v + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + } + } + return nil } // NewAppendProcessor returns a AppendProcessor. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/applicationglobaluserprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/applicationglobaluserprivileges.go index 4abc319ed..7e4541932 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/applicationglobaluserprivileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/applicationglobaluserprivileges.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ApplicationGlobalUserPrivileges type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/Privileges.ts#L191-L193 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/Privileges.ts#L193-L195 type ApplicationGlobalUserPrivileges struct { Manage ManageUserPrivileges `json:"manage"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/applicationprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/applicationprivileges.go index 5c3189d35..d9841d712 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/applicationprivileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/applicationprivileges.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ApplicationPrivileges type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/Privileges.ts#L26-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/Privileges.ts#L26-L39 type ApplicationPrivileges struct { // Application The name of the application to which this entry applies. Application string `json:"application"` @@ -33,6 +41,48 @@ type ApplicationPrivileges struct { Resources []string `json:"resources"` } +func (s *ApplicationPrivileges) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "application": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Application = o + + case "privileges": + if err := dec.Decode(&s.Privileges); err != nil { + return err + } + + case "resources": + if err := dec.Decode(&s.Resources); err != nil { + return err + } + + } + } + return nil +} + // NewApplicationPrivileges returns a ApplicationPrivileges. 
func NewApplicationPrivileges() *ApplicationPrivileges { r := &ApplicationPrivileges{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/applicationprivilegescheck.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/applicationprivilegescheck.go index c80e37d11..c99abb15c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/applicationprivilegescheck.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/applicationprivilegescheck.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ApplicationPrivilegesCheck type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/has_privileges/types.ts#L24-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/has_privileges/types.ts#L24-L31 type ApplicationPrivilegesCheck struct { // Application The name of the application. 
Application string `json:"application"` @@ -34,6 +42,48 @@ type ApplicationPrivilegesCheck struct { Resources []string `json:"resources"` } +func (s *ApplicationPrivilegesCheck) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "application": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Application = o + + case "privileges": + if err := dec.Decode(&s.Privileges); err != nil { + return err + } + + case "resources": + if err := dec.Decode(&s.Resources); err != nil { + return err + } + + } + } + return nil +} + // NewApplicationPrivilegesCheck returns a ApplicationPrivilegesCheck. func NewApplicationPrivilegesCheck() *ApplicationPrivilegesCheck { r := &ApplicationPrivilegesCheck{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/applicationsprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/applicationsprivileges.go index ae45ed9bf..6d773788c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/applicationsprivileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/applicationsprivileges.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ApplicationsPrivileges type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/has_privileges/types.ts#L46-L46 -type ApplicationsPrivileges map[string]map[string]map[string]bool +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/has_privileges/types.ts#L46-L46 +type ApplicationsPrivileges map[string]ResourcePrivileges diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/archive.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/archive.go index 9aaaff5fc..9512537ac 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/archive.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/archive.go @@ -16,19 +16,90 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Archive type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L48-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L48-L50 type Archive struct { Available bool `json:"available"` Enabled bool `json:"enabled"` IndicesCount int64 `json:"indices_count"` } +func (s *Archive) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "indices_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndicesCount = value + case float64: + f := int64(v) + s.IndicesCount = f + } + + } + } + return nil +} + // NewArchive returns a Archive. func NewArchive() *Archive { r := &Archive{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/arraycomparecondition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/arraycomparecondition.go index a5f39545f..c1fab2e22 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/arraycomparecondition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/arraycomparecondition.go @@ -16,25 +16,71 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditionop" - + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditionop" ) // ArrayCompareCondition type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Conditions.ts#L32-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Conditions.ts#L32-L36 type ArrayCompareCondition struct { - ArrayCompareCondition map[conditionop.ConditionOp]ArrayCompareOpParams `json:"-"` + ArrayCompareCondition map[conditionop.ConditionOp]ArrayCompareOpParams `json:"ArrayCompareCondition,omitempty"` Path string `json:"path"` } +func (s *ArrayCompareCondition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ArrayCompareCondition": + if s.ArrayCompareCondition == nil { + s.ArrayCompareCondition = make(map[conditionop.ConditionOp]ArrayCompareOpParams, 0) + } + if err := dec.Decode(&s.ArrayCompareCondition); err != nil { + return err + } + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = o + + default: + + } + } + return nil +} + // MarhsalJSON overrides marshalling for types with additional properties func (s ArrayCompareCondition) MarshalJSON() ([]byte, 
error) { type opt ArrayCompareCondition @@ -54,6 +100,7 @@ func (s ArrayCompareCondition) MarshalJSON() ([]byte, error) { for key, value := range s.ArrayCompareCondition { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "ArrayCompareCondition") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/arraycompareopparams.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/arraycompareopparams.go index eb0c986d4..db23e2ccd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/arraycompareopparams.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/arraycompareopparams.go @@ -16,22 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/quantifier" ) // ArrayCompareOpParams type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Conditions.ts#L27-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Conditions.ts#L27-L30 type ArrayCompareOpParams struct { Quantifier quantifier.Quantifier `json:"quantifier"` Value FieldValue `json:"value"` } +func (s *ArrayCompareOpParams) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "quantifier": + if err := dec.Decode(&s.Quantifier); err != nil { + return err + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + } + } + return nil +} + // NewArrayCompareOpParams returns a ArrayCompareOpParams. func NewArrayCompareOpParams() *ArrayCompareOpParams { r := &ArrayCompareOpParams{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/arraypercentilesitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/arraypercentilesitem.go index a57c909dd..2f664344f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/arraypercentilesitem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/arraypercentilesitem.go @@ -16,19 +16,76 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ArrayPercentilesItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L159-L163 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L160-L164 type ArrayPercentilesItem struct { Key string `json:"key"` Value Float64 `json:"value,omitempty"` ValueAsString *string `json:"value_as_string,omitempty"` } +func (s *ArrayPercentilesItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = o + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + // NewArrayPercentilesItem returns a ArrayPercentilesItem. func NewArrayPercentilesItem() *ArrayPercentilesItem { r := &ArrayPercentilesItem{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/asciifoldingtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/asciifoldingtokenfilter.go index 724d8944c..35c12e2e7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/asciifoldingtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/asciifoldingtokenfilter.go @@ -16,24 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // AsciiFoldingTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L167-L170 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L168-L171 type AsciiFoldingTokenFilter struct { - PreserveOriginal *bool `json:"preserve_original,omitempty"` - Type string `json:"type,omitempty"` - Version *string `json:"version,omitempty"` + PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *AsciiFoldingTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "preserve_original": + if err := dec.Decode(&s.PreserveOriginal); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s AsciiFoldingTokenFilter) MarshalJSON() ([]byte, error) { + type innerAsciiFoldingTokenFilter AsciiFoldingTokenFilter + tmp := innerAsciiFoldingTokenFilter{ + PreserveOriginal: s.PreserveOriginal, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "asciifolding" + + return json.Marshal(tmp) } // NewAsciiFoldingTokenFilter returns a AsciiFoldingTokenFilter. 
func NewAsciiFoldingTokenFilter() *AsciiFoldingTokenFilter { r := &AsciiFoldingTokenFilter{} - r.Type = "asciifolding" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/asyncsearch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/asyncsearch.go index a4624b610..64dd52dc6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/asyncsearch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/asyncsearch.go @@ -16,41 +16,50 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - + "strconv" "strings" - - "encoding/json" ) // AsyncSearch type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/async_search/_types/AsyncSearch.ts#L30-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/async_search/_types/AsyncSearch.ts#L30-L56 type AsyncSearch struct { - Aggregations map[string]Aggregate `json:"aggregations,omitempty"` - Clusters_ *ClusterStatistics `json:"_clusters,omitempty"` - Fields map[string]json.RawMessage `json:"fields,omitempty"` - Hits HitsMetadata `json:"hits"` - MaxScore *Float64 `json:"max_score,omitempty"` - NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` - PitId *string `json:"pit_id,omitempty"` - Profile *Profile `json:"profile,omitempty"` - ScrollId_ *string `json:"_scroll_id,omitempty"` - Shards_ ShardStatistics `json:"_shards"` - Suggest map[string][]Suggest `json:"suggest,omitempty"` - TerminatedEarly *bool `json:"terminated_early,omitempty"` - TimedOut bool `json:"timed_out"` - Took int64 `json:"took"` + 
// Aggregations Partial aggregations results, coming from the shards that have already + // completed the execution of the query. + Aggregations map[string]Aggregate `json:"aggregations,omitempty"` + Clusters_ *ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + Hits HitsMetadata `json:"hits"` + MaxScore *Float64 `json:"max_score,omitempty"` + // NumReducePhases Indicates how many reductions of the results have been performed. + // If this number increases compared to the last retrieved results for a get + // asynch search request, you can expect additional results included in the + // search response. + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *Profile `json:"profile,omitempty"` + ScrollId_ *string `json:"_scroll_id,omitempty"` + // Shards_ Indicates how many shards have run the query. + // Note that in order for shard results to be included in the search response, + // they need to be reduced first. 
+ Shards_ ShardStatistics `json:"_shards"` + Suggest map[string][]Suggest `json:"suggest,omitempty"` + TerminatedEarly *bool `json:"terminated_early,omitempty"` + TimedOut bool `json:"timed_out"` + Took int64 `json:"took"` } func (s *AsyncSearch) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -65,6 +74,10 @@ func (s *AsyncSearch) UnmarshalJSON(data []byte) error { switch t { case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + for dec.More() { tt, err := dec.Token() if err != nil { @@ -77,415 +90,494 @@ func (s *AsyncSearch) UnmarshalJSON(data []byte) error { if strings.Contains(value, "#") { elems := strings.Split(value, "#") if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } switch elems[0] { + case "cardinality": o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "hdr_percentiles": o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "hdr_percentile_ranks": o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "tdigest_percentiles": o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "tdigest_percentile_ranks": o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "percentiles_bucket": o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case 
"median_absolute_deviation": o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "min": o := NewMinAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "max": o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sum": o := NewSumAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "avg": o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "weighted_avg": o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "value_count": o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "simple_value": o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "derivative": o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "bucket_metric_value": o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "stats": o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "stats_bucket": o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := 
dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "extended_stats": o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "extended_stats_bucket": o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_bounds": o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_centroid": o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "histogram": o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "date_histogram": o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "auto_date_histogram": o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "variable_width_histogram": o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sterms": o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "lterms": o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "dterms": o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err 
!= nil { return err } s.Aggregations[elems[1]] = o + case "umterms": o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "lrareterms": o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "srareterms": o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umrareterms": o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "multi_terms": o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "missing": o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "nested": o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "reverse_nested": o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "global": o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "filter": o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "children": o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "parent": o := NewParentAggregate() - 
if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sampler": o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "unmapped_sampler": o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geohash_grid": o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geotile_grid": o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geohex_grid": o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "range": o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "date_range": o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_distance": o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "ip_range": o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "ip_prefix": o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "filters": o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } 
s.Aggregations[elems[1]] = o + case "adjacency_matrix": o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "siglterms": o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sigsterms": o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umsigterms": o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "composite": o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "top_hits": o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "inference": o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "string_stats": o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "box_plot": o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "top_metrics": o := 
NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "t_test": o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "rate": o := NewRateAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "simple_long_value": o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "matrix_stats": o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_line": o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + default: o := make(map[string]interface{}, 0) if err := dec.Decode(&o); err != nil { @@ -512,6 +604,9 @@ func (s *AsyncSearch) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } if err := dec.Decode(&s.Fields); err != nil { return err } @@ -522,13 +617,34 @@ func (s *AsyncSearch) UnmarshalJSON(data []byte) error { } case "max_score": - if err := dec.Decode(&s.MaxScore); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.MaxScore = &f + case float64: + f := Float64(v) + s.MaxScore = &f } case "num_reduce_phases": - if err := dec.Decode(&s.NumReducePhases); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err 
+ } + s.NumReducePhases = &value + case float64: + f := int64(v) + s.NumReducePhases = &f } case "pit_id": @@ -552,23 +668,109 @@ func (s *AsyncSearch) UnmarshalJSON(data []byte) error { } case "suggest": - if err := dec.Decode(&s.Suggest); err != nil { - return err + if s.Suggest == nil { + s.Suggest = make(map[string][]Suggest, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Suggest == nil { + s.Suggest = make(map[string][]Suggest, 0) + } + switch elems[0] { + + case "completion": + o := NewCompletionSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "phrase": + o := NewPhraseSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "term": + o := NewTermSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + } + } else { + return errors.New("cannot decode JSON for field Suggest") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[value] = append(s.Suggest[value], o) + } + } } case "terminated_early": - if err := dec.Decode(&s.TerminatedEarly); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TerminatedEarly = &value + case bool: + s.TerminatedEarly = &v } case "timed_out": - if err := dec.Decode(&s.TimedOut); err != nil { - return err + var tmp interface{} + 
dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimedOut = value + case bool: + s.TimedOut = v } case "took": - if err := dec.Decode(&s.Took); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Took = value + case float64: + f := int64(v) + s.Took = f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/attachmentprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/attachmentprocessor.go index 0dfb534b4..eb0c0035a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/attachmentprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/attachmentprocessor.go @@ -16,26 +16,191 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AttachmentProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L96-L104 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L290-L326 type AttachmentProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - IndexedChars *int64 `json:"indexed_chars,omitempty"` - IndexedCharsField *string `json:"indexed_chars_field,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Properties []string `json:"properties,omitempty"` - ResourceName *string `json:"resource_name,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to get the base64 encoded field from. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and field does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // IndexedChars The number of chars being used for extraction to prevent huge fields. + // Use `-1` for no limit. + IndexedChars *int64 `json:"indexed_chars,omitempty"` + // IndexedCharsField Field name from which you can overwrite the number of chars being used for + // extraction. + IndexedCharsField *string `json:"indexed_chars_field,omitempty"` + // OnFailure Handle failures for the processor. 
+ OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Properties Array of properties to select to be stored. + // Can be `content`, `title`, `name`, `author`, `keywords`, `date`, + // `content_type`, `content_length`, `language`. + Properties []string `json:"properties,omitempty"` + // ResourceName Field containing the name of the resource to decode. + // If specified, the processor passes this resource name to the underlying Tika + // library to enable Resource Name Based Detection. + ResourceName *string `json:"resource_name,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field that will hold the attachment information. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *AttachmentProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + 
s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "indexed_chars": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndexedChars = &value + case float64: + f := int64(v) + s.IndexedChars = &f + } + + case "indexed_chars_field": + if err := dec.Decode(&s.IndexedCharsField); err != nil { + return err + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "properties": + if err := dec.Decode(&s.Properties); err != nil { + return err + } + + case "resource_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResourceName = &o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewAttachmentProcessor returns a AttachmentProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/audit.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/audit.go index 5e7d9582e..1d741fcd9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/audit.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/audit.go @@ -16,18 +16,65 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Audit type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L73-L75 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L73-L75 type Audit struct { Enabled bool `json:"enabled"` Outputs []string `json:"outputs,omitempty"` } +func (s *Audit) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "outputs": + if err := dec.Decode(&s.Outputs); err != nil { + return err + } + + } + } + return nil +} + // NewAudit returns a Audit. func NewAudit() *Audit { r := &Audit{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/authenticateduser.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/authenticateduser.go index 367bce37e..f4a151b79 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/authenticateduser.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/authenticateduser.go @@ -16,29 +16,131 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // AuthenticatedUser type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_token/types.ts#L40-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_token/types.ts#L40-L45 type AuthenticatedUser struct { - AuthenticationProvider *AuthenticationProvider `json:"authentication_provider,omitempty"` - AuthenticationRealm UserRealm `json:"authentication_realm"` - AuthenticationType string `json:"authentication_type"` - Email string `json:"email,omitempty"` - Enabled bool `json:"enabled"` - FullName string `json:"full_name,omitempty"` - LookupRealm UserRealm `json:"lookup_realm"` - Metadata map[string]json.RawMessage `json:"metadata"` - ProfileUid *string `json:"profile_uid,omitempty"` - Roles []string `json:"roles"` - Username string `json:"username"` + AuthenticationProvider *AuthenticationProvider `json:"authentication_provider,omitempty"` + AuthenticationRealm UserRealm `json:"authentication_realm"` + AuthenticationType string `json:"authentication_type"` + Email string `json:"email,omitempty"` + Enabled bool `json:"enabled"` + FullName string `json:"full_name,omitempty"` + LookupRealm UserRealm `json:"lookup_realm"` + Metadata Metadata `json:"metadata"` + ProfileUid *string `json:"profile_uid,omitempty"` + Roles []string `json:"roles"` + Username string `json:"username"` +} + +func (s *AuthenticatedUser) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + 
case "authentication_provider": + if err := dec.Decode(&s.AuthenticationProvider); err != nil { + return err + } + + case "authentication_realm": + if err := dec.Decode(&s.AuthenticationRealm); err != nil { + return err + } + + case "authentication_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AuthenticationType = o + + case "email": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Email = o + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "full_name": + if err := dec.Decode(&s.FullName); err != nil { + return err + } + + case "lookup_realm": + if err := dec.Decode(&s.LookupRealm); err != nil { + return err + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return err + } + + case "profile_uid": + if err := dec.Decode(&s.ProfileUid); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return err + } + + } + } + return nil } // NewAuthenticatedUser returns a AuthenticatedUser. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/authenticatetoken.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/authenticatetoken.go index f61ca621a..3e7fd9497 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/authenticatetoken.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/authenticatetoken.go @@ -16,18 +16,63 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AuthenticateToken type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/authenticate/types.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/authenticate/types.ts#L22-L29 type AuthenticateToken struct { Name string `json:"name"` Type *string `json:"type,omitempty"` } +func (s *AuthenticateToken) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + } + } + return nil +} + // NewAuthenticateToken returns a AuthenticateToken. func NewAuthenticateToken() *AuthenticateToken { r := &AuthenticateToken{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/authenticationprovider.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/authenticationprovider.go index 2d401590a..5a3c48324 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/authenticationprovider.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/authenticationprovider.go @@ -16,18 +16,63 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AuthenticationProvider type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_token/types.ts#L35-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_token/types.ts#L35-L38 type AuthenticationProvider struct { Name string `json:"name"` Type string `json:"type"` } +func (s *AuthenticationProvider) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewAuthenticationProvider returns a AuthenticationProvider. func NewAuthenticationProvider() *AuthenticationProvider { r := &AuthenticationProvider{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autodatehistogramaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autodatehistogramaggregate.go index 2e76a54e6..69dbb3311 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autodatehistogramaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autodatehistogramaggregate.go @@ -16,28 +16,28 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // AutoDateHistogramAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L355-L359 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L356-L360 type AutoDateHistogramAggregate struct { Buckets BucketsDateHistogramBucket `json:"buckets"` Interval string `json:"interval"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *AutoDateHistogramAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -58,15 +58,17 @@ func (s *AutoDateHistogramAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]DateHistogramBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []DateHistogramBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autodatehistogramaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autodatehistogramaggregation.go index 3de4f8cf8..3662c2cf1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autodatehistogramaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autodatehistogramaggregation.go @@ 
-16,31 +16,157 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/minimuminterval" ) // AutoDateHistogramAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L52-L62 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L65-L100 type AutoDateHistogramAggregation struct { - Buckets *int `json:"buckets,omitempty"` - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + // Buckets The target number of buckets. + Buckets *int `json:"buckets,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + // Format The date format used to format `key_as_string` in the response. + // If no `format` is specified, the first date format specified in the field + // mapping is used. + Format *string `json:"format,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // MinimumInterval The minimum rounding interval. + // This can make the collection process more efficient, as the aggregation will + // not attempt to round at any interval lower than `minimum_interval`. 
MinimumInterval *minimuminterval.MinimumInterval `json:"minimum_interval,omitempty"` - Missing DateTime `json:"missing,omitempty"` - Name *string `json:"name,omitempty"` - Offset *string `json:"offset,omitempty"` - Params map[string]json.RawMessage `json:"params,omitempty"` - Script Script `json:"script,omitempty"` - TimeZone *string `json:"time_zone,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing DateTime `json:"missing,omitempty"` + Name *string `json:"name,omitempty"` + // Offset Time zone specified as a ISO 8601 UTC offset. + Offset *string `json:"offset,omitempty"` + Params map[string]json.RawMessage `json:"params,omitempty"` + Script Script `json:"script,omitempty"` + // TimeZone Time zone ID. + TimeZone *string `json:"time_zone,omitempty"` +} + +func (s *AutoDateHistogramAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Buckets = &value + case float64: + f := int(v) + s.Buckets = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "minimum_interval": + if err := dec.Decode(&s.MinimumInterval); err != nil { + return err + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); 
err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "offset": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Offset = &o + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return err + } + + } + } + return nil } // NewAutoDateHistogramAggregation returns a AutoDateHistogramAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autofollowedcluster.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autofollowedcluster.go index eed33e2cf..bc7e0968d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autofollowedcluster.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autofollowedcluster.go @@ -16,19 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // AutoFollowedCluster type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/stats/types.ts.ts#L27-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/stats/types.ts.ts#L27-L31 type AutoFollowedCluster struct { ClusterName string `json:"cluster_name"` LastSeenMetadataVersion int64 `json:"last_seen_metadata_version"` TimeSinceLastCheckMillis int64 `json:"time_since_last_check_millis"` } +func (s *AutoFollowedCluster) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cluster_name": + if err := dec.Decode(&s.ClusterName); err != nil { + return err + } + + case "last_seen_metadata_version": + if err := dec.Decode(&s.LastSeenMetadataVersion); err != nil { + return err + } + + case "time_since_last_check_millis": + if err := dec.Decode(&s.TimeSinceLastCheckMillis); err != nil { + return err + } + + } + } + return nil +} + // NewAutoFollowedCluster returns a AutoFollowedCluster. func NewAutoFollowedCluster() *AutoFollowedCluster { r := &AutoFollowedCluster{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autofollowpattern.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autofollowpattern.go index e497f2e68..19dbbc69f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autofollowpattern.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autofollowpattern.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // AutoFollowPattern type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/get_auto_follow_pattern/types.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/get_auto_follow_pattern/types.ts#L23-L26 type AutoFollowPattern struct { Name string `json:"name"` Pattern AutoFollowPatternSummary `json:"pattern"` } +func (s *AutoFollowPattern) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "pattern": + if err := dec.Decode(&s.Pattern); err != nil { + return err + } + + } + } + return nil +} + // NewAutoFollowPattern returns a AutoFollowPattern. func NewAutoFollowPattern() *AutoFollowPattern { r := &AutoFollowPattern{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autofollowpatternsummary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autofollowpatternsummary.go index ad4ddb00a..3c0b6ae23 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autofollowpatternsummary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autofollowpatternsummary.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AutoFollowPatternSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/get_auto_follow_pattern/types.ts#L28-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/get_auto_follow_pattern/types.ts#L28-L52 type AutoFollowPatternSummary struct { Active bool `json:"active"` // FollowIndexPattern The name of follower index. @@ -39,6 +47,83 @@ type AutoFollowPatternSummary struct { RemoteCluster string `json:"remote_cluster"` } +func (s *AutoFollowPatternSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Active = value + case bool: + s.Active = v + } + + case "follow_index_pattern": + if err := dec.Decode(&s.FollowIndexPattern); err != nil { + return err + } + + case "leader_index_exclusion_patterns": + if err := dec.Decode(&s.LeaderIndexExclusionPatterns); err != nil { + return err + } + + case "leader_index_patterns": + if err := dec.Decode(&s.LeaderIndexPatterns); err != nil { + return err + } + + case "max_outstanding_read_requests": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxOutstandingReadRequests = value + case float64: + f := int(v) + 
s.MaxOutstandingReadRequests = f + } + + case "remote_cluster": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RemoteCluster = o + + } + } + return nil +} + // NewAutoFollowPatternSummary returns a AutoFollowPatternSummary. func NewAutoFollowPatternSummary() *AutoFollowPatternSummary { r := &AutoFollowPatternSummary{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autofollowstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autofollowstats.go index dc303f2bd..f32b80b80 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autofollowstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autofollowstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AutoFollowStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/stats/types.ts.ts#L33-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/stats/types.ts.ts#L33-L39 type AutoFollowStats struct { AutoFollowedClusters []AutoFollowedCluster `json:"auto_followed_clusters"` NumberOfFailedFollowIndices int64 `json:"number_of_failed_follow_indices"` @@ -31,6 +39,81 @@ type AutoFollowStats struct { RecentAutoFollowErrors []ErrorCause `json:"recent_auto_follow_errors"` } +func (s *AutoFollowStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "auto_followed_clusters": + if err := dec.Decode(&s.AutoFollowedClusters); err != nil { + return err + } + + case "number_of_failed_follow_indices": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumberOfFailedFollowIndices = value + case float64: + f := int64(v) + s.NumberOfFailedFollowIndices = f + } + + case "number_of_failed_remote_cluster_state_requests": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumberOfFailedRemoteClusterStateRequests = value + case float64: + f := int64(v) + s.NumberOfFailedRemoteClusterStateRequests = f + } + + case "number_of_successful_follow_indices": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumberOfSuccessfulFollowIndices = value + case float64: + f := int64(v) + s.NumberOfSuccessfulFollowIndices = f + } + + case "recent_auto_follow_errors": 
+ if err := dec.Decode(&s.RecentAutoFollowErrors); err != nil { + return err + } + + } + } + return nil +} + // NewAutoFollowStats returns a AutoFollowStats. func NewAutoFollowStats() *AutoFollowStats { r := &AutoFollowStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingcapacity.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingcapacity.go index b5c3a18d2..632c70b5f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingcapacity.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingcapacity.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // AutoscalingCapacity type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L38-L41 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L38-L41 type AutoscalingCapacity struct { Node AutoscalingResources `json:"node"` Total AutoscalingResources `json:"total"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingdecider.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingdecider.go index 065f81475..22fbd40c7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingdecider.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingdecider.go @@ -16,23 +16,69 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // AutoscalingDecider type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L52-L56 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L52-L56 type AutoscalingDecider struct { ReasonDetails json.RawMessage `json:"reason_details,omitempty"` ReasonSummary *string `json:"reason_summary,omitempty"` RequiredCapacity AutoscalingCapacity `json:"required_capacity"` } +func (s *AutoscalingDecider) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "reason_details": + if err := dec.Decode(&s.ReasonDetails); err != nil { + return err + } + + case "reason_summary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ReasonSummary = &o + + case "required_capacity": + if err := dec.Decode(&s.RequiredCapacity); err != nil { + return err + } + + } + } + return nil +} + // NewAutoscalingDecider returns a AutoscalingDecider. 
func NewAutoscalingDecider() *AutoscalingDecider { r := &AutoscalingDecider{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingdeciders.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingdeciders.go index d2dc949cc..69d9866a6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingdeciders.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingdeciders.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // AutoscalingDeciders type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L31-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L31-L36 type AutoscalingDeciders struct { CurrentCapacity AutoscalingCapacity `json:"current_capacity"` CurrentNodes []AutoscalingNode `json:"current_nodes"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingnode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingnode.go index ae964df5a..9479ce4f7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingnode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingnode.go @@ -16,17 +16,49 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // AutoscalingNode type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L48-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L48-L50 type AutoscalingNode struct { Name string `json:"name"` } +func (s *AutoscalingNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + // NewAutoscalingNode returns a AutoscalingNode. func NewAutoscalingNode() *AutoscalingNode { r := &AutoscalingNode{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingpolicy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingpolicy.go index 9e0b1ff5a..4f4704e45 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingpolicy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingpolicy.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // AutoscalingPolicy type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/autoscaling/_types/AutoscalingPolicy.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/autoscaling/_types/AutoscalingPolicy.ts#L23-L27 type AutoscalingPolicy struct { // Deciders Decider settings Deciders map[string]json.RawMessage `json:"deciders"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingresources.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingresources.go index 187cb99d2..3d6d267d3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingresources.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/autoscalingresources.go @@ -16,18 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AutoscalingResources type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L43-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/autoscaling/get_autoscaling_capacity/GetAutoscalingCapacityResponse.ts#L43-L46 type AutoscalingResources struct { Memory int `json:"memory"` Storage int `json:"storage"` } +func (s *AutoscalingResources) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "memory": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Memory = value + case float64: + f := int(v) + s.Memory = f + } + + case "storage": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Storage = value + case float64: + f := int(v) + s.Storage = f + } + + } + } + return nil +} + // NewAutoscalingResources returns a AutoscalingResources. func NewAutoscalingResources() *AutoscalingResources { r := &AutoscalingResources{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/averageaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/averageaggregation.go index 8f6ce6ebb..83cfd2d2e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/averageaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/averageaggregation.go @@ -16,20 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // AverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L48-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L55-L55 type AverageAggregation struct { - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. Missing Missing `json:"missing,omitempty"` Script Script `json:"script,omitempty"` } +func (s *AverageAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil +} + // NewAverageAggregation returns a AverageAggregation. 
func NewAverageAggregation() *AverageAggregation { r := &AverageAggregation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/averagebucketaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/averagebucketaggregation.go index 970fe1b2d..613dd7eb9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/averagebucketaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/averagebucketaggregation.go @@ -16,33 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // AverageBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L69-L69 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L78-L78 type AverageBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. 
+ // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` } func (s *AverageBucketAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -62,9 +67,16 @@ func (s *AverageBucketAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -77,9 +89,16 @@ func (s *AverageBucketAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/avgaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/avgaggregate.go index b85ef5a5e..5ddcf321e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/avgaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/avgaggregate.go @@ -16,19 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // AvgAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L208-L209 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L209-L210 type AvgAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to // aggregate, // unless specified otherwise. @@ -36,6 +40,48 @@ type AvgAggregate struct { ValueAsString *string `json:"value_as_string,omitempty"` } +func (s *AvgAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + // NewAvgAggregate returns a AvgAggregate. 
func NewAvgAggregate() *AvgAggregate { r := &AvgAggregate{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/base.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/base.go index 56a7df100..2b97919e0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/base.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/base.go @@ -16,18 +16,74 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Base type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L30-L33 type Base struct { Available bool `json:"available"` Enabled bool `json:"enabled"` } +func (s *Base) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + // NewBase returns a Base. 
func NewBase() *Base { r := &Base{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/baseindicator.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/baseindicator.go new file mode 100644 index 000000000..50ce19a23 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/baseindicator.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indicatorhealthstatus" +) + +// BaseIndicator type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L42-L47 +type BaseIndicator struct { + Diagnosis []Diagnosis `json:"diagnosis,omitempty"` + Impacts []Impact `json:"impacts,omitempty"` + Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` + Symptom string `json:"symptom"` +} + +func (s *BaseIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "diagnosis": + if err := dec.Decode(&s.Diagnosis); err != nil { + return err + } + + case "impacts": + if err := dec.Decode(&s.Impacts); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "symptom": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Symptom = o + + } + } + return nil +} + +// NewBaseIndicator returns a BaseIndicator. +func NewBaseIndicator() *BaseIndicator { + r := &BaseIndicator{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/basenode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/basenode.go index ad444f651..7d6fb76b2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/basenode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/basenode.go @@ -16,17 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole" ) // BaseNode type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_spec_utils/BaseNode.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_spec_utils/BaseNode.ts#L25-L32 type BaseNode struct { Attributes map[string]string `json:"attributes"` Host string `json:"host"` @@ -36,6 +41,59 @@ type BaseNode struct { TransportAddress string `json:"transport_address"` } +func (s *BaseNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return err + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return err + } + + case "ip": + if err := dec.Decode(&s.Ip); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return err + } + + } + } + return nil +} + // NewBaseNode returns a BaseNode. 
func NewBaseNode() *BaseNode { r := &BaseNode{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/binaryproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/binaryproperty.go index eadc0a909..b17b9e9e0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/binaryproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/binaryproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // BinaryProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L49-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L49-L51 type BinaryProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -48,6 +48,7 @@ type BinaryProperty struct { } func (s *BinaryProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -62,13 +63,33 @@ func (s *BinaryProperty) UnmarshalJSON(data []byte) error { switch t { case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -77,6 +98,9 @@ func (s *BinaryProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -85,7 +109,9 @@ func (s *BinaryProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -364,23 +390,42 @@ 
func (s *BinaryProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -389,7 +434,9 @@ func (s *BinaryProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -668,20 +715,38 @@ func (s *BinaryProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v 
} case "type": @@ -694,6 +759,27 @@ func (s *BinaryProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s BinaryProperty) MarshalJSON() ([]byte, error) { + type innerBinaryProperty BinaryProperty + tmp := innerBinaryProperty{ + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + tmp.Type = "binary" + + return json.Marshal(tmp) +} + // NewBinaryProperty returns a BinaryProperty. func NewBinaryProperty() *BinaryProperty { r := &BinaryProperty{ @@ -702,7 +788,5 @@ func NewBinaryProperty() *BinaryProperty { Properties: make(map[string]Property, 0), } - r.Type = "binary" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/booleanproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/booleanproperty.go index f0cf2e962..c66458c67 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/booleanproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/booleanproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // BooleanProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L53-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L53-L59 type BooleanProperty struct { Boost *Float64 `json:"boost,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -52,6 +52,7 @@ type BooleanProperty struct { } func (s *BooleanProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -66,18 +67,49 @@ func (s *BooleanProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -91,6 +123,9 @@ func (s *BooleanProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range 
refs { @@ -99,7 +134,9 @@ func (s *BooleanProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -378,33 +415,70 @@ func (s *BooleanProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "null_value": - if err := dec.Decode(&s.NullValue); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.NullValue = &value + case bool: + s.NullValue = &v } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -413,7 +487,9 @@ func (s *BooleanProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = 
"object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -692,20 +768,38 @@ func (s *BooleanProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -718,6 +812,31 @@ func (s *BooleanProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s BooleanProperty) MarshalJSON() ([]byte, error) { + type innerBooleanProperty BooleanProperty + tmp := innerBooleanProperty{ + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fielddata: s.Fielddata, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + tmp.Type = "boolean" + + return json.Marshal(tmp) +} + // NewBooleanProperty returns a BooleanProperty. 
func NewBooleanProperty() *BooleanProperty { r := &BooleanProperty{ @@ -726,7 +845,5 @@ func NewBooleanProperty() *BooleanProperty { Properties: make(map[string]Property, 0), } - r.Type = "boolean" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/boolquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/boolquery.go index 351a94555..f4d562697 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/boolquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/boolquery.go @@ -16,21 +16,160 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // BoolQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L28-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L28-L52 type BoolQuery struct { - Boost *float32 `json:"boost,omitempty"` - Filter []Query `json:"filter,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Filter The clause (query) must appear in matching documents. + // However, unlike `must`, the score of the query will be ignored. 
+ Filter []Query `json:"filter,omitempty"` + // MinimumShouldMatch Specifies the number or percentage of `should` clauses returned documents + // must match. MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` - Must []Query `json:"must,omitempty"` - MustNot []Query `json:"must_not,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Should []Query `json:"should,omitempty"` + // Must The clause (query) must appear in matching documents and will contribute to + // the score. + Must []Query `json:"must,omitempty"` + // MustNot The clause (query) must not appear in the matching documents. + // Because scoring is ignored, a score of `0` is returned for all documents. + MustNot []Query `json:"must_not,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Should The clause (query) should appear in the matching document. + Should []Query `json:"should,omitempty"` +} + +func (s *BoolQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return err + } + } + + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return err + } + + case "must": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if 
!bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Must = append(s.Must, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Must); err != nil { + return err + } + } + + case "must_not": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.MustNot = append(s.MustNot, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.MustNot); err != nil { + return err + } + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "should": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Should = append(s.Should, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Should); err != nil { + return err + } + } + + } + } + return nil } // NewBoolQuery returns a BoolQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/boostingquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/boostingquery.go index 8e69a26f2..857a66090 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/boostingquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/boostingquery.go @@ -16,19 +16,110 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // BoostingQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L36-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L54-L67 type BoostingQuery struct { - Boost *float32 `json:"boost,omitempty"` - Negative *Query `json:"negative,omitempty"` - NegativeBoost Float64 `json:"negative_boost"` - Positive *Query `json:"positive,omitempty"` - QueryName_ *string `json:"_name,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Negative Query used to decrease the relevance score of matching documents. + Negative *Query `json:"negative,omitempty"` + // NegativeBoost Floating point number between 0 and 1.0 used to decrease the relevance scores + // of documents matching the `negative` query. + NegativeBoost Float64 `json:"negative_boost"` + // Positive Any returned documents must match this query. 
+ Positive *Query `json:"positive,omitempty"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *BoostingQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "negative": + if err := dec.Decode(&s.Negative); err != nil { + return err + } + + case "negative_boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.NegativeBoost = f + case float64: + f := Float64(v) + s.NegativeBoost = f + } + + case "positive": + if err := dec.Decode(&s.Positive); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil } // NewBoostingQuery returns a BoostingQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/boxplotaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/boxplotaggregate.go index 66af6686d..b36256a3a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/boxplotaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/boxplotaggregate.go @@ -16,33 +16,258 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // BoxPlotAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L697-L713 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L706-L722 type BoxPlotAggregate struct { - Lower Float64 `json:"lower"` - LowerAsString *string `json:"lower_as_string,omitempty"` - Max Float64 `json:"max"` - MaxAsString *string `json:"max_as_string,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Min Float64 `json:"min"` - MinAsString *string `json:"min_as_string,omitempty"` - Q1 Float64 `json:"q1"` - Q1AsString *string `json:"q1_as_string,omitempty"` - Q2 Float64 `json:"q2"` - Q2AsString *string `json:"q2_as_string,omitempty"` - Q3 Float64 `json:"q3"` - Q3AsString *string `json:"q3_as_string,omitempty"` - Upper Float64 `json:"upper"` - UpperAsString *string `json:"upper_as_string,omitempty"` + Lower Float64 `json:"lower"` + LowerAsString *string `json:"lower_as_string,omitempty"` + Max Float64 `json:"max"` + MaxAsString *string `json:"max_as_string,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Min Float64 `json:"min"` + MinAsString *string `json:"min_as_string,omitempty"` + Q1 Float64 `json:"q1"` + Q1AsString *string `json:"q1_as_string,omitempty"` + Q2 Float64 `json:"q2"` + Q2AsString *string `json:"q2_as_string,omitempty"` + Q3 Float64 `json:"q3"` + Q3AsString *string `json:"q3_as_string,omitempty"` + Upper Float64 `json:"upper"` + UpperAsString *string `json:"upper_as_string,omitempty"` +} + +func (s *BoxPlotAggregate) UnmarshalJSON(data []byte) error { + 
+ dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lower": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Lower = f + case float64: + f := Float64(v) + s.Lower = f + } + + case "lower_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LowerAsString = &o + + case "max": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Max = f + case float64: + f := Float64(v) + s.Max = f + } + + case "max_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxAsString = &o + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "min": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Min = f + case float64: + f := Float64(v) + s.Min = f + } + + case "min_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MinAsString = &o + + case "q1": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Q1 = f + case float64: + f := Float64(v) + s.Q1 = f + } + + case 
"q1_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Q1AsString = &o + + case "q2": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Q2 = f + case float64: + f := Float64(v) + s.Q2 = f + } + + case "q2_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Q2AsString = &o + + case "q3": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Q3 = f + case float64: + f := Float64(v) + s.Q3 = f + } + + case "q3_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Q3AsString = &o + + case "upper": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Upper = f + case float64: + f := Float64(v) + s.Upper = f + } + + case "upper_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UpperAsString = &o + + } + } + return nil } // NewBoxPlotAggregate returns a BoxPlotAggregate. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/boxplotaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/boxplotaggregation.go index 0675cddc6..90f6e9377 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/boxplotaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/boxplotaggregation.go @@ -16,18 +16,83 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // BoxplotAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L50-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L57-L62 type BoxplotAggregation struct { + // Compression Limits the maximum number of nodes used by the underlying TDigest algorithm + // to `20 * compression`, enabling control of memory usage and approximation + // error. Compression *Float64 `json:"compression,omitempty"` - Field *string `json:"field,omitempty"` - Missing Missing `json:"missing,omitempty"` - Script Script `json:"script,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. 
+ Missing Missing `json:"missing,omitempty"` + Script Script `json:"script,omitempty"` +} + +func (s *BoxplotAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compression": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Compression = &f + case float64: + f := Float64(v) + s.Compression = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil } // NewBoxplotAggregation returns a BoxplotAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/breaker.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/breaker.go index 41bcae98b..a64fa570f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/breaker.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/breaker.go @@ -16,20 +16,142 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Breaker type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L179-L186 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L434-L459 type Breaker struct { - EstimatedSize *string `json:"estimated_size,omitempty"` - EstimatedSizeInBytes *int64 `json:"estimated_size_in_bytes,omitempty"` - LimitSize *string `json:"limit_size,omitempty"` - LimitSizeInBytes *int64 `json:"limit_size_in_bytes,omitempty"` - Overhead *float32 `json:"overhead,omitempty"` - Tripped *float32 `json:"tripped,omitempty"` + // EstimatedSize Estimated memory used for the operation. + EstimatedSize *string `json:"estimated_size,omitempty"` + // EstimatedSizeInBytes Estimated memory used, in bytes, for the operation. + EstimatedSizeInBytes *int64 `json:"estimated_size_in_bytes,omitempty"` + // LimitSize Memory limit for the circuit breaker. + LimitSize *string `json:"limit_size,omitempty"` + // LimitSizeInBytes Memory limit, in bytes, for the circuit breaker. + LimitSizeInBytes *int64 `json:"limit_size_in_bytes,omitempty"` + // Overhead A constant that all estimates for the circuit breaker are multiplied with to + // calculate a final estimate. + Overhead *float32 `json:"overhead,omitempty"` + // Tripped Total number of times the circuit breaker has been triggered and prevented an + // out of memory error. 
+ Tripped *float32 `json:"tripped,omitempty"` +} + +func (s *Breaker) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "estimated_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.EstimatedSize = &o + + case "estimated_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.EstimatedSizeInBytes = &value + case float64: + f := int64(v) + s.EstimatedSizeInBytes = &f + } + + case "limit_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LimitSize = &o + + case "limit_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LimitSizeInBytes = &value + case float64: + f := int64(v) + s.LimitSizeInBytes = &f + } + + case "overhead": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Overhead = &f + case float64: + f := float32(v) + s.Overhead = &f + } + + case "tripped": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Tripped = &f + case float64: + f := float32(v) + s.Tripped = &f + } + + } + } + return nil } // NewBreaker returns a Breaker. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketcorrelationaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketcorrelationaggregation.go index 70f95dc5d..b1a2e20ad 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketcorrelationaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketcorrelationaggregation.go @@ -16,31 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // BucketCorrelationAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L114-L120 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L129-L135 type BucketCorrelationAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. BucketsPath BucketsPath `json:"buckets_path,omitempty"` // Function The correlation function to execute. 
- Function BucketCorrelationFunction `json:"function"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` + Function BucketCorrelationFunction `json:"function"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` } func (s *BucketCorrelationAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -70,9 +71,16 @@ func (s *BucketCorrelationAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketcorrelationfunction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketcorrelationfunction.go index f8c862807..968b1db54 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketcorrelationfunction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketcorrelationfunction.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // BucketCorrelationFunction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L122-L127 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L137-L142 type BucketCorrelationFunction struct { // CountCorrelation The configuration to calculate a count correlation. 
This function is designed // for determining the correlation of a term value and a given metric. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketcorrelationfunctioncountcorrelation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketcorrelationfunctioncountcorrelation.go index bd4dbcea4..f45b7fc78 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketcorrelationfunctioncountcorrelation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketcorrelationfunctioncountcorrelation.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // BucketCorrelationFunctionCountCorrelation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L129-L132 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L144-L147 type BucketCorrelationFunctionCountCorrelation struct { // Indicator The indicator with which to correlate the configured `bucket_path` values. 
Indicator BucketCorrelationFunctionCountCorrelationIndicator `json:"indicator"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketcorrelationfunctioncountcorrelationindicator.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketcorrelationfunctioncountcorrelationindicator.go index 2c16702b5..f83973459 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketcorrelationfunctioncountcorrelationindicator.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketcorrelationfunctioncountcorrelationindicator.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // BucketCorrelationFunctionCountCorrelationIndicator type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L134-L152 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L149-L167 type BucketCorrelationFunctionCountCorrelationIndicator struct { // DocCount The total number of documents that initially created the expectations. 
It’s // required to be greater @@ -43,6 +51,52 @@ type BucketCorrelationFunctionCountCorrelationIndicator struct { Fractions []Float64 `json:"fractions,omitempty"` } +func (s *BucketCorrelationFunctionCountCorrelationIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DocCount = value + case float64: + f := int(v) + s.DocCount = f + } + + case "expectations": + if err := dec.Decode(&s.Expectations); err != nil { + return err + } + + case "fractions": + if err := dec.Decode(&s.Fractions); err != nil { + return err + } + + } + } + return nil +} + // NewBucketCorrelationFunctionCountCorrelationIndicator returns a BucketCorrelationFunctionCountCorrelationIndicator. func NewBucketCorrelationFunctionCountCorrelationIndicator() *BucketCorrelationFunctionCountCorrelationIndicator { r := &BucketCorrelationFunctionCountCorrelationIndicator{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketinfluencer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketinfluencer.go index 4ed6eee6c..99a365b68 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketinfluencer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketinfluencer.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // BucketInfluencer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Bucket.ts#L80-L128 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Bucket.ts#L80-L128 type BucketInfluencer struct { // AnomalyScore A normalized score between 0-100, which is calculated for each bucket // influencer. This score might be updated as @@ -58,6 +66,141 @@ type BucketInfluencer struct { TimestampString DateTime `json:"timestamp_string,omitempty"` } +func (s *BucketInfluencer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "anomaly_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.AnomalyScore = f + case float64: + f := Float64(v) + s.AnomalyScore = f + } + + case "bucket_span": + if err := dec.Decode(&s.BucketSpan); err != nil { + return err + } + + case "influencer_field_name": + if err := dec.Decode(&s.InfluencerFieldName); err != nil { + return err + } + + case "initial_anomaly_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.InitialAnomalyScore = f + case float64: + f := Float64(v) + s.InitialAnomalyScore = f + } + + case "is_interim": + var tmp interface{} + 
dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsInterim = value + case bool: + s.IsInterim = v + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return err + } + + case "probability": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Probability = f + case float64: + f := Float64(v) + s.Probability = f + } + + case "raw_anomaly_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.RawAnomalyScore = f + case float64: + f := Float64(v) + s.RawAnomalyScore = f + } + + case "result_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultType = o + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + case "timestamp_string": + if err := dec.Decode(&s.TimestampString); err != nil { + return err + } + + } + } + return nil +} + // NewBucketInfluencer returns a BucketInfluencer. func NewBucketInfluencer() *BucketInfluencer { r := &BucketInfluencer{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketksaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketksaggregation.go index c6210ea39..7a2ecb8dd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketksaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketksaggregation.go @@ -16,21 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // BucketKsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L79-L112 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L94-L127 type BucketKsAggregation struct { // Alternative A list of string values indicating which K-S test alternative to calculate. // The valid values @@ -50,9 +50,9 @@ type BucketKsAggregation struct { // documents are uniformly distributed on these buckets, which they would be if // one used equal percentiles of a // metric to define the bucket end points. - Fractions []Float64 `json:"fractions,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` + Fractions []Float64 `json:"fractions,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` // SamplingMethod Indicates the sampling methodology when calculating the K-S test. Note, this // is sampling of the returned values. 
// This determines the cumulative distribution function (CDF) points used @@ -64,6 +64,7 @@ type BucketKsAggregation struct { } func (s *BucketKsAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -98,14 +99,28 @@ func (s *BucketKsAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o case "sampling_method": - if err := dec.Decode(&s.SamplingMethod); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SamplingMethod = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketmetricvalueaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketmetricvalueaggregate.go index 7c9fd5357..d4d2ef7a1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketmetricvalueaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketmetricvalueaggregate.go @@ -16,20 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // BucketMetricValueAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L232-L235 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L233-L236 type BucketMetricValueAggregate struct { - Keys []string `json:"keys"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Keys []string `json:"keys"` + Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to // aggregate, // unless specified otherwise. @@ -37,6 +41,53 @@ type BucketMetricValueAggregate struct { ValueAsString *string `json:"value_as_string,omitempty"` } +func (s *BucketMetricValueAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "keys": + if err := dec.Decode(&s.Keys); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + // NewBucketMetricValueAggregate returns a BucketMetricValueAggregate. 
func NewBucketMetricValueAggregate() *BucketMetricValueAggregate { r := &BucketMetricValueAggregate{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketpathaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketpathaggregation.go index 344cff050..e9f321574 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketpathaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketpathaggregation.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // BucketPathAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L31-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L31-L37 type BucketPathAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
- BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` } func (s *BucketPathAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -63,9 +64,16 @@ func (s *BucketPathAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsadjacencymatrixbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsadjacencymatrixbucket.go index 5c7244d0b..9766afa3c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsadjacencymatrixbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsadjacencymatrixbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]AdjacencyMatrixBucket // []AdjacencyMatrixBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsAdjacencyMatrixBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketscompositebucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketscompositebucket.go index dcd769073..6a355a7c5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketscompositebucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketscompositebucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]CompositeBucket // []CompositeBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsCompositeBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketscriptaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketscriptaggregation.go index d74964054..61843b24b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketscriptaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketscriptaggregation.go @@ -16,34 +16,40 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // BucketScriptAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L71-L73 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L80-L85 type BucketScriptAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Script Script `json:"script,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Script The script to run for this aggregation. 
+ Script Script `json:"script,omitempty"` } func (s *BucketScriptAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -63,9 +69,16 @@ func (s *BucketScriptAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -78,9 +91,16 @@ func (s *BucketScriptAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o case "script": if err := dec.Decode(&s.Script); err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsdatehistogrambucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsdatehistogrambucket.go index 751c53e6d..d3b5b383c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsdatehistogrambucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsdatehistogrambucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]DateHistogramBucket // []DateHistogramBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsDateHistogramBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsdoubletermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsdoubletermsbucket.go index 11aa02d3c..c9de0cc74 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsdoubletermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsdoubletermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]DoubleTermsBucket // []DoubleTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsDoubleTermsBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketselectoraggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketselectoraggregation.go index be0155b0d..86152f311 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketselectoraggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketselectoraggregation.go @@ -16,34 +16,40 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // BucketSelectorAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L75-L77 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L87-L92 type BucketSelectorAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Script Script `json:"script,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Script The script to run for this aggregation. 
+ Script Script `json:"script,omitempty"` } func (s *BucketSelectorAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -63,9 +69,16 @@ func (s *BucketSelectorAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -78,9 +91,16 @@ func (s *BucketSelectorAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o case "script": if err := dec.Decode(&s.Script); err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsfiltersbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsfiltersbucket.go index 25206d898..dd4ae5011 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsfiltersbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsfiltersbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]FiltersBucket // []FiltersBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsFiltersBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsfrequentitemsetsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsfrequentitemsetsbucket.go new file mode 100644 index 000000000..e8439c032 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsfrequentitemsetsbucket.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +// BucketsFrequentItemSetsBucket holds the union for the following types: +// +// map[string]FrequentItemSetsBucket +// []FrequentItemSetsBucket +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 +type BucketsFrequentItemSetsBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsgeohashgridbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsgeohashgridbucket.go index 74ecb38ea..6f0938a0a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsgeohashgridbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsgeohashgridbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]GeoHashGridBucket // []GeoHashGridBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsGeoHashGridBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsgeohexgridbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsgeohexgridbucket.go index a5b26d1ce..6e25ca92c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsgeohexgridbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsgeohexgridbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]GeoHexGridBucket // []GeoHexGridBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsGeoHexGridBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsgeotilegridbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsgeotilegridbucket.go index edeab34d4..2675c7e11 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsgeotilegridbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsgeotilegridbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]GeoTileGridBucket // []GeoTileGridBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsGeoTileGridBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketshistogrambucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketshistogrambucket.go index 88f2d2319..46c6da8e7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketshistogrambucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketshistogrambucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]HistogramBucket // []HistogramBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsHistogramBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsipprefixbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsipprefixbucket.go index 525c00a20..014e9da65 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsipprefixbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsipprefixbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]IpPrefixBucket // []IpPrefixBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsIpPrefixBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsiprangebucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsiprangebucket.go index b165ae296..e9147b272 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsiprangebucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsiprangebucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]IpRangeBucket // []IpRangeBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsIpRangeBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketslongraretermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketslongraretermsbucket.go index 8ac15ad87..d72ad257c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketslongraretermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketslongraretermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]LongRareTermsBucket // []LongRareTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsLongRareTermsBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketslongtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketslongtermsbucket.go index 1330bcff6..8d3a3750f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketslongtermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketslongtermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]LongTermsBucket // []LongTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsLongTermsBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsmultitermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsmultitermsbucket.go index 1eb52c6e4..aeea9b7fb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsmultitermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsmultitermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]MultiTermsBucket // []MultiTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsMultiTermsBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsortaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsortaggregation.go index 37bf5c098..d00123494 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsortaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsortaggregation.go @@ -16,26 +16,125 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // BucketSortAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L154-L159 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L169-L190 type BucketSortAggregation struct { - From *int `json:"from,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Size *int `json:"size,omitempty"` - Sort []SortCombinations `json:"sort,omitempty"` + // From Buckets in positions prior to `from` will be truncated. + From *int `json:"from,omitempty"` + // GapPolicy The policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Size The number of buckets to return. + // Defaults to all buckets of the parent aggregation. + Size *int `json:"size,omitempty"` + // Sort The list of fields to sort on. 
+ Sort []SortCombinations `json:"sort,omitempty"` +} + +func (s *BucketSortAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "from": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.From = &value + case float64: + f := int(v) + s.From = &f + } + + case "gap_policy": + if err := dec.Decode(&s.GapPolicy); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return err + } + } + + } + } + return nil } // NewBucketSortAggregation returns a BucketSortAggregation. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketspath.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketspath.go index 7c968203d..19a5aff46 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketspath.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketspath.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,5 +26,5 @@ package types // []string // map[string]string // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L44-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L53-L59 type BucketsPath interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsquery.go index ea8c69d78..2d0bd9e9a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]Query // []Query // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsQuery interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsrangebucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsrangebucket.go index 1b56e27e8..ef25ae50f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsrangebucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsrangebucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]RangeBucket // []RangeBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsRangeBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketssignificantlongtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketssignificantlongtermsbucket.go index 1d5f493c1..b470e88f4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketssignificantlongtermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketssignificantlongtermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]SignificantLongTermsBucket // []SignificantLongTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsSignificantLongTermsBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketssignificantstringtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketssignificantstringtermsbucket.go index 2cf9bf1f2..ce4cb63ec 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketssignificantstringtermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketssignificantstringtermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]SignificantStringTermsBucket // []SignificantStringTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsSignificantStringTermsBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsstringraretermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsstringraretermsbucket.go index 3153431e2..1cbf0c462 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsstringraretermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsstringraretermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]StringRareTermsBucket // []StringRareTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsStringRareTermsBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsstringtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsstringtermsbucket.go index 4f66cdd37..4ef8116d5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsstringtermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsstringtermsbucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]StringTermsBucket // []StringTermsBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsStringTermsBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsummary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsummary.go index 5b07bf9f2..51bb54fa0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsummary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsummary.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // BucketSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Bucket.ts#L31-L78 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Bucket.ts#L31-L78 type BucketSummary struct { // AnomalyScore The maximum anomaly score, between 0-100, for any of the bucket influencers. 
// This is an overall, rate-limited @@ -60,6 +68,129 @@ type BucketSummary struct { TimestampString DateTime `json:"timestamp_string,omitempty"` } +func (s *BucketSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "anomaly_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.AnomalyScore = f + case float64: + f := Float64(v) + s.AnomalyScore = f + } + + case "bucket_influencers": + if err := dec.Decode(&s.BucketInfluencers); err != nil { + return err + } + + case "bucket_span": + if err := dec.Decode(&s.BucketSpan); err != nil { + return err + } + + case "event_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.EventCount = value + case float64: + f := int64(v) + s.EventCount = f + } + + case "initial_anomaly_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.InitialAnomalyScore = f + case float64: + f := Float64(v) + s.InitialAnomalyScore = f + } + + case "is_interim": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsInterim = value + case bool: + s.IsInterim = v + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return err + } + + case "processing_time_ms": + if err := dec.Decode(&s.ProcessingTimeMs); err != nil { + return err + } + + case "result_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := 
string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultType = o + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + case "timestamp_string": + if err := dec.Decode(&s.TimestampString); err != nil { + return err + } + + } + } + return nil +} + // NewBucketSummary returns a BucketSummary. func NewBucketSummary() *BucketSummary { r := &BucketSummary{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsvariablewidthhistogrambucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsvariablewidthhistogrambucket.go index c41c3dd31..d4481743f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsvariablewidthhistogrambucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsvariablewidthhistogrambucket.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // map[string]VariableWidthHistogramBucket // []VariableWidthHistogramBucket // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsVariableWidthHistogramBucket interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsvoid.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsvoid.go index 5ab0ca16d..573fa4294 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsvoid.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bucketsvoid.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // BucketsVoid holds the union for the following types: // -// map[string]struct{} -// []struct{} +// map[string]interface{} +// []interface{} // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L315-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L316-L325 type BucketsVoid interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/buildinformation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/buildinformation.go index ca683db0a..0fdc95082 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/buildinformation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/buildinformation.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // BuildInformation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/info/types.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/info/types.ts#L24-L27 type BuildInformation struct { Date DateTime `json:"date"` Hash string `json:"hash"` } +func (s *BuildInformation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "date": + if err := dec.Decode(&s.Date); err != nil { + return err + } + + case "hash": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Hash = o + + } + } + return nil +} + // NewBuildInformation returns a BuildInformation. func NewBuildInformation() *BuildInformation { r := &BuildInformation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bulkindexbyscrollfailure.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bulkindexbyscrollfailure.go index 7e1b33df6..501c5b5c0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bulkindexbyscrollfailure.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bulkindexbyscrollfailure.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // BulkIndexByScrollFailure type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Errors.ts#L58-L64 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Errors.ts#L58-L64 type BulkIndexByScrollFailure struct { Cause ErrorCause `json:"cause"` Id string `json:"id"` @@ -31,6 +39,69 @@ type BulkIndexByScrollFailure struct { Type string `json:"type"` } +func (s *BulkIndexByScrollFailure) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cause": + if err := dec.Decode(&s.Cause); err != nil { + return err + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "status": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Status = value + case float64: + f := int(v) + s.Status = f + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewBulkIndexByScrollFailure returns a BulkIndexByScrollFailure. func NewBulkIndexByScrollFailure() *BulkIndexByScrollFailure { r := &BulkIndexByScrollFailure{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bulkstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bulkstats.go index 1ff598037..a4bf6d30e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bulkstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bulkstats.go @@ -16,13 +16,21 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // BulkStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L41-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L68-L78 type BulkStats struct { AvgSize ByteSize `json:"avg_size,omitempty"` AvgSizeInBytes int64 `json:"avg_size_in_bytes"` @@ -35,6 +43,101 @@ type BulkStats struct { TotalTimeInMillis int64 `json:"total_time_in_millis"` } +func (s *BulkStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg_size": + if err := dec.Decode(&s.AvgSize); err != nil { + return err + } + + case "avg_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.AvgSizeInBytes = value + case float64: + f := int64(v) + s.AvgSizeInBytes = f + } + + case "avg_time": + if err := dec.Decode(&s.AvgTime); err != nil { + return err + } + + case "avg_time_in_millis": + if err := dec.Decode(&s.AvgTimeInMillis); err != nil { + return err + } + + case "total_operations": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalOperations = value + case float64: + f := int64(v) + s.TotalOperations = f + } + + case "total_size": + if err := 
dec.Decode(&s.TotalSize); err != nil { + return err + } + + case "total_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalSizeInBytes = value + case float64: + f := int64(v) + s.TotalSizeInBytes = f + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return err + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return err + } + + } + } + return nil +} + // NewBulkStats returns a BulkStats. func NewBulkStats() *BulkStats { r := &BulkStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bytenumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bytenumberproperty.go index 7da668702..0961b7bf2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bytenumberproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bytenumberproperty.go @@ -16,25 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // ByteNumberProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L161-L164 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L164-L167 type ByteNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -63,6 +63,7 @@ type ByteNumberProperty struct { } func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -77,23 +78,63 @@ func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case 
bool: + s.DocValues = &v } case "dynamic": @@ -102,6 +143,9 @@ func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -110,7 +154,9 @@ func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -389,28 +435,62 @@ func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } @@ -426,6 +506,9 @@ func (s *ByteNumberProperty) UnmarshalJSON(data []byte) 
error { } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -434,7 +517,9 @@ func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -713,9 +798,11 @@ func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -725,18 +812,43 @@ func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "time_series_dimension": - if err := dec.Decode(&s.TimeSeriesDimension); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v } case "time_series_metric": @@ -754,6 +866,36 @@ func (s *ByteNumberProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s ByteNumberProperty) MarshalJSON() ([]byte, error) { + 
type innerByteNumberProperty ByteNumberProperty + tmp := innerByteNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Similarity: s.Similarity, + Store: s.Store, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "byte" + + return json.Marshal(tmp) +} + // NewByteNumberProperty returns a ByteNumberProperty. func NewByteNumberProperty() *ByteNumberProperty { r := &ByteNumberProperty{ @@ -762,7 +904,5 @@ func NewByteNumberProperty() *ByteNumberProperty { Properties: make(map[string]Property, 0), } - r.Type = "byte" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bytesize.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bytesize.go index a63afaa59..ce3fcea6b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bytesize.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bytesize.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // int64 // string // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L88-L89 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L90-L91 type ByteSize interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bytesprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bytesprocessor.go index a3ed18a8a..f9b7e7b8f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bytesprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/bytesprocessor.go @@ -16,22 +16,141 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // BytesProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L123-L127 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L381-L397 type BytesProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to convert. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the converted value to. + // By default, the field is updated in-place. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *BytesProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewBytesProcessor returns a BytesProcessor. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cachequeries.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cachequeries.go index e4b7b66b8..5a1736d47 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cachequeries.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cachequeries.go @@ -16,17 +16,59 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CacheQueries type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L398-L400 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L398-L400 type CacheQueries struct { Enabled bool `json:"enabled"` } +func (s *CacheQueries) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + // NewCacheQueries returns a CacheQueries. 
func NewCacheQueries() *CacheQueries { r := &CacheQueries{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cachestats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cachestats.go index e9e0505d6..91a0293d8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cachestats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cachestats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CacheStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/enrich/stats/types.ts#L37-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/enrich/stats/types.ts#L37-L43 type CacheStats struct { Count int `json:"count"` Evictions int `json:"evictions"` @@ -31,6 +39,95 @@ type CacheStats struct { NodeId string `json:"node_id"` } +func (s *CacheStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "evictions": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Evictions = value + case float64: + f := 
int(v) + s.Evictions = f + } + + case "hits": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Hits = value + case float64: + f := int(v) + s.Hits = f + } + + case "misses": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Misses = value + case float64: + f := int(v) + s.Misses = f + } + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return err + } + + } + } + return nil +} + // NewCacheStats returns a CacheStats. func NewCacheStats() *CacheStats { r := &CacheStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/calendar.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/calendar.go index 89881df7b..5600f2480 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/calendar.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/calendar.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Calendar type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_calendars/types.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_calendars/types.ts#L22-L29 type Calendar struct { // CalendarId A string that uniquely identifies a calendar. 
CalendarId string `json:"calendar_id"` @@ -32,6 +40,48 @@ type Calendar struct { JobIds []string `json:"job_ids"` } +func (s *Calendar) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "calendar_id": + if err := dec.Decode(&s.CalendarId); err != nil { + return err + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "job_ids": + if err := dec.Decode(&s.JobIds); err != nil { + return err + } + + } + } + return nil +} + // NewCalendar returns a Calendar. func NewCalendar() *Calendar { r := &Calendar{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/calendarevent.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/calendarevent.go index aafae59bb..7b972b97f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/calendarevent.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/calendarevent.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CalendarEvent type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/CalendarEvent.ts#L23-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/CalendarEvent.ts#L23-L33 type CalendarEvent struct { // CalendarId A string that uniquely identifies a calendar. CalendarId *string `json:"calendar_id,omitempty"` @@ -37,6 +45,58 @@ type CalendarEvent struct { StartTime DateTime `json:"start_time"` } +func (s *CalendarEvent) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "calendar_id": + if err := dec.Decode(&s.CalendarId); err != nil { + return err + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "end_time": + if err := dec.Decode(&s.EndTime); err != nil { + return err + } + + case "event_id": + if err := dec.Decode(&s.EventId); err != nil { + return err + } + + case "start_time": + if err := dec.Decode(&s.StartTime); err != nil { + return err + } + + } + } + return nil +} + // NewCalendarEvent returns a CalendarEvent. func NewCalendarEvent() *CalendarEvent { r := &CalendarEvent{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cardinalityaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cardinalityaggregate.go index a8637d6c1..420fcbc3f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cardinalityaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cardinalityaggregate.go @@ -16,20 +16,64 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // CardinalityAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L137-L140 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L138-L141 type CardinalityAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Value int64 `json:"value"` + Meta Metadata `json:"meta,omitempty"` + Value int64 `json:"value"` +} + +func (s *CardinalityAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "value": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Value = value + case float64: + f := int64(v) + s.Value = f + } + + } + } + return nil } // NewCardinalityAggregate returns a CardinalityAggregate. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cardinalityaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cardinalityaggregation.go index 9cf681f71..825b3b67a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cardinalityaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cardinalityaggregation.go @@ -16,24 +16,106 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cardinalityexecutionmode" ) // CardinalityAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L62-L66 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L87-L99 type CardinalityAggregation struct { - ExecutionHint *cardinalityexecutionmode.CardinalityExecutionMode `json:"execution_hint,omitempty"` - Field *string `json:"field,omitempty"` - Missing Missing `json:"missing,omitempty"` - PrecisionThreshold *int `json:"precision_threshold,omitempty"` - Rehash *bool `json:"rehash,omitempty"` - Script Script `json:"script,omitempty"` + // ExecutionHint Mechanism by which cardinality aggregations is run. + ExecutionHint *cardinalityexecutionmode.CardinalityExecutionMode `json:"execution_hint,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + // Missing The value to apply to documents that do not have a value. 
+ // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + // PrecisionThreshold A unique count below which counts are expected to be close to accurate. + // This allows to trade memory for accuracy. + PrecisionThreshold *int `json:"precision_threshold,omitempty"` + Rehash *bool `json:"rehash,omitempty"` + Script Script `json:"script,omitempty"` +} + +func (s *CardinalityAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "execution_hint": + if err := dec.Decode(&s.ExecutionHint); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "precision_threshold": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PrecisionThreshold = &value + case float64: + f := int(v) + s.PrecisionThreshold = &f + } + + case "rehash": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Rehash = &value + case bool: + s.Rehash = &v + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil } // NewCardinalityAggregation returns a CardinalityAggregation. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/catanonalydetectorcolumns.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/catanonalydetectorcolumns.go index 44c8606b5..b44ae6937 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/catanonalydetectorcolumns.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/catanonalydetectorcolumns.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,5 +26,5 @@ import ( // CatAnonalyDetectorColumns type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/_types/CatBase.ts#L402-L404 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/_types/CatBase.ts#L402-L404 type CatAnonalyDetectorColumns []catanomalydetectorcolumn.CatAnomalyDetectorColumn diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/catcomponenttemplate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/catcomponenttemplate.go index 9279441a0..eedbd4173 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/catcomponenttemplate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/catcomponenttemplate.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CatComponentTemplate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/component_templates/types.ts#L20-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/component_templates/types.ts#L20-L28 type CatComponentTemplate struct { AliasCount string `json:"alias_count"` IncludedIn string `json:"included_in"` @@ -33,6 +41,110 @@ type CatComponentTemplate struct { Version string `json:"version"` } +func (s *CatComponentTemplate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alias_count": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AliasCount = o + + case "included_in": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IncludedIn = o + + case "mapping_count": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MappingCount = o + + case "metadata_count": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MetadataCount = 
o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "settings_count": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SettingsCount = o + + case "version": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Version = o + + } + } + return nil +} + // NewCatComponentTemplate returns a CatComponentTemplate. func NewCatComponentTemplate() *CatComponentTemplate { r := &CatComponentTemplate{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/catdatafeedcolumns.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/catdatafeedcolumns.go index 034ffab7b..f24056fd7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/catdatafeedcolumns.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/catdatafeedcolumns.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,5 +26,5 @@ import ( // CatDatafeedColumns type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/_types/CatBase.ts#L559-L559 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/_types/CatBase.ts#L559-L559 type CatDatafeedColumns []catdatafeedcolumn.CatDatafeedColumn diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/catdfacolumns.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/catdfacolumns.go index c4ca28e22..3f0f13937 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/catdfacolumns.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/catdfacolumns.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,5 +26,5 @@ import ( // CatDfaColumns type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/_types/CatBase.ts#L558-L558 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/_types/CatBase.ts#L558-L558 type CatDfaColumns []catdfacolumn.CatDfaColumn diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/categorizationanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/categorizationanalyzer.go index 2cf8bc277..7c4ddcc1f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/categorizationanalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/categorizationanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // string // CategorizationAnalyzerDefinition // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Analysis.ts#L124-L125 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Analysis.ts#L181-L182 type CategorizationAnalyzer interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/categorizationanalyzerdefinition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/categorizationanalyzerdefinition.go index 0bb6d5d9b..111508109 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/categorizationanalyzerdefinition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/categorizationanalyzerdefinition.go @@ -16,21 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // CategorizationAnalyzerDefinition type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Analysis.ts#L127-L140 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Analysis.ts#L184-L197 type CategorizationAnalyzerDefinition struct { // CharFilter One or more character filters. 
In addition to the built-in character filters, // other plugins can provide more character filters. If this property is not @@ -61,6 +60,7 @@ type CategorizationAnalyzerDefinition struct { } func (s *CategorizationAnalyzerDefinition) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -82,6 +82,7 @@ func (s *CategorizationAnalyzerDefinition) UnmarshalJSON(data []byte) error { switch rawMsg[0] { case '{': + source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) kind := make(map[string]string, 0) @@ -92,37 +93,37 @@ func (s *CategorizationAnalyzerDefinition) UnmarshalJSON(data []byte) error { case "html_strip": o := NewHtmlStripCharFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.CharFilter = append(s.CharFilter, *o) case "mapping": o := NewMappingCharFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.CharFilter = append(s.CharFilter, *o) case "pattern_replace": o := NewPatternReplaceCharFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.CharFilter = append(s.CharFilter, *o) case "icu_normalizer": o := NewIcuNormalizationCharFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.CharFilter = append(s.CharFilter, *o) case "kuromoji_iteration_mark": o := NewKuromojiIterationMarkCharFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.CharFilter = append(s.CharFilter, *o) default: o := new(interface{}) - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.CharFilter = append(s.CharFilter, *o) @@ -130,7 +131,7 @@ func (s *CategorizationAnalyzerDefinition) UnmarshalJSON(data []byte) error { default: source := bytes.NewReader(rawMsg) o := new(interface{}) - 
if err := json.NewDecoder(source).Decode(o); err != nil { + if err := json.NewDecoder(source).Decode(&o); err != nil { return err } s.CharFilter = append(s.CharFilter, *o) @@ -145,6 +146,7 @@ func (s *CategorizationAnalyzerDefinition) UnmarshalJSON(data []byte) error { switch rawMsg[0] { case '{': + source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) kind := make(map[string]string, 0) @@ -155,295 +157,295 @@ func (s *CategorizationAnalyzerDefinition) UnmarshalJSON(data []byte) error { case "asciifolding": o := NewAsciiFoldingTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "common_grams": o := NewCommonGramsTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "condition": o := NewConditionTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "delimited_payload": o := NewDelimitedPayloadTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "edge_ngram": o := NewEdgeNGramTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "elision": o := NewElisionTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "fingerprint": o := NewFingerprintTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "hunspell": o := NewHunspellTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = 
append(s.Filter, *o) case "hyphenation_decompounder": o := NewHyphenationDecompounderTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "keep_types": o := NewKeepTypesTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "keep": o := NewKeepWordsTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "keyword_marker": o := NewKeywordMarkerTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "kstem": o := NewKStemTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "length": o := NewLengthTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "limit": o := NewLimitTokenCountTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "lowercase": o := NewLowercaseTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "multiplexer": o := NewMultiplexerTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "ngram": o := NewNGramTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "nori_part_of_speech": o := NewNoriPartOfSpeechTokenFilter() - if err := localDec.Decode(o); err != nil { 
+ if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "pattern_capture": o := NewPatternCaptureTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "pattern_replace": o := NewPatternReplaceTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "porter_stem": o := NewPorterStemTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "predicate_token_filter": o := NewPredicateTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "remove_duplicates": o := NewRemoveDuplicatesTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "reverse": o := NewReverseTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "shingle": o := NewShingleTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "snowball": o := NewSnowballTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "stemmer_override": o := NewStemmerOverrideTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "stemmer": o := NewStemmerTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case 
"stop": o := NewStopTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "synonym_graph": o := NewSynonymGraphTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "synonym": o := NewSynonymTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "trim": o := NewTrimTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "truncate": o := NewTruncateTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "unique": o := NewUniqueTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "uppercase": o := NewUppercaseTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "word_delimiter_graph": o := NewWordDelimiterGraphTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "word_delimiter": o := NewWordDelimiterTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "kuromoji_stemmer": o := NewKuromojiStemmerTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "kuromoji_readingform": o := NewKuromojiReadingFormTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := 
localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "kuromoji_part_of_speech": o := NewKuromojiPartOfSpeechTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "icu_tokenizer": o := NewIcuTokenizer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "icu_collation": o := NewIcuCollationTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "icu_folding": o := NewIcuFoldingTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "icu_normalizer": o := NewIcuNormalizationTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "icu_transform": o := NewIcuTransformTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "phonetic": o := NewPhoneticTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) case "dictionary_decompounder": o := NewDictionaryDecompounderTokenFilter() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) default: o := new(interface{}) - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) @@ -451,7 +453,7 @@ func (s *CategorizationAnalyzerDefinition) UnmarshalJSON(data []byte) error { default: source := bytes.NewReader(rawMsg) o := new(interface{}) - if err := 
json.NewDecoder(source).Decode(o); err != nil { + if err := json.NewDecoder(source).Decode(&o); err != nil { return err } s.Filter = append(s.Filter, *o) @@ -459,12 +461,14 @@ func (s *CategorizationAnalyzerDefinition) UnmarshalJSON(data []byte) error { } case "tokenizer": + rawMsg := json.RawMessage{} dec.Decode(&rawMsg) source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { case '{': + kind := make(map[string]string, 0) localDec.Decode(&kind) source.Seek(0, io.SeekStart) @@ -473,90 +477,90 @@ func (s *CategorizationAnalyzerDefinition) UnmarshalJSON(data []byte) error { case "char_group": o := NewCharGroupTokenizer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Tokenizer = *o case "edge_ngram": o := NewEdgeNGramTokenizer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Tokenizer = *o case "keyword": o := NewKeywordTokenizer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Tokenizer = *o case "letter": o := NewLetterTokenizer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Tokenizer = *o case "lowercase": o := NewLowercaseTokenizer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Tokenizer = *o case "ngram": o := NewNGramTokenizer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Tokenizer = *o case "nori_tokenizer": o := NewNoriTokenizer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Tokenizer = *o case "path_hierarchy": o := NewPathHierarchyTokenizer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Tokenizer = *o case "standard": o := NewStandardTokenizer() - if 
err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Tokenizer = *o case "uax_url_email": o := NewUaxEmailUrlTokenizer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Tokenizer = *o case "whitespace": o := NewWhitespaceTokenizer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Tokenizer = *o case "kuromoji_tokenizer": o := NewKuromojiTokenizer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Tokenizer = *o case "pattern": o := NewPatternTokenizer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Tokenizer = *o case "icu_tokenizer": o := NewIcuTokenizer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Tokenizer = *o default: - if err := dec.Decode(&s.Tokenizer); err != nil { + if err := localDec.Decode(&s.Tokenizer); err != nil { return err } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/categorizetextaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/categorizetextaggregation.go index bae1fe657..aa231a27f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/categorizetextaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/categorizetextaggregation.go @@ -16,21 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // CategorizeTextAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L437-L501 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L1037-L1101 type CategorizeTextAggregation struct { // CategorizationAnalyzer The categorization analyzer specifies how the text is analyzed and tokenized // before being categorized. @@ -67,12 +67,12 @@ type CategorizeTextAggregation struct { // Smaller values use less memory and create fewer categories. Larger values // will use more memory and // create narrower categories. Max allowed value is 100. - MaxUniqueTokens *int `json:"max_unique_tokens,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - // MinDocCount The minimum number of documents for a bucket to be returned to the results. + MaxUniqueTokens *int `json:"max_unique_tokens,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // MinDocCount The minimum number of documents in a bucket to be returned to the results. MinDocCount *int `json:"min_doc_count,omitempty"` Name *string `json:"name,omitempty"` - // ShardMinDocCount The minimum number of documents for a bucket to be returned from the shard + // ShardMinDocCount The minimum number of documents in a bucket to be returned from the shard // before merging. 
ShardMinDocCount *int `json:"shard_min_doc_count,omitempty"` // ShardSize The number of categorization buckets to return from each shard before merging @@ -89,6 +89,7 @@ type CategorizeTextAggregation struct { } func (s *CategorizeTextAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -103,6 +104,7 @@ func (s *CategorizeTextAggregation) UnmarshalJSON(data []byte) error { switch t { case "categorization_analyzer": + rawMsg := json.RawMessage{} dec.Decode(&rawMsg) source := bytes.NewReader(rawMsg) @@ -132,13 +134,35 @@ func (s *CategorizeTextAggregation) UnmarshalJSON(data []byte) error { } case "max_matched_tokens": - if err := dec.Decode(&s.MaxMatchedTokens); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxMatchedTokens = &value + case float64: + f := int(v) + s.MaxMatchedTokens = &f } case "max_unique_tokens": - if err := dec.Decode(&s.MaxUniqueTokens); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxUniqueTokens = &value + case float64: + f := int(v) + s.MaxUniqueTokens = &f } case "meta": @@ -147,33 +171,95 @@ func (s *CategorizeTextAggregation) UnmarshalJSON(data []byte) error { } case "min_doc_count": - if err := dec.Decode(&s.MinDocCount); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinDocCount = &value + case float64: + f := int(v) + s.MinDocCount = &f } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o 
case "shard_min_doc_count": - if err := dec.Decode(&s.ShardMinDocCount); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShardMinDocCount = &value + case float64: + f := int(v) + s.ShardMinDocCount = &f } case "shard_size": - if err := dec.Decode(&s.ShardSize); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f } case "similarity_threshold": - if err := dec.Decode(&s.SimilarityThreshold); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SimilarityThreshold = &value + case float64: + f := int(v) + s.SimilarityThreshold = &f } case "size": - if err := dec.Decode(&s.Size); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/categorizetextanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/categorizetextanalyzer.go index 2f82962b4..4a72e5944 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/categorizetextanalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/categorizetextanalyzer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // string // CustomCategorizeTextAnalyzer // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L503-L506 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L1103-L1106 type CategorizeTextAnalyzer interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/category.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/category.go index b6a8dfbbe..9c1df2eea 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/category.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/category.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Category type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Category.ts#L23-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Category.ts#L23-L49 type Category struct { // CategoryId A unique identifier for the category. category_id is unique at the job level, // even when per-partition categorization is enabled. 
@@ -68,6 +76,162 @@ type Category struct { Terms string `json:"terms"` } +func (s *Category) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "category_id": + if err := dec.Decode(&s.CategoryId); err != nil { + return err + } + + case "examples": + if err := dec.Decode(&s.Examples); err != nil { + return err + } + + case "grok_pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GrokPattern = &o + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return err + } + + case "max_matching_length": + if err := dec.Decode(&s.MaxMatchingLength); err != nil { + return err + } + + case "mlcategory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Mlcategory = o + + case "num_matches": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumMatches = &value + case float64: + f := int64(v) + s.NumMatches = &f + } + + case "p": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.P = &o + + case "partition_field_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PartitionFieldName = &o + + case "partition_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PartitionFieldValue = &o + + case "preferred_to_categories": + if err := dec.Decode(&s.PreferredToCategories); err != nil { + return err + } + + case "regex": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Regex = o + + case "result_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultType = o + + case "terms": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Terms = o + + } + } + return nil +} + // NewCategory returns a Category. func NewCategory() *Category { r := &Category{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cattrainedmodelscolumns.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cattrainedmodelscolumns.go index c45124df2..2906d722f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cattrainedmodelscolumns.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cattrainedmodelscolumns.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,5 +26,5 @@ import ( // CatTrainedModelsColumns type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/_types/CatBase.ts#L636-L638 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/_types/CatBase.ts#L636-L638 type CatTrainedModelsColumns []cattrainedmodelscolumn.CatTrainedModelsColumn diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cattransformcolumns.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cattransformcolumns.go index a950a1ae4..60d75ff6d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cattransformcolumns.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cattransformcolumns.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,5 +26,5 @@ import ( // CatTransformColumns type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/_types/CatBase.ts#L845-L845 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/_types/CatBase.ts#L845-L845 type CatTransformColumns []cattransformcolumn.CatTransformColumn diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ccr.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ccr.go index 5c15269a4..2bc9039b6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ccr.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ccr.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Ccr type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L328-L331 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L334-L337 type Ccr struct { AutoFollowPatternsCount int `json:"auto_follow_patterns_count"` Available bool `json:"available"` @@ -30,6 +38,86 @@ type Ccr struct { FollowerIndicesCount int `json:"follower_indices_count"` } +func (s *Ccr) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "auto_follow_patterns_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.AutoFollowPatternsCount = value + case float64: + f := int(v) + s.AutoFollowPatternsCount = f + } + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "follower_indices_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + 
s.FollowerIndicesCount = value + case float64: + f := int(v) + s.FollowerIndicesCount = f + } + + } + } + return nil +} + // NewCcr returns a Ccr. func NewCcr() *Ccr { r := &Ccr{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ccrshardstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ccrshardstats.go index 62dce4985..48c0fe014 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ccrshardstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ccrshardstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CcrShardStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/_types/FollowIndexStats.ts#L35-L69 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/_types/FollowIndexStats.ts#L35-L69 type CcrShardStats struct { BytesRead int64 `json:"bytes_read"` FailedReadRequests int64 `json:"failed_read_requests"` @@ -59,6 +67,345 @@ type CcrShardStats struct { WriteBufferSizeInBytes ByteSize `json:"write_buffer_size_in_bytes"` } +func (s *CcrShardStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bytes_read": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BytesRead = value + case float64: + 
f := int64(v) + s.BytesRead = f + } + + case "failed_read_requests": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.FailedReadRequests = value + case float64: + f := int64(v) + s.FailedReadRequests = f + } + + case "failed_write_requests": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.FailedWriteRequests = value + case float64: + f := int64(v) + s.FailedWriteRequests = f + } + + case "fatal_exception": + if err := dec.Decode(&s.FatalException); err != nil { + return err + } + + case "follower_aliases_version": + if err := dec.Decode(&s.FollowerAliasesVersion); err != nil { + return err + } + + case "follower_global_checkpoint": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.FollowerGlobalCheckpoint = value + case float64: + f := int64(v) + s.FollowerGlobalCheckpoint = f + } + + case "follower_index": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FollowerIndex = o + + case "follower_mapping_version": + if err := dec.Decode(&s.FollowerMappingVersion); err != nil { + return err + } + + case "follower_max_seq_no": + if err := dec.Decode(&s.FollowerMaxSeqNo); err != nil { + return err + } + + case "follower_settings_version": + if err := dec.Decode(&s.FollowerSettingsVersion); err != nil { + return err + } + + case "last_requested_seq_no": + if err := dec.Decode(&s.LastRequestedSeqNo); err != nil { + return err + } + + case "leader_global_checkpoint": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if 
err != nil { + return err + } + s.LeaderGlobalCheckpoint = value + case float64: + f := int64(v) + s.LeaderGlobalCheckpoint = f + } + + case "leader_index": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LeaderIndex = o + + case "leader_max_seq_no": + if err := dec.Decode(&s.LeaderMaxSeqNo); err != nil { + return err + } + + case "operations_read": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.OperationsRead = value + case float64: + f := int64(v) + s.OperationsRead = f + } + + case "operations_written": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.OperationsWritten = value + case float64: + f := int64(v) + s.OperationsWritten = f + } + + case "outstanding_read_requests": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.OutstandingReadRequests = value + case float64: + f := int(v) + s.OutstandingReadRequests = f + } + + case "outstanding_write_requests": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.OutstandingWriteRequests = value + case float64: + f := int(v) + s.OutstandingWriteRequests = f + } + + case "read_exceptions": + if err := dec.Decode(&s.ReadExceptions); err != nil { + return err + } + + case "remote_cluster": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RemoteCluster = o + + case "shard_id": + + var tmp interface{} + 
dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShardId = value + case float64: + f := int(v) + s.ShardId = f + } + + case "successful_read_requests": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SuccessfulReadRequests = value + case float64: + f := int64(v) + s.SuccessfulReadRequests = f + } + + case "successful_write_requests": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SuccessfulWriteRequests = value + case float64: + f := int64(v) + s.SuccessfulWriteRequests = f + } + + case "time_since_last_read": + if err := dec.Decode(&s.TimeSinceLastRead); err != nil { + return err + } + + case "time_since_last_read_millis": + if err := dec.Decode(&s.TimeSinceLastReadMillis); err != nil { + return err + } + + case "total_read_remote_exec_time": + if err := dec.Decode(&s.TotalReadRemoteExecTime); err != nil { + return err + } + + case "total_read_remote_exec_time_millis": + if err := dec.Decode(&s.TotalReadRemoteExecTimeMillis); err != nil { + return err + } + + case "total_read_time": + if err := dec.Decode(&s.TotalReadTime); err != nil { + return err + } + + case "total_read_time_millis": + if err := dec.Decode(&s.TotalReadTimeMillis); err != nil { + return err + } + + case "total_write_time": + if err := dec.Decode(&s.TotalWriteTime); err != nil { + return err + } + + case "total_write_time_millis": + if err := dec.Decode(&s.TotalWriteTimeMillis); err != nil { + return err + } + + case "write_buffer_operation_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.WriteBufferOperationCount = value + case float64: + f := int64(v) + 
s.WriteBufferOperationCount = f + } + + case "write_buffer_size_in_bytes": + if err := dec.Decode(&s.WriteBufferSizeInBytes); err != nil { + return err + } + + } + } + return nil +} + // NewCcrShardStats returns a CcrShardStats. func NewCcrShardStats() *CcrShardStats { r := &CcrShardStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/certificateinformation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/certificateinformation.go index 0e8604d12..48c46dd63 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/certificateinformation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/certificateinformation.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CertificateInformation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ssl/certificates/types.ts#L22-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ssl/certificates/types.ts#L22-L31 type CertificateInformation struct { Alias string `json:"alias,omitempty"` Expiry DateTime `json:"expiry"` @@ -34,6 +42,117 @@ type CertificateInformation struct { SubjectDn string `json:"subject_dn"` } +func (s *CertificateInformation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alias": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Alias = o + + case "expiry": + if err := dec.Decode(&s.Expiry); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = o + + case "has_private_key": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.HasPrivateKey = value + case bool: + s.HasPrivateKey = v + } + + case "issuer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Issuer = &o + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = o + + case "serial_number": + var tmp json.RawMessage + 
if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SerialNumber = o + + case "subject_dn": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SubjectDn = o + + } + } + return nil +} + // NewCertificateInformation returns a CertificateInformation. func NewCertificateInformation() *CertificateInformation { r := &CertificateInformation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cgroup.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cgroup.go index 9bac45d66..45de69f73 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cgroup.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cgroup.go @@ -16,17 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Cgroup type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L188-L192 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L461-L474 type Cgroup struct { - Cpu *CgroupCpu `json:"cpu,omitempty"` - Cpuacct *CpuAcct `json:"cpuacct,omitempty"` - Memory *CgroupMemory `json:"memory,omitempty"` + // Cpu Contains statistics about `cpu` control group for the node. + Cpu *CgroupCpu `json:"cpu,omitempty"` + // Cpuacct Contains statistics about `cpuacct` control group for the node. 
+ Cpuacct *CpuAcct `json:"cpuacct,omitempty"` + // Memory Contains statistics about the memory control group for the node. + Memory *CgroupMemory `json:"memory,omitempty"` } // NewCgroup returns a Cgroup. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cgroupcpu.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cgroupcpu.go index 7173975d4..94f0b44a2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cgroupcpu.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cgroupcpu.go @@ -16,18 +16,103 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CgroupCpu type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L199-L204 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L487-L504 type CgroupCpu struct { - CfsPeriodMicros *int `json:"cfs_period_micros,omitempty"` - CfsQuotaMicros *int `json:"cfs_quota_micros,omitempty"` - ControlGroup *string `json:"control_group,omitempty"` - Stat *CgroupCpuStat `json:"stat,omitempty"` + // CfsPeriodMicros The period of time, in microseconds, for how regularly all tasks in the same + // cgroup as the Elasticsearch process should have their access to CPU resources + // reallocated. + CfsPeriodMicros *int `json:"cfs_period_micros,omitempty"` + // CfsQuotaMicros The total amount of time, in microseconds, for which all tasks in the same + // cgroup as the Elasticsearch process can run during one period + // `cfs_period_micros`. 
+ CfsQuotaMicros *int `json:"cfs_quota_micros,omitempty"` + // ControlGroup The `cpu` control group to which the Elasticsearch process belongs. + ControlGroup *string `json:"control_group,omitempty"` + // Stat Contains CPU statistics for the node. + Stat *CgroupCpuStat `json:"stat,omitempty"` +} + +func (s *CgroupCpu) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cfs_period_micros": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CfsPeriodMicros = &value + case float64: + f := int(v) + s.CfsPeriodMicros = &f + } + + case "cfs_quota_micros": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CfsQuotaMicros = &value + case float64: + f := int(v) + s.CfsQuotaMicros = &f + } + + case "control_group": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ControlGroup = &o + + case "stat": + if err := dec.Decode(&s.Stat); err != nil { + return err + } + + } + } + return nil } // NewCgroupCpu returns a CgroupCpu. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cgroupcpustat.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cgroupcpustat.go index 2186dba5b..5a69e3dd9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cgroupcpustat.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cgroupcpustat.go @@ -16,17 +16,86 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CgroupCpuStat type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L206-L210 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L506-L519 type CgroupCpuStat struct { + // NumberOfElapsedPeriods The number of reporting periods (as specified by `cfs_period_micros`) that + // have elapsed. NumberOfElapsedPeriods *int64 `json:"number_of_elapsed_periods,omitempty"` + // NumberOfTimesThrottled The number of times all tasks in the same cgroup as the Elasticsearch process + // have been throttled. NumberOfTimesThrottled *int64 `json:"number_of_times_throttled,omitempty"` - TimeThrottledNanos *int64 `json:"time_throttled_nanos,omitempty"` + // TimeThrottledNanos The total amount of time, in nanoseconds, for which all tasks in the same + // cgroup as the Elasticsearch process have been throttled. 
+ TimeThrottledNanos *int64 `json:"time_throttled_nanos,omitempty"` +} + +func (s *CgroupCpuStat) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "number_of_elapsed_periods": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumberOfElapsedPeriods = &value + case float64: + f := int64(v) + s.NumberOfElapsedPeriods = &f + } + + case "number_of_times_throttled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumberOfTimesThrottled = &value + case float64: + f := int64(v) + s.NumberOfTimesThrottled = &f + } + + case "time_throttled_nanos": + if err := dec.Decode(&s.TimeThrottledNanos); err != nil { + return err + } + + } + } + return nil } // NewCgroupCpuStat returns a CgroupCpuStat. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cgroupmemory.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cgroupmemory.go index 648e4638b..406ef7f7e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cgroupmemory.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cgroupmemory.go @@ -16,19 +16,94 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CgroupMemory type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L212-L216 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L521-L537 type CgroupMemory struct { + // ControlGroup The `memory` control group to which the Elasticsearch process belongs. ControlGroup *string `json:"control_group,omitempty"` + // LimitInBytes The maximum amount of user memory (including file cache) allowed for all + // tasks in the same cgroup as the Elasticsearch process. + // This value can be too big to store in a `long`, so is returned as a string so + // that the value returned can exactly match what the underlying operating + // system interface returns. + // Any value that is too large to parse into a `long` almost certainly means no + // limit has been set for the cgroup. LimitInBytes *string `json:"limit_in_bytes,omitempty"` + // UsageInBytes The total current memory usage by processes in the cgroup, in bytes, by all + // tasks in the same cgroup as the Elasticsearch process. + // This value is stored as a string for consistency with `limit_in_bytes`. 
UsageInBytes *string `json:"usage_in_bytes,omitempty"` } +func (s *CgroupMemory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "control_group": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ControlGroup = &o + + case "limit_in_bytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LimitInBytes = &o + + case "usage_in_bytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UsageInBytes = &o + + } + } + return nil +} + // NewCgroupMemory returns a CgroupMemory. func NewCgroupMemory() *CgroupMemory { r := &CgroupMemory{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/chaininput.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/chaininput.go index 8b720c9b3..e6bdb2cb1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/chaininput.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/chaininput.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ChainInput type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Input.ts#L35-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Input.ts#L35-L37 type ChainInput struct { Inputs []map[string]WatcherInput `json:"inputs"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/charfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/charfilter.go index 8d10e8d81..12f88204f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/charfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/charfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // string // CharFilterDefinition // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/char_filters.ts#L28-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/char_filters.ts#L28-L30 type CharFilter interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/charfilterdefinition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/charfilterdefinition.go index 5e06604cf..659c994b3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/charfilterdefinition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/charfilterdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -28,5 +28,5 @@ package types // IcuNormalizationCharFilter // KuromojiIterationMarkCharFilter // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/char_filters.ts#L32-L41 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/char_filters.ts#L32-L41 type CharFilterDefinition interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/charfilterdetail.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/charfilterdetail.go index a7666da85..0756ee7a6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/charfilterdetail.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/charfilterdetail.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CharFilterDetail type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/analyze/types.ts#L46-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/analyze/types.ts#L46-L49 type CharFilterDetail struct { FilteredText []string `json:"filtered_text"` Name string `json:"name"` } +func (s *CharFilterDetail) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filtered_text": + if err := dec.Decode(&s.FilteredText); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + } + } + return nil +} + // NewCharFilterDetail returns a CharFilterDetail. func NewCharFilterDetail() *CharFilterDetail { r := &CharFilterDetail{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/charfiltertypes.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/charfiltertypes.go index a9e1298b5..5bfc0db53 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/charfiltertypes.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/charfiltertypes.go @@ -16,22 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // CharFilterTypes type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L133-L142 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L228-L261 type CharFilterTypes struct { - AnalyzerTypes []FieldTypes `json:"analyzer_types"` - BuiltInAnalyzers []FieldTypes `json:"built_in_analyzers"` + // AnalyzerTypes Contains statistics about analyzer types used in selected nodes. + AnalyzerTypes []FieldTypes `json:"analyzer_types"` + // BuiltInAnalyzers Contains statistics about built-in analyzers used in selected nodes. + BuiltInAnalyzers []FieldTypes `json:"built_in_analyzers"` + // BuiltInCharFilters Contains statistics about built-in character filters used in selected nodes. BuiltInCharFilters []FieldTypes `json:"built_in_char_filters"` - BuiltInFilters []FieldTypes `json:"built_in_filters"` - BuiltInTokenizers []FieldTypes `json:"built_in_tokenizers"` - CharFilterTypes []FieldTypes `json:"char_filter_types"` - FilterTypes []FieldTypes `json:"filter_types"` - TokenizerTypes []FieldTypes `json:"tokenizer_types"` + // BuiltInFilters Contains statistics about built-in token filters used in selected nodes. + BuiltInFilters []FieldTypes `json:"built_in_filters"` + // BuiltInTokenizers Contains statistics about built-in tokenizers used in selected nodes. + BuiltInTokenizers []FieldTypes `json:"built_in_tokenizers"` + // CharFilterTypes Contains statistics about character filter types used in selected nodes. + CharFilterTypes []FieldTypes `json:"char_filter_types"` + // FilterTypes Contains statistics about token filter types used in selected nodes. + FilterTypes []FieldTypes `json:"filter_types"` + // TokenizerTypes Contains statistics about tokenizer types used in selected nodes. + TokenizerTypes []FieldTypes `json:"tokenizer_types"` } // NewCharFilterTypes returns a CharFilterTypes. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/chargrouptokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/chargrouptokenizer.go index b666e24fa..14f7c9936 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/chargrouptokenizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/chargrouptokenizer.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CharGroupTokenizer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/tokenizers.ts#L55-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/tokenizers.ts#L56-L60 type CharGroupTokenizer struct { MaxTokenLength *int `json:"max_token_length,omitempty"` TokenizeOnChars []string `json:"tokenize_on_chars"` @@ -30,11 +38,75 @@ type CharGroupTokenizer struct { Version *string `json:"version,omitempty"` } +func (s *CharGroupTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_token_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxTokenLength = &value + case float64: + f := int(v) + s.MaxTokenLength = &f + } + + case "tokenize_on_chars": + if err := dec.Decode(&s.TokenizeOnChars); err != nil { + return err + } + + 
case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CharGroupTokenizer) MarshalJSON() ([]byte, error) { + type innerCharGroupTokenizer CharGroupTokenizer + tmp := innerCharGroupTokenizer{ + MaxTokenLength: s.MaxTokenLength, + TokenizeOnChars: s.TokenizeOnChars, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "char_group" + + return json.Marshal(tmp) +} + // NewCharGroupTokenizer returns a CharGroupTokenizer. func NewCharGroupTokenizer() *CharGroupTokenizer { r := &CharGroupTokenizer{} - r.Type = "char_group" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/checkpointing.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/checkpointing.go index 857a87bdf..a245445dd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/checkpointing.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/checkpointing.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Checkpointing type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/get_transform_stats/types.ts#L82-L89 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/get_transform_stats/types.ts#L85-L92 type Checkpointing struct { ChangesLastDetectedAt *int64 `json:"changes_last_detected_at,omitempty"` ChangesLastDetectedAtDateTime DateTime `json:"changes_last_detected_at_date_time,omitempty"` @@ -32,6 +40,86 @@ type Checkpointing struct { OperationsBehind *int64 `json:"operations_behind,omitempty"` } +func (s *Checkpointing) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "changes_last_detected_at": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ChangesLastDetectedAt = &value + case float64: + f := int64(v) + s.ChangesLastDetectedAt = &f + } + + case "changes_last_detected_at_date_time": + if err := dec.Decode(&s.ChangesLastDetectedAtDateTime); err != nil { + return err + } + + case "last": + if err := dec.Decode(&s.Last); err != nil { + return err + } + + case "last_search_time": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LastSearchTime = &value + case float64: + f := int64(v) + s.LastSearchTime = &f + } + + case "next": + if err := dec.Decode(&s.Next); err != nil { + return err + } + + case "operations_behind": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.OperationsBehind = &value + case float64: + f := 
int64(v) + s.OperationsBehind = &f + } + + } + } + return nil +} + // NewCheckpointing returns a Checkpointing. func NewCheckpointing() *Checkpointing { r := &Checkpointing{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/checkpointstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/checkpointstats.go index 3c340b5af..ca9dd83cf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/checkpointstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/checkpointstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CheckpointStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/get_transform_stats/types.ts#L73-L80 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/get_transform_stats/types.ts#L76-L83 type CheckpointStats struct { Checkpoint int64 `json:"checkpoint"` CheckpointProgress *TransformProgress `json:"checkpoint_progress,omitempty"` @@ -32,6 +40,66 @@ type CheckpointStats struct { TimestampMillis *int64 `json:"timestamp_millis,omitempty"` } +func (s *CheckpointStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "checkpoint": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } 
+ s.Checkpoint = value + case float64: + f := int64(v) + s.Checkpoint = f + } + + case "checkpoint_progress": + if err := dec.Decode(&s.CheckpointProgress); err != nil { + return err + } + + case "time_upper_bound": + if err := dec.Decode(&s.TimeUpperBound); err != nil { + return err + } + + case "time_upper_bound_millis": + if err := dec.Decode(&s.TimeUpperBoundMillis); err != nil { + return err + } + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + case "timestamp_millis": + if err := dec.Decode(&s.TimestampMillis); err != nil { + return err + } + + } + } + return nil +} + // NewCheckpointStats returns a CheckpointStats. func NewCheckpointStats() *CheckpointStats { r := &CheckpointStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/childrenaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/childrenaggregate.go index 274c734c4..5fadbc079 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/childrenaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/childrenaggregate.go @@ -16,32 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // ChildrenAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L767-L768 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L776-L777 type ChildrenAggregate struct { - Aggregations map[string]Aggregate `json:"-"` - DocCount int64 `json:"doc_count"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` } func (s *ChildrenAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -55,451 +54,19 @@ func (s *ChildrenAggregate) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != 
nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return 
err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() 
- if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } 
- s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - 
return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "meta": @@ -507,6 +74,519 @@ func (s *ChildrenAggregate) UnmarshalJSON(data []byte) error { return err } + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != 
nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if 
err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := 
dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": 
+ o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } + } } return nil @@ -531,6 +611,7 @@ func (s ChildrenAggregate) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/childrenaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/childrenaggregation.go index a32067985..777941ea4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/childrenaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/childrenaggregation.go @@ -16,21 +16,68 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // ChildrenAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L73-L75 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L111-L116 type ChildrenAggregation struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Type The child type that should be selected. + Type *string `json:"type,omitempty"` +} + +func (s *ChildrenAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil } // NewChildrenAggregation returns a ChildrenAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/chisquareheuristic.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/chisquareheuristic.go index d7bc5df65..eefa4ab5a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/chisquareheuristic.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/chisquareheuristic.go @@ -16,16 +16,76 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ChiSquareHeuristic type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L322-L325 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L735-L744 type ChiSquareHeuristic struct { + // BackgroundIsSuperset Set to `false` if you defined a custom background filter that represents a + // different set of documents that you want to compare to. BackgroundIsSuperset bool `json:"background_is_superset"` - IncludeNegatives bool `json:"include_negatives"` + // IncludeNegatives Set to `false` to filter out the terms that appear less often in the subset + // than in documents outside the subset. + IncludeNegatives bool `json:"include_negatives"` +} + +func (s *ChiSquareHeuristic) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "background_is_superset": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.BackgroundIsSuperset = value + case bool: + s.BackgroundIsSuperset = v + } + + case "include_negatives": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IncludeNegatives = value + case bool: + s.IncludeNegatives = v + } + + } + } + return nil } // NewChiSquareHeuristic returns a ChiSquareHeuristic. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/chunkingconfig.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/chunkingconfig.go index 26948ad5a..513612e3e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/chunkingconfig.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/chunkingconfig.go @@ -16,17 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/chunkingmode" ) // ChunkingConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Datafeed.ts#L177-L190 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Datafeed.ts#L239-L252 type ChunkingConfig struct { // Mode If the mode is `auto`, the chunk size is dynamically calculated; // this is the recommended value when the datafeed does not use aggregations. @@ -40,6 +45,36 @@ type ChunkingConfig struct { TimeSpan Duration `json:"time_span,omitempty"` } +func (s *ChunkingConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return err + } + + case "time_span": + if err := dec.Decode(&s.TimeSpan); err != nil { + return err + } + + } + } + return nil +} + // NewChunkingConfig returns a ChunkingConfig. 
func NewChunkingConfig() *ChunkingConfig { r := &ChunkingConfig{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/circleprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/circleprocessor.go index 2553d0643..83dea3d37 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/circleprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/circleprocessor.go @@ -16,28 +16,172 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shapetype" ) // CircleProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L129-L135 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L399-L422 type CircleProcessor struct { - Description *string `json:"description,omitempty"` - ErrorDistance Float64 `json:"error_distance"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - ShapeType shapetype.ShapeType `json:"shape_type"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. 
+ Description *string `json:"description,omitempty"` + // ErrorDistance The difference between the resulting inscribed distance from center to side + // and the circle’s radius (measured in meters for `geo_shape`, unit-less for + // `shape`). + ErrorDistance Float64 `json:"error_distance"` + // Field The field to interpret as a circle. Either a string in WKT format or a map + // for GeoJSON. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // ShapeType Which field mapping type is to be used when processing the circle: + // `geo_shape` or `shape`. + ShapeType shapetype.ShapeType `json:"shape_type"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the polygon shape to + // By default, the field is updated in-place. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *CircleProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "error_distance": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.ErrorDistance = f + case float64: + f := Float64(v) + s.ErrorDistance = f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "shape_type": + if err := dec.Decode(&s.ShapeType); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + 
case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewCircleProcessor returns a CircleProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/classificationinferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/classificationinferenceoptions.go index 25a98f1ab..32f94d56a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/classificationinferenceoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/classificationinferenceoptions.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClassificationInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L80-L95 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L93-L108 type ClassificationInferenceOptions struct { // NumTopClasses Specifies the number of top class predictions to return. Defaults to 0. 
NumTopClasses *int `json:"num_top_classes,omitempty"` @@ -40,6 +48,94 @@ type ClassificationInferenceOptions struct { TopClassesResultsField *string `json:"top_classes_results_field,omitempty"` } +func (s *ClassificationInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "num_top_classes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumTopClasses = &value + case float64: + f := int(v) + s.NumTopClasses = &f + } + + case "num_top_feature_importance_values": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumTopFeatureImportanceValues = &value + case float64: + f := int(v) + s.NumTopFeatureImportanceValues = &f + } + + case "prediction_field_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PredictionFieldType = &o + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "top_classes_results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TopClassesResultsField = &o + + } + } + return nil +} + // NewClassificationInferenceOptions returns a ClassificationInferenceOptions. 
func NewClassificationInferenceOptions() *ClassificationInferenceOptions { r := &ClassificationInferenceOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cleanuprepositoryresults.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cleanuprepositoryresults.go index 9a4642e9a..b8e080a00 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cleanuprepositoryresults.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cleanuprepositoryresults.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CleanupRepositoryResults type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L29-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/cleanup_repository/SnapshotCleanupRepositoryResponse.ts#L29-L34 type CleanupRepositoryResults struct { // DeletedBlobs Number of binary large objects (blobs) removed during cleanup. 
DeletedBlobs int64 `json:"deleted_blobs"` @@ -30,6 +38,56 @@ type CleanupRepositoryResults struct { DeletedBytes int64 `json:"deleted_bytes"` } +func (s *CleanupRepositoryResults) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "deleted_blobs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DeletedBlobs = value + case float64: + f := int64(v) + s.DeletedBlobs = f + } + + case "deleted_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DeletedBytes = value + case float64: + f := int64(v) + s.DeletedBytes = f + } + + } + } + return nil +} + // NewCleanupRepositoryResults returns a CleanupRepositoryResults. func NewCleanupRepositoryResults() *CleanupRepositoryResults { r := &CleanupRepositoryResults{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/client.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/client.go index 5baf20391..882c1210f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/client.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/client.go @@ -16,25 +16,216 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Client type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L272-L284 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L649-L696 type Client struct { - Agent *string `json:"agent,omitempty"` - ClosedTimeMillis *int64 `json:"closed_time_millis,omitempty"` - Id *int64 `json:"id,omitempty"` - LastRequestTimeMillis *int64 `json:"last_request_time_millis,omitempty"` - LastUri *string `json:"last_uri,omitempty"` - LocalAddress *string `json:"local_address,omitempty"` - OpenedTimeMillis *int64 `json:"opened_time_millis,omitempty"` - RemoteAddress *string `json:"remote_address,omitempty"` - RequestCount *int64 `json:"request_count,omitempty"` - RequestSizeBytes *int64 `json:"request_size_bytes,omitempty"` - XOpaqueId *string `json:"x_opaque_id,omitempty"` + // Agent Reported agent for the HTTP client. + // If unavailable, this property is not included in the response. + Agent *string `json:"agent,omitempty"` + // ClosedTimeMillis Time at which the client closed the connection if the connection is closed. + ClosedTimeMillis *int64 `json:"closed_time_millis,omitempty"` + // Id Unique ID for the HTTP client. + Id *int64 `json:"id,omitempty"` + // LastRequestTimeMillis Time of the most recent request from this client. + LastRequestTimeMillis *int64 `json:"last_request_time_millis,omitempty"` + // LastUri The URI of the client’s most recent request. + LastUri *string `json:"last_uri,omitempty"` + // LocalAddress Local address for the HTTP connection. + LocalAddress *string `json:"local_address,omitempty"` + // OpenedTimeMillis Time at which the client opened the connection. + OpenedTimeMillis *int64 `json:"opened_time_millis,omitempty"` + // RemoteAddress Remote address for the HTTP connection. + RemoteAddress *string `json:"remote_address,omitempty"` + // RequestCount Number of requests from this client. 
+ RequestCount *int64 `json:"request_count,omitempty"` + // RequestSizeBytes Cumulative size in bytes of all requests from this client. + RequestSizeBytes *int64 `json:"request_size_bytes,omitempty"` + // XOpaqueId Value from the client’s `x-opaque-id` HTTP header. + // If unavailable, this property is not included in the response. + XOpaqueId *string `json:"x_opaque_id,omitempty"` +} + +func (s *Client) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "agent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Agent = &o + + case "closed_time_millis": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ClosedTimeMillis = &value + case float64: + f := int64(v) + s.ClosedTimeMillis = &f + } + + case "id": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Id = &value + case float64: + f := int64(v) + s.Id = &f + } + + case "last_request_time_millis": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LastRequestTimeMillis = &value + case float64: + f := int64(v) + s.LastRequestTimeMillis = &f + } + + case "last_uri": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LastUri = &o + + case "local_address": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := 
string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LocalAddress = &o + + case "opened_time_millis": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.OpenedTimeMillis = &value + case float64: + f := int64(v) + s.OpenedTimeMillis = &f + } + + case "remote_address": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RemoteAddress = &o + + case "request_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.RequestCount = &value + case float64: + f := int64(v) + s.RequestCount = &f + } + + case "request_size_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.RequestSizeBytes = &value + case float64: + f := int64(v) + s.RequestSizeBytes = &f + } + + case "x_opaque_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.XOpaqueId = &o + + } + } + return nil } // NewClient returns a Client. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/closeindexresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/closeindexresult.go index 87e410b4b..353216021 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/closeindexresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/closeindexresult.go @@ -16,18 +16,68 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CloseIndexResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/close/CloseIndexResponse.ts#L32-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/close/CloseIndexResponse.ts#L32-L35 type CloseIndexResult struct { Closed bool `json:"closed"` Shards map[string]CloseShardResult `json:"shards,omitempty"` } +func (s *CloseIndexResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "closed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Closed = value + case bool: + s.Closed = v + } + + case "shards": + if s.Shards == nil { + s.Shards = make(map[string]CloseShardResult, 0) + } + if err := dec.Decode(&s.Shards); err != nil { + return err + } + + } + } + return nil +} + // NewCloseIndexResult returns a CloseIndexResult. func NewCloseIndexResult() *CloseIndexResult { r := &CloseIndexResult{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/closeshardresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/closeshardresult.go index 66dd3f0fc..6b843fefd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/closeshardresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/closeshardresult.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // CloseShardResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/close/CloseIndexResponse.ts#L37-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/close/CloseIndexResponse.ts#L37-L39 type CloseShardResult struct { Failures []ShardFailure `json:"failures"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterappliedstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterappliedstats.go index abe086407..0849ce59e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterappliedstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterappliedstats.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ClusterAppliedStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L90-L92 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L221-L223 type ClusterAppliedStats struct { Recordings []Recording `json:"recordings,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clustercomponenttemplate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clustercomponenttemplate.go index 309ec9513..89fddc188 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clustercomponenttemplate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clustercomponenttemplate.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ClusterComponentTemplate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/_types/ComponentTemplate.ts#L26-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/_types/ComponentTemplate.ts#L30-L33 type ClusterComponentTemplate struct { ComponentTemplate ComponentTemplateNode `json:"component_template"` Name string `json:"name"` } +func (s *ClusterComponentTemplate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "component_template": + if err := dec.Decode(&s.ComponentTemplate); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + // NewClusterComponentTemplate returns a ClusterComponentTemplate. func NewClusterComponentTemplate() *ClusterComponentTemplate { r := &ClusterComponentTemplate{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterdetails.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterdetails.go new file mode 100644 index 000000000..35a39f909 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterdetails.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clustersearchstatus" +) + +// ClusterDetails type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L45-L52 +type ClusterDetails struct { + Failures []ShardFailure `json:"failures,omitempty"` + Indices string `json:"indices"` + Shards_ *ShardStatistics `json:"_shards,omitempty"` + Status clustersearchstatus.ClusterSearchStatus `json:"status"` + TimedOut bool `json:"timed_out"` + Took *int64 `json:"took,omitempty"` +} + +func (s *ClusterDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "failures": + if err := dec.Decode(&s.Failures); err != nil { + return err + } + + case "indices": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Indices = o + + case "_shards": + if err := dec.Decode(&s.Shards_); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case 
"timed_out": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimedOut = value + case bool: + s.TimedOut = v + } + + case "took": + if err := dec.Decode(&s.Took); err != nil { + return err + } + + } + } + return nil +} + +// NewClusterDetails returns a ClusterDetails. +func NewClusterDetails() *ClusterDetails { + r := &ClusterDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterfilesystem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterfilesystem.go index a48a88f82..b321b2ed8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterfilesystem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterfilesystem.go @@ -16,17 +16,98 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterFileSystem type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L34-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L34-L49 type ClusterFileSystem struct { + // AvailableInBytes Total number of bytes available to JVM in file stores across all selected + // nodes. + // Depending on operating system or process-level restrictions, this number may + // be less than `nodes.fs.free_in_byes`. + // This is the actual amount of free disk space the selected Elasticsearch nodes + // can use. 
AvailableInBytes int64 `json:"available_in_bytes"` - FreeInBytes int64 `json:"free_in_bytes"` - TotalInBytes int64 `json:"total_in_bytes"` + // FreeInBytes Total number of unallocated bytes in file stores across all selected nodes. + FreeInBytes int64 `json:"free_in_bytes"` + // TotalInBytes Total size, in bytes, of all file stores across all selected nodes. + TotalInBytes int64 `json:"total_in_bytes"` +} + +func (s *ClusterFileSystem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.AvailableInBytes = value + case float64: + f := int64(v) + s.AvailableInBytes = f + } + + case "free_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.FreeInBytes = value + case float64: + f := int64(v) + s.FreeInBytes = f + } + + case "total_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalInBytes = value + case float64: + f := int64(v) + s.TotalInBytes = f + } + + } + } + return nil } // NewClusterFileSystem returns a ClusterFileSystem. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterindexingpressure.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterindexingpressure.go index d208fca6a..e85d6912c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterindexingpressure.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterindexingpressure.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ClusterIndexingPressure type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L299-L301 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L570-L572 type ClusterIndexingPressure struct { Memory ClusterPressureMemory `json:"memory"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterindices.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterindices.go index 4762ee7cb..02e651b49 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterindices.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterindices.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterIndices type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L63-L94 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L74-L107 type ClusterIndices struct { // Analysis Contains statistics about analyzers and analyzer components used in selected // nodes. @@ -44,10 +52,97 @@ type ClusterIndices struct { // Shards Contains statistics about indices with shards assigned to selected nodes. Shards ClusterIndicesShards `json:"shards"` // Store Contains statistics about the size of shards assigned to selected nodes. - Store StoreStats `json:"store"` + Store StoreStats `json:"store"` + // Versions Contains statistics about analyzers and analyzer components used in selected + // nodes. Versions []IndicesVersions `json:"versions,omitempty"` } +func (s *ClusterIndices) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analysis": + if err := dec.Decode(&s.Analysis); err != nil { + return err + } + + case "completion": + if err := dec.Decode(&s.Completion); err != nil { + return err + } + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "docs": + if err := dec.Decode(&s.Docs); err != nil { + return err + } + + case "fielddata": + if err := dec.Decode(&s.Fielddata); err != nil { + return err + } + + case "mappings": + if err := dec.Decode(&s.Mappings); err != nil { + return err + } + + case "query_cache": + if err := dec.Decode(&s.QueryCache); err != nil { + return err + } + + case "segments": + if err := dec.Decode(&s.Segments); err != 
nil { + return err + } + + case "shards": + if err := dec.Decode(&s.Shards); err != nil { + return err + } + + case "store": + if err := dec.Decode(&s.Store); err != nil { + return err + } + + case "versions": + if err := dec.Decode(&s.Versions); err != nil { + return err + } + + } + } + return nil +} + // NewClusterIndices returns a ClusterIndices. func NewClusterIndices() *ClusterIndices { r := &ClusterIndices{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterindicesshards.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterindicesshards.go index f8a38e636..17df50af4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterindicesshards.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterindicesshards.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterIndicesShards type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L49-L61 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L60-L72 type ClusterIndicesShards struct { // Index Contains statistics about shards assigned to selected nodes. 
Index *ClusterIndicesShardsIndex `json:"index,omitempty"` @@ -34,6 +42,79 @@ type ClusterIndicesShards struct { Total *Float64 `json:"total,omitempty"` } +func (s *ClusterIndicesShards) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "primaries": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Primaries = &f + case float64: + f := Float64(v) + s.Primaries = &f + } + + case "replication": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Replication = &f + case float64: + f := Float64(v) + s.Replication = &f + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Total = &f + case float64: + f := Float64(v) + s.Total = &f + } + + } + } + return nil +} + // NewClusterIndicesShards returns a ClusterIndicesShards. func NewClusterIndicesShards() *ClusterIndicesShards { r := &ClusterIndicesShards{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterindicesshardsindex.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterindicesshardsindex.go index 0cf26cc2a..9af8b8d7d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterindicesshardsindex.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterindicesshardsindex.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ClusterIndicesShardsIndex type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L40-L47 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L51-L58 type ClusterIndicesShardsIndex struct { // Primaries Contains statistics about the number of primary shards assigned to selected // nodes. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterinfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterinfo.go index b27989ced..87d3f9668 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterinfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterinfo.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ClusterInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/allocation_explain/types.ts#L48-L54 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/allocation_explain/types.ts#L48-L54 type ClusterInfo struct { Nodes map[string]NodeDiskUsage `json:"nodes"` ReservedSizes []ReservedSize `json:"reserved_sizes"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterinfotargets.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterinfotargets.go new file mode 100644 index 000000000..55067040e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterinfotargets.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterinfotarget" +) + +// ClusterInfoTargets type alias. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L386-L386 +type ClusterInfoTargets []clusterinfotarget.ClusterInfoTarget diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteringest.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteringest.go index 07209340d..c896b80b2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteringest.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteringest.go @@ -16,18 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterIngest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L151-L154 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L270-L273 type ClusterIngest struct { NumberOfPipelines int `json:"number_of_pipelines"` ProcessorStats map[string]ClusterProcessor `json:"processor_stats"` } +func (s *ClusterIngest) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "number_of_pipelines": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumberOfPipelines = value + case float64: + f := int(v) + s.NumberOfPipelines = f + } + + case "processor_stats": + if s.ProcessorStats == nil { + s.ProcessorStats = make(map[string]ClusterProcessor, 0) + } + if err := dec.Decode(&s.ProcessorStats); err != nil { + return err + } + + } + } + return nil +} + // NewClusterIngest returns a ClusterIngest. func NewClusterIngest() *ClusterIngest { r := &ClusterIngest{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterjvm.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterjvm.go index 2c1fea60d..5dce50b12 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterjvm.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterjvm.go @@ -16,18 +16,80 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterJvm type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L156-L161 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L275-L292 type ClusterJvm struct { - MaxUptimeInMillis int64 `json:"max_uptime_in_millis"` - Mem ClusterJvmMemory `json:"mem"` - Threads int64 `json:"threads"` - Versions []ClusterJvmVersion `json:"versions"` + // MaxUptimeInMillis Uptime duration, in milliseconds, since JVM last started. + MaxUptimeInMillis int64 `json:"max_uptime_in_millis"` + // Mem Contains statistics about memory used by selected nodes. + Mem ClusterJvmMemory `json:"mem"` + // Threads Number of active threads in use by JVM across all selected nodes. + Threads int64 `json:"threads"` + // Versions Contains statistics about the JVM versions used by selected nodes. 
+ Versions []ClusterJvmVersion `json:"versions"` +} + +func (s *ClusterJvm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_uptime_in_millis": + if err := dec.Decode(&s.MaxUptimeInMillis); err != nil { + return err + } + + case "mem": + if err := dec.Decode(&s.Mem); err != nil { + return err + } + + case "threads": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Threads = value + case float64: + f := int64(v) + s.Threads = f + } + + case "versions": + if err := dec.Decode(&s.Versions); err != nil { + return err + } + + } + } + return nil } // NewClusterJvm returns a ClusterJvm. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterjvmmemory.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterjvmmemory.go index 0bcefd854..fe070ffb3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterjvmmemory.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterjvmmemory.go @@ -16,18 +16,79 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterJvmMemory type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L163-L166 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L294-L303 type ClusterJvmMemory struct { - HeapMaxInBytes int64 `json:"heap_max_in_bytes"` + // HeapMaxInBytes Maximum amount of memory, in bytes, available for use by the heap across all + // selected nodes. + HeapMaxInBytes int64 `json:"heap_max_in_bytes"` + // HeapUsedInBytes Memory, in bytes, currently in use by the heap across all selected nodes. HeapUsedInBytes int64 `json:"heap_used_in_bytes"` } +func (s *ClusterJvmMemory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "heap_max_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.HeapMaxInBytes = value + case float64: + f := int64(v) + s.HeapMaxInBytes = f + } + + case "heap_used_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.HeapUsedInBytes = value + case float64: + f := int64(v) + s.HeapUsedInBytes = f + } + + } + } + return nil +} + // NewClusterJvmMemory returns a ClusterJvmMemory. 
func NewClusterJvmMemory() *ClusterJvmMemory { r := &ClusterJvmMemory{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterjvmversion.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterjvmversion.go index a2f7828ce..1d4745996 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterjvmversion.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterjvmversion.go @@ -16,21 +16,137 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterJvmVersion type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L168-L176 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L305-L335 type ClusterJvmVersion struct { - BundledJdk bool `json:"bundled_jdk"` - Count int `json:"count"` - UsingBundledJdk bool `json:"using_bundled_jdk"` - Version string `json:"version"` - VmName string `json:"vm_name"` - VmVendor string `json:"vm_vendor"` - VmVersion string `json:"vm_version"` + // BundledJdk Always `true`. All distributions come with a bundled Java Development Kit + // (JDK). + BundledJdk bool `json:"bundled_jdk"` + // Count Total number of selected nodes using JVM. + Count int `json:"count"` + // UsingBundledJdk If `true`, a bundled JDK is in use by JVM. + UsingBundledJdk bool `json:"using_bundled_jdk"` + // Version Version of JVM used by one or more selected nodes. + Version string `json:"version"` + // VmName Name of the JVM. 
+ VmName string `json:"vm_name"` + // VmVendor Vendor of the JVM. + VmVendor string `json:"vm_vendor"` + // VmVersion Full version number of JVM. + // The full version number includes a plus sign (+) followed by the build + // number. + VmVersion string `json:"vm_version"` +} + +func (s *ClusterJvmVersion) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bundled_jdk": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.BundledJdk = value + case bool: + s.BundledJdk = v + } + + case "count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "using_bundled_jdk": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.UsingBundledJdk = value + case bool: + s.UsingBundledJdk = v + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + case "vm_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VmName = o + + case "vm_vendor": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VmVendor = o + + case "vm_version": + if err := dec.Decode(&s.VmVersion); err != nil { + return err + } + + } + } + return nil } // NewClusterJvmVersion returns a ClusterJvmVersion. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusternetworktypes.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusternetworktypes.go index 6d0a99c30..ecfb13b57 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusternetworktypes.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusternetworktypes.go @@ -16,15 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ClusterNetworkTypes type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L178-L181 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L337-L346 type ClusterNetworkTypes struct { - HttpTypes map[string]int `json:"http_types"` + // HttpTypes Contains statistics about the HTTP network types used by selected nodes. + HttpTypes map[string]int `json:"http_types"` + // TransportTypes Contains statistics about the transport network types used by selected nodes. TransportTypes map[string]int `json:"transport_types"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusternode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusternode.go index 61725c65f..6edec2be3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusternode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusternode.go @@ -16,17 +16,49 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ClusterNode type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/ClusterNode.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/ClusterNode.ts#L22-L24 type ClusterNode struct { Name string `json:"name"` } +func (s *ClusterNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + // NewClusterNode returns a ClusterNode. func NewClusterNode() *ClusterNode { r := &ClusterNode{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusternodecount.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusternodecount.go index efc3e4bbc..ce66b827f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusternodecount.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusternodecount.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterNodeCount type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L183-L199 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L348-L367 type ClusterNodeCount struct { CoordinatingOnly int `json:"coordinating_only"` Data int `json:"data"` @@ -40,6 +48,250 @@ type ClusterNodeCount struct { VotingOnly int `json:"voting_only"` } +func (s *ClusterNodeCount) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "coordinating_only": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CoordinatingOnly = value + case float64: + f := int(v) + s.CoordinatingOnly = f + } + + case "data": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Data = value + case float64: + f := int(v) + s.Data = f + } + + case "data_cold": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DataCold = value + case float64: + f := int(v) + s.DataCold = f + } + + case "data_content": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DataContent = value + case float64: + f := int(v) + s.DataContent = f + } + + case "data_frozen": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DataFrozen = &value + case float64: + f := int(v) + s.DataFrozen = &f + } + + 
case "data_hot": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DataHot = value + case float64: + f := int(v) + s.DataHot = f + } + + case "data_warm": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DataWarm = value + case float64: + f := int(v) + s.DataWarm = f + } + + case "ingest": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Ingest = value + case float64: + f := int(v) + s.Ingest = f + } + + case "master": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Master = value + case float64: + f := int(v) + s.Master = f + } + + case "ml": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Ml = value + case float64: + f := int(v) + s.Ml = f + } + + case "remote_cluster_client": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.RemoteClusterClient = value + case float64: + f := int(v) + s.RemoteClusterClient = f + } + + case "total": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Total = value + case float64: + f := int(v) + s.Total = f + } + + case "transform": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Transform = value + case float64: + f := int(v) + s.Transform = f + } + + case "voting_only": + + var tmp 
interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.VotingOnly = value + case float64: + f := int(v) + s.VotingOnly = f + } + + } + } + return nil +} + // NewClusterNodeCount returns a ClusterNodeCount. func NewClusterNodeCount() *ClusterNodeCount { r := &ClusterNodeCount{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusternodes.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusternodes.go index 2c25a7f02..69aad8dd1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusternodes.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusternodes.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ClusterNodes type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L201-L228 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L369-L402 type ClusterNodes struct { // Count Contains counts for nodes selected by the request’s node filters. Count ClusterNodeCount `json:"count"` @@ -44,6 +44,7 @@ type ClusterNodes struct { // nodes. PackagingTypes []NodePackagingType `json:"packaging_types"` // Plugins Contains statistics about installed plugins and modules by selected nodes. + // If no plugins or modules are installed, this array is empty. Plugins []PluginStats `json:"plugins"` // Process Contains statistics about processes used by selected nodes. 
Process ClusterProcess `json:"process"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteroperatingsystem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteroperatingsystem.go index 0d2995c84..caafe3602 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteroperatingsystem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteroperatingsystem.go @@ -16,20 +16,111 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterOperatingSystem type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L235-L242 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L415-L442 type ClusterOperatingSystem struct { - AllocatedProcessors int `json:"allocated_processors"` - Architectures []ClusterOperatingSystemArchitecture `json:"architectures,omitempty"` - AvailableProcessors int `json:"available_processors"` - Mem OperatingSystemMemoryInfo `json:"mem"` - Names []ClusterOperatingSystemName `json:"names"` - PrettyNames []ClusterOperatingSystemPrettyName `json:"pretty_names"` + // AllocatedProcessors Number of processors used to calculate thread pool size across all selected + // nodes. + // This number can be set with the processors setting of a node and defaults to + // the number of processors reported by the operating system. + // In both cases, this number will never be larger than 32. 
+ AllocatedProcessors int `json:"allocated_processors"` + // Architectures Contains statistics about processor architectures (for example, x86_64 or + // aarch64) used by selected nodes. + Architectures []ClusterOperatingSystemArchitecture `json:"architectures,omitempty"` + // AvailableProcessors Number of processors available to JVM across all selected nodes. + AvailableProcessors int `json:"available_processors"` + // Mem Contains statistics about memory used by selected nodes. + Mem OperatingSystemMemoryInfo `json:"mem"` + // Names Contains statistics about operating systems used by selected nodes. + Names []ClusterOperatingSystemName `json:"names"` + // PrettyNames Contains statistics about operating systems used by selected nodes. + PrettyNames []ClusterOperatingSystemPrettyName `json:"pretty_names"` +} + +func (s *ClusterOperatingSystem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allocated_processors": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.AllocatedProcessors = value + case float64: + f := int(v) + s.AllocatedProcessors = f + } + + case "architectures": + if err := dec.Decode(&s.Architectures); err != nil { + return err + } + + case "available_processors": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.AvailableProcessors = value + case float64: + f := int(v) + s.AvailableProcessors = f + } + + case "mem": + if err := dec.Decode(&s.Mem); err != nil { + return err + } + + case "names": + if err := dec.Decode(&s.Names); err != nil { + return err + } + + case "pretty_names": + if err := dec.Decode(&s.PrettyNames); err != nil { + return err + } + + } + } + return 
nil } // NewClusterOperatingSystem returns a ClusterOperatingSystem. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteroperatingsystemarchitecture.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteroperatingsystemarchitecture.go index e75428166..dda5d7a3b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteroperatingsystemarchitecture.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteroperatingsystemarchitecture.go @@ -16,16 +16,74 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterOperatingSystemArchitecture type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L230-L233 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L404-L413 type ClusterOperatingSystemArchitecture struct { - Arch string `json:"arch"` - Count int `json:"count"` + // Arch Name of an architecture used by one or more selected nodes. + Arch string `json:"arch"` + // Count Number of selected nodes using the architecture. 
+ Count int `json:"count"` +} + +func (s *ClusterOperatingSystemArchitecture) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "arch": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Arch = o + + case "count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + } + } + return nil } // NewClusterOperatingSystemArchitecture returns a ClusterOperatingSystemArchitecture. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteroperatingsystemname.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteroperatingsystemname.go index 85ec6d184..5843044e9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteroperatingsystemname.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteroperatingsystemname.go @@ -16,16 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterOperatingSystemName type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L244-L247 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L444-L453 type ClusterOperatingSystemName struct { - Count int `json:"count"` - Name string `json:"name"` + // Count Number of selected nodes using the operating system. + Count int `json:"count"` + // Name Name of an operating system used by one or more selected nodes. + Name string `json:"name"` +} + +func (s *ClusterOperatingSystemName) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil } // NewClusterOperatingSystemName returns a ClusterOperatingSystemName. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteroperatingsystemprettyname.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteroperatingsystemprettyname.go index 6dba81a96..c172fb2bb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteroperatingsystemprettyname.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusteroperatingsystemprettyname.go @@ -16,18 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterOperatingSystemPrettyName type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L249-L252 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L455-L464 type ClusterOperatingSystemPrettyName struct { - Count int `json:"count"` + // Count Number of selected nodes using the operating system. + Count int `json:"count"` + // PrettyName Human-readable name of an operating system used by one or more selected + // nodes. PrettyName string `json:"pretty_name"` } +func (s *ClusterOperatingSystemPrettyName) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "pretty_name": + if err := dec.Decode(&s.PrettyName); err != nil { + return err + } + + } + } + return nil +} + // NewClusterOperatingSystemPrettyName returns a ClusterOperatingSystemPrettyName. 
func NewClusterOperatingSystemPrettyName() *ClusterOperatingSystemPrettyName { r := &ClusterOperatingSystemPrettyName{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterpressurememory.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterpressurememory.go index df70a4de3..688409853 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterpressurememory.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterpressurememory.go @@ -16,19 +16,72 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterPressureMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L303-L307 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L574-L578 type ClusterPressureMemory struct { Current IndexingPressureMemorySummary `json:"current"` LimitInBytes int64 `json:"limit_in_bytes"` Total IndexingPressureMemorySummary `json:"total"` } +func (s *ClusterPressureMemory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current": + if err := dec.Decode(&s.Current); err != nil { + return err + } + + case "limit_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LimitInBytes = 
value + case float64: + f := int64(v) + s.LimitInBytes = f + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return err + } + + } + } + return nil +} + // NewClusterPressureMemory returns a ClusterPressureMemory. func NewClusterPressureMemory() *ClusterPressureMemory { r := &ClusterPressureMemory{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterprocess.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterprocess.go index cd59c1f81..68af79eb8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterprocess.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterprocess.go @@ -16,15 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ClusterProcess type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L254-L257 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L466-L475 type ClusterProcess struct { - Cpu ClusterProcessCpu `json:"cpu"` + // Cpu Contains statistics about CPU used by selected nodes. + Cpu ClusterProcessCpu `json:"cpu"` + // OpenFileDescriptors Contains statistics about open file descriptors in selected nodes. 
OpenFileDescriptors ClusterProcessOpenFileDescriptors `json:"open_file_descriptors"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterprocesscpu.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterprocesscpu.go index c92655ed4..7075b7d8e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterprocesscpu.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterprocesscpu.go @@ -16,17 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterProcessCpu type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L259-L261 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L477-L483 type ClusterProcessCpu struct { + // Percent Percentage of CPU used across all selected nodes. + // Returns `-1` if not supported. Percent int `json:"percent"` } +func (s *ClusterProcessCpu) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "percent": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Percent = value + case float64: + f := int(v) + s.Percent = f + } + + } + } + return nil +} + // NewClusterProcessCpu returns a ClusterProcessCpu. 
func NewClusterProcessCpu() *ClusterProcessCpu { r := &ClusterProcessCpu{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterprocessopenfiledescriptors.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterprocessopenfiledescriptors.go index 2a52aca8a..ffdc56dce 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterprocessopenfiledescriptors.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterprocessopenfiledescriptors.go @@ -16,19 +16,100 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterProcessOpenFileDescriptors type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L263-L267 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L485-L501 type ClusterProcessOpenFileDescriptors struct { + // Avg Average number of concurrently open file descriptors. + // Returns `-1` if not supported. Avg int64 `json:"avg"` + // Max Maximum number of concurrently open file descriptors allowed across all + // selected nodes. + // Returns `-1` if not supported. Max int64 `json:"max"` + // Min Minimum number of concurrently open file descriptors across all selected + // nodes. + // Returns -1 if not supported. 
Min int64 `json:"min"` } +func (s *ClusterProcessOpenFileDescriptors) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Avg = value + case float64: + f := int64(v) + s.Avg = f + } + + case "max": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Max = value + case float64: + f := int64(v) + s.Max = f + } + + case "min": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Min = value + case float64: + f := int64(v) + s.Min = f + } + + } + } + return nil +} + // NewClusterProcessOpenFileDescriptors returns a ClusterProcessOpenFileDescriptors. func NewClusterProcessOpenFileDescriptors() *ClusterProcessOpenFileDescriptors { r := &ClusterProcessOpenFileDescriptors{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterprocessor.go index f83ce2910..c9180aa71 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterprocessor.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L269-L275 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L503-L509 type ClusterProcessor struct { Count int64 `json:"count"` Current int64 `json:"current"` @@ -31,6 +39,81 @@ type ClusterProcessor struct { TimeInMillis int64 `json:"time_in_millis"` } +func (s *ClusterProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "current": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Current = value + case float64: + f := int64(v) + s.Current = f + } + + case "failed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Failed = value + case float64: + f := int64(v) + s.Failed = f + } + + case "time": + if err := dec.Decode(&s.Time); err != nil { + return err + } + + case "time_in_millis": + if err := dec.Decode(&s.TimeInMillis); err != nil { + return err + } + + } + } + 
return nil +} + // NewClusterProcessor returns a ClusterProcessor. func NewClusterProcessor() *ClusterProcessor { r := &ClusterProcessor{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterremoteinfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterremoteinfo.go index 88ae87094..413dd0e10 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterremoteinfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterremoteinfo.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // ClusterRemoteSniffInfo // ClusterRemoteProxyInfo // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L28-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L28-L29 type ClusterRemoteInfo interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterremoteproxyinfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterremoteproxyinfo.go index 030c2c998..0289fc9e7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterremoteproxyinfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterremoteproxyinfo.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterRemoteProxyInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L41-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L41-L50 type ClusterRemoteProxyInfo struct { Connected bool `json:"connected"` InitialConnectTimeout Duration `json:"initial_connect_timeout"` @@ -34,11 +42,142 @@ type ClusterRemoteProxyInfo struct { SkipUnavailable bool `json:"skip_unavailable"` } +func (s *ClusterRemoteProxyInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "connected": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Connected = value + case bool: + s.Connected = v + } + + case "initial_connect_timeout": + if err := dec.Decode(&s.InitialConnectTimeout); err != nil { + return err + } + + case "max_proxy_socket_connections": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxProxySocketConnections = value + case float64: + f := int(v) + s.MaxProxySocketConnections = f + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return err + } + + case "num_proxy_sockets_connected": + + var tmp interface{} + dec.Decode(&tmp) + switch v := 
tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumProxySocketsConnected = value + case float64: + f := int(v) + s.NumProxySocketsConnected = f + } + + case "proxy_address": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ProxyAddress = o + + case "server_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ServerName = o + + case "skip_unavailable": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.SkipUnavailable = value + case bool: + s.SkipUnavailable = v + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ClusterRemoteProxyInfo) MarshalJSON() ([]byte, error) { + type innerClusterRemoteProxyInfo ClusterRemoteProxyInfo + tmp := innerClusterRemoteProxyInfo{ + Connected: s.Connected, + InitialConnectTimeout: s.InitialConnectTimeout, + MaxProxySocketConnections: s.MaxProxySocketConnections, + Mode: s.Mode, + NumProxySocketsConnected: s.NumProxySocketsConnected, + ProxyAddress: s.ProxyAddress, + ServerName: s.ServerName, + SkipUnavailable: s.SkipUnavailable, + } + + tmp.Mode = "proxy" + + return json.Marshal(tmp) +} + // NewClusterRemoteProxyInfo returns a ClusterRemoteProxyInfo. 
func NewClusterRemoteProxyInfo() *ClusterRemoteProxyInfo { r := &ClusterRemoteProxyInfo{} - r.Mode = "proxy" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterremotesniffinfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterremotesniffinfo.go index 77596fc10..b24871cd8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterremotesniffinfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterremotesniffinfo.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterRemoteSniffInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L31-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/remote_info/ClusterRemoteInfoResponse.ts#L31-L39 type ClusterRemoteSniffInfo struct { Connected bool `json:"connected"` InitialConnectTimeout Duration `json:"initial_connect_timeout"` @@ -33,11 +41,121 @@ type ClusterRemoteSniffInfo struct { SkipUnavailable bool `json:"skip_unavailable"` } +func (s *ClusterRemoteSniffInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "connected": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + 
s.Connected = value + case bool: + s.Connected = v + } + + case "initial_connect_timeout": + if err := dec.Decode(&s.InitialConnectTimeout); err != nil { + return err + } + + case "max_connections_per_cluster": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxConnectionsPerCluster = value + case float64: + f := int(v) + s.MaxConnectionsPerCluster = f + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return err + } + + case "num_nodes_connected": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumNodesConnected = value + case float64: + f := int64(v) + s.NumNodesConnected = f + } + + case "seeds": + if err := dec.Decode(&s.Seeds); err != nil { + return err + } + + case "skip_unavailable": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.SkipUnavailable = value + case bool: + s.SkipUnavailable = v + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ClusterRemoteSniffInfo) MarshalJSON() ([]byte, error) { + type innerClusterRemoteSniffInfo ClusterRemoteSniffInfo + tmp := innerClusterRemoteSniffInfo{ + Connected: s.Connected, + InitialConnectTimeout: s.InitialConnectTimeout, + MaxConnectionsPerCluster: s.MaxConnectionsPerCluster, + Mode: s.Mode, + NumNodesConnected: s.NumNodesConnected, + Seeds: s.Seeds, + SkipUnavailable: s.SkipUnavailable, + } + + tmp.Mode = "sniff" + + return json.Marshal(tmp) +} + // NewClusterRemoteSniffInfo returns a ClusterRemoteSniffInfo. 
func NewClusterRemoteSniffInfo() *ClusterRemoteSniffInfo { r := &ClusterRemoteSniffInfo{} - r.Mode = "sniff" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterruntimefieldtypes.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterruntimefieldtypes.go index b3691c0b7..bb12e283b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterruntimefieldtypes.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterruntimefieldtypes.go @@ -16,28 +16,277 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterRuntimeFieldTypes type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L116-L131 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L169-L226 type ClusterRuntimeFieldTypes struct { - CharsMax int `json:"chars_max"` - CharsTotal int `json:"chars_total"` - Count int `json:"count"` - DocMax int `json:"doc_max"` - DocTotal int `json:"doc_total"` - IndexCount int `json:"index_count"` - Lang []string `json:"lang"` - LinesMax int `json:"lines_max"` - LinesTotal int `json:"lines_total"` - Name string `json:"name"` - ScriptlessCount int `json:"scriptless_count"` - ShadowedCount int `json:"shadowed_count"` - SourceMax int `json:"source_max"` - SourceTotal int `json:"source_total"` + // CharsMax Maximum number of characters for a single runtime field script. 
+ CharsMax int `json:"chars_max"` + // CharsTotal Total number of characters for the scripts that define the current runtime + // field data type. + CharsTotal int `json:"chars_total"` + // Count Number of runtime fields mapped to the field data type in selected nodes. + Count int `json:"count"` + // DocMax Maximum number of accesses to doc_values for a single runtime field script + DocMax int `json:"doc_max"` + // DocTotal Total number of accesses to doc_values for the scripts that define the + // current runtime field data type. + DocTotal int `json:"doc_total"` + // IndexCount Number of indices containing a mapping of the runtime field data type in + // selected nodes. + IndexCount int `json:"index_count"` + // Lang Script languages used for the runtime fields scripts. + Lang []string `json:"lang"` + // LinesMax Maximum number of lines for a single runtime field script. + LinesMax int `json:"lines_max"` + // LinesTotal Total number of lines for the scripts that define the current runtime field + // data type. + LinesTotal int `json:"lines_total"` + // Name Field data type used in selected nodes. + Name string `json:"name"` + // ScriptlessCount Number of runtime fields that don’t declare a script. + ScriptlessCount int `json:"scriptless_count"` + // ShadowedCount Number of runtime fields that shadow an indexed field. + ShadowedCount int `json:"shadowed_count"` + // SourceMax Maximum number of accesses to _source for a single runtime field script. + SourceMax int `json:"source_max"` + // SourceTotal Total number of accesses to _source for the scripts that define the current + // runtime field data type. 
+ SourceTotal int `json:"source_total"` +} + +func (s *ClusterRuntimeFieldTypes) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "chars_max": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CharsMax = value + case float64: + f := int(v) + s.CharsMax = f + } + + case "chars_total": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CharsTotal = value + case float64: + f := int(v) + s.CharsTotal = f + } + + case "count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "doc_max": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DocMax = value + case float64: + f := int(v) + s.DocMax = f + } + + case "doc_total": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DocTotal = value + case float64: + f := int(v) + s.DocTotal = f + } + + case "index_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IndexCount = value + case float64: + f := int(v) + s.IndexCount = f + } + + case "lang": + if err := dec.Decode(&s.Lang); err != nil { + return err + } + + case "lines_max": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil 
{ + return err + } + s.LinesMax = value + case float64: + f := int(v) + s.LinesMax = f + } + + case "lines_total": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.LinesTotal = value + case float64: + f := int(v) + s.LinesTotal = f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "scriptless_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ScriptlessCount = value + case float64: + f := int(v) + s.ScriptlessCount = f + } + + case "shadowed_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShadowedCount = value + case float64: + f := int(v) + s.ShadowedCount = f + } + + case "source_max": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SourceMax = value + case float64: + f := int(v) + s.SourceMax = f + } + + case "source_total": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SourceTotal = value + case float64: + f := int(v) + s.SourceTotal = f + } + + } + } + return nil } // NewClusterRuntimeFieldTypes returns a ClusterRuntimeFieldTypes. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clustershardmetrics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clustershardmetrics.go index 3e119ae73..1801120be 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clustershardmetrics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clustershardmetrics.go @@ -16,19 +16,101 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterShardMetrics type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L277-L281 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L511-L524 type ClusterShardMetrics struct { + // Avg Mean number of shards in an index, counting only shards assigned to selected + // nodes. Avg Float64 `json:"avg"` + // Max Maximum number of shards in an index, counting only shards assigned to + // selected nodes. Max Float64 `json:"max"` + // Min Minimum number of shards in an index, counting only shards assigned to + // selected nodes. 
Min Float64 `json:"min"` } +func (s *ClusterShardMetrics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Avg = f + case float64: + f := Float64(v) + s.Avg = f + } + + case "max": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Max = f + case float64: + f := Float64(v) + s.Max = f + } + + case "min": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Min = f + case float64: + f := Float64(v) + s.Min = f + } + + } + } + return nil +} + // NewClusterShardMetrics returns a ClusterShardMetrics. func NewClusterShardMetrics() *ClusterShardMetrics { r := &ClusterShardMetrics{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterstatequeue.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterstatequeue.go index 6a5ae789c..5b9f7b0c4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterstatequeue.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterstatequeue.go @@ -16,17 +16,93 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterStateQueue type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L114-L118 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L248-L261 type ClusterStateQueue struct { + // Committed Number of committed cluster states in queue. Committed *int64 `json:"committed,omitempty"` - Pending *int64 `json:"pending,omitempty"` - Total *int64 `json:"total,omitempty"` + // Pending Number of pending cluster states in queue. + Pending *int64 `json:"pending,omitempty"` + // Total Total number of cluster states in queue. 
+ Total *int64 `json:"total,omitempty"` +} + +func (s *ClusterStateQueue) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "committed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Committed = &value + case float64: + f := int64(v) + s.Committed = &f + } + + case "pending": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Pending = &value + case float64: + f := int64(v) + s.Pending = &f + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = &value + case float64: + f := int64(v) + s.Total = &f + } + + } + } + return nil } // NewClusterStateQueue returns a ClusterStateQueue. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterstateupdate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterstateupdate.go index 9a29f72a7..bf1dfa05f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterstateupdate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterstateupdate.go @@ -16,29 +16,197 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterStateUpdate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L126-L142 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L278-L343 type ClusterStateUpdate struct { - CommitTime Duration `json:"commit_time,omitempty"` - CommitTimeMillis *int64 `json:"commit_time_millis,omitempty"` - CompletionTime Duration `json:"completion_time,omitempty"` - CompletionTimeMillis *int64 `json:"completion_time_millis,omitempty"` - ComputationTime Duration `json:"computation_time,omitempty"` - ComputationTimeMillis *int64 `json:"computation_time_millis,omitempty"` - ContextConstructionTime Duration `json:"context_construction_time,omitempty"` - ContextConstructionTimeMillis *int64 `json:"context_construction_time_millis,omitempty"` - Count int64 `json:"count"` - MasterApplyTime Duration `json:"master_apply_time,omitempty"` - MasterApplyTimeMillis *int64 `json:"master_apply_time_millis,omitempty"` - NotificationTime Duration `json:"notification_time,omitempty"` - NotificationTimeMillis *int64 `json:"notification_time_millis,omitempty"` - PublicationTime Duration `json:"publication_time,omitempty"` - PublicationTimeMillis *int64 `json:"publication_time_millis,omitempty"` + // CommitTime The cumulative amount of time spent waiting for a successful cluster state + // update to commit, which measures the time from the start of each publication + // until a majority of the master-eligible nodes have written the state to disk + // and confirmed the write to the elected master. 
+ CommitTime Duration `json:"commit_time,omitempty"` + // CommitTimeMillis The cumulative amount of time, in milliseconds, spent waiting for a + // successful cluster state update to commit, which measures the time from the + // start of each publication until a majority of the master-eligible nodes have + // written the state to disk and confirmed the write to the elected master. + CommitTimeMillis *int64 `json:"commit_time_millis,omitempty"` + // CompletionTime The cumulative amount of time spent waiting for a successful cluster state + // update to complete, which measures the time from the start of each + // publication until all the other nodes have notified the elected master that + // they have applied the cluster state. + CompletionTime Duration `json:"completion_time,omitempty"` + // CompletionTimeMillis The cumulative amount of time, in milliseconds, spent waiting for a + // successful cluster state update to complete, which measures the time from the + // start of each publication until all the other nodes have notified the elected + // master that they have applied the cluster state. + CompletionTimeMillis *int64 `json:"completion_time_millis,omitempty"` + // ComputationTime The cumulative amount of time spent computing no-op cluster state updates + // since the node started. + ComputationTime Duration `json:"computation_time,omitempty"` + // ComputationTimeMillis The cumulative amount of time, in milliseconds, spent computing no-op cluster + // state updates since the node started. + ComputationTimeMillis *int64 `json:"computation_time_millis,omitempty"` + // ContextConstructionTime The cumulative amount of time spent constructing a publication context since + // the node started for publications that ultimately succeeded. + // This statistic includes the time spent computing the difference between the + // current and new cluster state preparing a serialized representation of this + // difference. 
+ ContextConstructionTime Duration `json:"context_construction_time,omitempty"` + // ContextConstructionTimeMillis The cumulative amount of time, in milliseconds, spent constructing a + // publication context since the node started for publications that ultimately + // succeeded. + // This statistic includes the time spent computing the difference between the + // current and new cluster state preparing a serialized representation of this + // difference. + ContextConstructionTimeMillis *int64 `json:"context_construction_time_millis,omitempty"` + // Count The number of cluster state update attempts that did not change the cluster + // state since the node started. + Count int64 `json:"count"` + // MasterApplyTime The cumulative amount of time spent successfully applying cluster state + // updates on the elected master since the node started. + MasterApplyTime Duration `json:"master_apply_time,omitempty"` + // MasterApplyTimeMillis The cumulative amount of time, in milliseconds, spent successfully applying + // cluster state updates on the elected master since the node started. + MasterApplyTimeMillis *int64 `json:"master_apply_time_millis,omitempty"` + // NotificationTime The cumulative amount of time spent notifying listeners of a no-op cluster + // state update since the node started. + NotificationTime Duration `json:"notification_time,omitempty"` + // NotificationTimeMillis The cumulative amount of time, in milliseconds, spent notifying listeners of + // a no-op cluster state update since the node started. + NotificationTimeMillis *int64 `json:"notification_time_millis,omitempty"` + // PublicationTime The cumulative amount of time spent publishing cluster state updates which + // ultimately succeeded, which includes everything from the start of the + // publication (just after the computation of the new cluster state) until the + // publication has finished and the master node is ready to start processing the + // next state update. 
+ // This includes the time measured by `context_construction_time`, + // `commit_time`, `completion_time` and `master_apply_time`. + PublicationTime Duration `json:"publication_time,omitempty"` + // PublicationTimeMillis The cumulative amount of time, in milliseconds, spent publishing cluster + // state updates which ultimately succeeded, which includes everything from the + // start of the publication (just after the computation of the new cluster + // state) until the publication has finished and the master node is ready to + // start processing the next state update. + // This includes the time measured by `context_construction_time`, + // `commit_time`, `completion_time` and `master_apply_time`. + PublicationTimeMillis *int64 `json:"publication_time_millis,omitempty"` +} + +func (s *ClusterStateUpdate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "commit_time": + if err := dec.Decode(&s.CommitTime); err != nil { + return err + } + + case "commit_time_millis": + if err := dec.Decode(&s.CommitTimeMillis); err != nil { + return err + } + + case "completion_time": + if err := dec.Decode(&s.CompletionTime); err != nil { + return err + } + + case "completion_time_millis": + if err := dec.Decode(&s.CompletionTimeMillis); err != nil { + return err + } + + case "computation_time": + if err := dec.Decode(&s.ComputationTime); err != nil { + return err + } + + case "computation_time_millis": + if err := dec.Decode(&s.ComputationTimeMillis); err != nil { + return err + } + + case "context_construction_time": + if err := dec.Decode(&s.ContextConstructionTime); err != nil { + return err + } + + case "context_construction_time_millis": + if err := dec.Decode(&s.ContextConstructionTimeMillis); err != nil { + return err + } + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) 
{ + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "master_apply_time": + if err := dec.Decode(&s.MasterApplyTime); err != nil { + return err + } + + case "master_apply_time_millis": + if err := dec.Decode(&s.MasterApplyTimeMillis); err != nil { + return err + } + + case "notification_time": + if err := dec.Decode(&s.NotificationTime); err != nil { + return err + } + + case "notification_time_millis": + if err := dec.Decode(&s.NotificationTimeMillis); err != nil { + return err + } + + case "publication_time": + if err := dec.Decode(&s.PublicationTime); err != nil { + return err + } + + case "publication_time_millis": + if err := dec.Decode(&s.PublicationTimeMillis); err != nil { + return err + } + + } + } + return nil } // NewClusterStateUpdate returns a ClusterStateUpdate. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterstatistics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterstatistics.go index fc90be208..7c0765974 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterstatistics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/clusterstatistics.go @@ -16,22 +16,160 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ClusterStatistics type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L27-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L27-L35 type ClusterStatistics struct { - Skipped int `json:"skipped"` - Successful int `json:"successful"` - Total int `json:"total"` + Details map[string]ClusterDetails `json:"details,omitempty"` + Failed int `json:"failed"` + Partial int `json:"partial"` + Running int `json:"running"` + Skipped int `json:"skipped"` + Successful int `json:"successful"` + Total int `json:"total"` +} + +func (s *ClusterStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if s.Details == nil { + s.Details = make(map[string]ClusterDetails, 0) + } + if err := dec.Decode(&s.Details); err != nil { + return err + } + + case "failed": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Failed = value + case float64: + f := int(v) + s.Failed = f + } + + case "partial": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Partial = value + case float64: + f := int(v) + s.Partial = f + } + + case "running": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Running = value + case float64: + f := int(v) + s.Running = f + } + + case "skipped": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Skipped = value + case 
float64: + f := int(v) + s.Skipped = f + } + + case "successful": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Successful = value + case float64: + f := int(v) + s.Successful = f + } + + case "total": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Total = value + case float64: + f := int(v) + s.Total = f + } + + } + } + return nil } // NewClusterStatistics returns a ClusterStatistics. func NewClusterStatistics() *ClusterStatistics { - r := &ClusterStatistics{} + r := &ClusterStatistics{ + Details: make(map[string]ClusterDetails, 0), + } return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/collector.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/collector.go index 87598de1d..0060dd847 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/collector.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/collector.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Collector type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/profile.ts#L86-L91 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/profile.ts#L86-L91 type Collector struct { Children []Collector `json:"children,omitempty"` Name string `json:"name"` @@ -30,6 +38,60 @@ type Collector struct { TimeInNanos int64 `json:"time_in_nanos"` } +func (s *Collector) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = o + + case "time_in_nanos": + if err := dec.Decode(&s.TimeInNanos); err != nil { + return err + } + + } + } + return nil +} + // NewCollector returns a Collector. func NewCollector() *Collector { r := &Collector{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/column.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/column.go index ade63065a..c0dd2349f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/column.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/column.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Column type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/sql/types.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/sql/types.ts#L23-L26 type Column struct { Name string `json:"name"` Type string `json:"type"` } +func (s *Column) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewColumn returns a Column. func NewColumn() *Column { r := &Column{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/combinedfieldsquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/combinedfieldsquery.go index 479c007cc..c03b75df4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/combinedfieldsquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/combinedfieldsquery.go @@ -16,27 +16,143 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/combinedfieldsoperator" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/combinedfieldszeroterms" ) // CombinedFieldsQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/abstractions.ts#L181-L195 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/abstractions.ts#L429-L463 type CombinedFieldsQuery struct { - AutoGenerateSynonymsPhraseQuery *bool `json:"auto_generate_synonyms_phrase_query,omitempty"` - Boost *float32 `json:"boost,omitempty"` - Fields []string `json:"fields"` - MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` - Operator *combinedfieldsoperator.CombinedFieldsOperator `json:"operator,omitempty"` - Query string `json:"query"` - QueryName_ *string `json:"_name,omitempty"` - ZeroTermsQuery *combinedfieldszeroterms.CombinedFieldsZeroTerms `json:"zero_terms_query,omitempty"` + // AutoGenerateSynonymsPhraseQuery If true, match phrase queries are automatically created for multi-term + // synonyms. + AutoGenerateSynonymsPhraseQuery *bool `json:"auto_generate_synonyms_phrase_query,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Fields List of fields to search. Field wildcard patterns are allowed. 
Only `text` + // fields are supported, and they must all have the same search `analyzer`. + Fields []string `json:"fields"` + // MinimumShouldMatch Minimum number of clauses that must match for a document to be returned. + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` + // Operator Boolean logic used to interpret text in the query value. + Operator *combinedfieldsoperator.CombinedFieldsOperator `json:"operator,omitempty"` + // Query Text to search for in the provided `fields`. + // The `combined_fields` query analyzes the provided text before performing a + // search. + Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // ZeroTermsQuery Indicates whether no documents are returned if the analyzer removes all + // tokens, such as when using a `stop` filter. + ZeroTermsQuery *combinedfieldszeroterms.CombinedFieldsZeroTerms `json:"zero_terms_query,omitempty"` +} + +func (s *CombinedFieldsQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "auto_generate_synonyms_phrase_query": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AutoGenerateSynonymsPhraseQuery = &value + case bool: + s.AutoGenerateSynonymsPhraseQuery = &v + } + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return err + } + + case "operator": + if err := dec.Decode(&s.Operator); 
err != nil { + return err + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "zero_terms_query": + if err := dec.Decode(&s.ZeroTermsQuery); err != nil { + return err + } + + } + } + return nil } // NewCombinedFieldsQuery returns a CombinedFieldsQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/command.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/command.go index 2623649a1..0be057613 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/command.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/command.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Command type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/reroute/types.ts#L22-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/reroute/types.ts#L22-L43 type Command struct { // AllocateEmptyPrimary Allocate an empty primary shard to a node. Accepts the index and shard for // index name and shard number, and node to allocate the shard to. 
Using this diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commandallocateprimaryaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commandallocateprimaryaction.go index 4e4252d0b..633d6be97 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commandallocateprimaryaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commandallocateprimaryaction.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CommandAllocatePrimaryAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/reroute/types.ts#L78-L84 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/reroute/types.ts#L78-L84 type CommandAllocatePrimaryAction struct { // AcceptDataLoss If a node which has a copy of the data rejoins the cluster later on, that // data will be deleted. 
To ensure that these implications are well-understood, @@ -33,6 +41,73 @@ type CommandAllocatePrimaryAction struct { Shard int `json:"shard"` } +func (s *CommandAllocatePrimaryAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "accept_data_loss": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AcceptDataLoss = value + case bool: + s.AcceptDataLoss = v + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = o + + case "shard": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Shard = value + case float64: + f := int(v) + s.Shard = f + } + + } + } + return nil +} + // NewCommandAllocatePrimaryAction returns a CommandAllocatePrimaryAction. func NewCommandAllocatePrimaryAction() *CommandAllocatePrimaryAction { r := &CommandAllocatePrimaryAction{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commandallocatereplicaaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commandallocatereplicaaction.go index e6512d159..6c8c453df 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commandallocatereplicaaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commandallocatereplicaaction.go @@ -16,19 +16,80 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CommandAllocateReplicaAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/reroute/types.ts#L69-L76 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/reroute/types.ts#L69-L76 type CommandAllocateReplicaAction struct { Index string `json:"index"` Node string `json:"node"` Shard int `json:"shard"` } +func (s *CommandAllocateReplicaAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = o + + case "shard": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Shard = value + case float64: + f := int(v) + s.Shard = f + } + + } + } + return nil +} + // NewCommandAllocateReplicaAction returns a CommandAllocateReplicaAction. 
func NewCommandAllocateReplicaAction() *CommandAllocateReplicaAction { r := &CommandAllocateReplicaAction{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commandcancelaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commandcancelaction.go index a175ef34d..85b558e8a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commandcancelaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commandcancelaction.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CommandCancelAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/reroute/types.ts#L45-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/reroute/types.ts#L45-L50 type CommandCancelAction struct { AllowPrimary *bool `json:"allow_primary,omitempty"` Index string `json:"index"` @@ -30,6 +38,73 @@ type CommandCancelAction struct { Shard int `json:"shard"` } +func (s *CommandCancelAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_primary": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowPrimary = &value + case bool: + s.AllowPrimary = &v + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + 
return err + } + + case "node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = o + + case "shard": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Shard = value + case float64: + f := int(v) + s.Shard = f + } + + } + } + return nil +} + // NewCommandCancelAction returns a CommandCancelAction. func NewCommandCancelAction() *CommandCancelAction { r := &CommandCancelAction{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commandmoveaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commandmoveaction.go index 471ad97a5..64f20bce8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commandmoveaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commandmoveaction.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CommandMoveAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/reroute/types.ts#L60-L67 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/reroute/types.ts#L60-L67 type CommandMoveAction struct { // FromNode The node to move the shard from FromNode string `json:"from_node"` @@ -32,6 +40,71 @@ type CommandMoveAction struct { ToNode string `json:"to_node"` } +func (s *CommandMoveAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "from_node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FromNode = o + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "shard": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Shard = value + case float64: + f := int(v) + s.Shard = f + } + + case "to_node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ToNode = o + + } + } + return nil +} + // NewCommandMoveAction returns a CommandMoveAction. 
func NewCommandMoveAction() *CommandMoveAction { r := &CommandMoveAction{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commongramstokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commongramstokenfilter.go index 6f218f35e..6d9cafa9b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commongramstokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commongramstokenfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CommonGramsTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L172-L178 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L173-L179 type CommonGramsTokenFilter struct { CommonWords []string `json:"common_words,omitempty"` CommonWordsPath *string `json:"common_words_path,omitempty"` @@ -32,11 +40,101 @@ type CommonGramsTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *CommonGramsTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "common_words": + if err := dec.Decode(&s.CommonWords); err != nil { + return err + } + + case "common_words_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CommonWordsPath = &o + + case "ignore_case": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreCase = &value + case bool: + s.IgnoreCase = &v + } + + case "query_mode": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.QueryMode = &value + case bool: + s.QueryMode = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CommonGramsTokenFilter) MarshalJSON() ([]byte, error) { + type innerCommonGramsTokenFilter CommonGramsTokenFilter + tmp := innerCommonGramsTokenFilter{ + CommonWords: s.CommonWords, + CommonWordsPath: s.CommonWordsPath, + IgnoreCase: s.IgnoreCase, + QueryMode: s.QueryMode, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "common_grams" + + return json.Marshal(tmp) +} + // NewCommonGramsTokenFilter returns a CommonGramsTokenFilter. func NewCommonGramsTokenFilter() *CommonGramsTokenFilter { r := &CommonGramsTokenFilter{} - r.Type = "common_grams" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commontermsquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commontermsquery.go index fdae1efa2..7b1c5d40a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commontermsquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/commontermsquery.go @@ -16,19 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" ) // CommonTermsQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L33-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L34-L44 type CommonTermsQuery struct { - Analyzer *string `json:"analyzer,omitempty"` + Analyzer *string `json:"analyzer,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
Boost *float32 `json:"boost,omitempty"` CutoffFrequency *Float64 `json:"cutoff_frequency,omitempty"` HighFreqOperator *operator.Operator `json:"high_freq_operator,omitempty"` @@ -38,6 +49,114 @@ type CommonTermsQuery struct { QueryName_ *string `json:"_name,omitempty"` } +func (s *CommonTermsQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Query) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "cutoff_frequency": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.CutoffFrequency = &f + case float64: + f := Float64(v) + s.CutoffFrequency = &f + } + + case "high_freq_operator": + if err := dec.Decode(&s.HighFreqOperator); err != nil { + return err + } + + case "low_freq_operator": + if err := dec.Decode(&s.LowFreqOperator); err != nil { + return err + } + + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return err + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp 
json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + // NewCommonTermsQuery returns a CommonTermsQuery. func NewCommonTermsQuery() *CommonTermsQuery { r := &CommonTermsQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compactnodeinfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compactnodeinfo.go index 7387c4e91..e8975a8f5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compactnodeinfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compactnodeinfo.go @@ -16,17 +16,49 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // CompactNodeInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L27-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/verify_repository/SnapshotVerifyRepositoryResponse.ts#L27-L29 type CompactNodeInfo struct { Name string `json:"name"` } +func (s *CompactNodeInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + // NewCompactNodeInfo returns a CompactNodeInfo. 
func NewCompactNodeInfo() *CompactNodeInfo { r := &CompactNodeInfo{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completioncontext.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completioncontext.go index fe02ca852..83fc8f946 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completioncontext.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completioncontext.go @@ -16,30 +16,49 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // CompletionContext type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L155-L162 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L232-L261 type CompletionContext struct { - Boost *Float64 `json:"boost,omitempty"` - Context Context `json:"context"` + // Boost The factor by which the score of the suggestion should be boosted. + // The score is computed by multiplying the boost with the suggestion weight. + Boost *Float64 `json:"boost,omitempty"` + // Context The value of the category to filter/boost on. + Context Context `json:"context"` + // Neighbours An array of precision values at which neighboring geohashes should be taken + // into account. + // Precision value can be a distance value (`5m`, `10km`, etc.) or a raw geohash + // precision (`1`..`12`). + // Defaults to generating neighbors for index time precision level. 
Neighbours []GeoHashPrecision `json:"neighbours,omitempty"` - Precision GeoHashPrecision `json:"precision,omitempty"` - Prefix *bool `json:"prefix,omitempty"` + // Precision The precision of the geohash to encode the query geo point. + // Can be specified as a distance value (`5m`, `10km`, etc.), or as a raw + // geohash precision (`1`..`12`). + // Defaults to index time precision level. + Precision GeoHashPrecision `json:"precision,omitempty"` + // Prefix Whether the category value should be treated as a prefix or not. + Prefix *bool `json:"prefix,omitempty"` } func (s *CompletionContext) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Context) + return err + } + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -54,11 +73,23 @@ func (s *CompletionContext) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "context": + rawMsg := json.RawMessage{} dec.Decode(&rawMsg) source := bytes.NewReader(rawMsg) @@ -88,8 +119,17 @@ func (s *CompletionContext) UnmarshalJSON(data []byte) error { } case "prefix": - if err := dec.Decode(&s.Prefix); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Prefix = &value + case bool: + s.Prefix = &v } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionproperty.go index 37811f3ff..f34bf07c0 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // CompletionProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/specialized.ts#L27-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/specialized.ts#L27-L35 type CompletionProperty struct { Analyzer *string `json:"analyzer,omitempty"` Contexts []SuggestContext `json:"contexts,omitempty"` @@ -54,6 +54,7 @@ type CompletionProperty struct { } func (s *CompletionProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -68,9 +69,16 @@ func (s *CompletionProperty) UnmarshalJSON(data []byte) error { switch t { case "analyzer": - if err := dec.Decode(&s.Analyzer); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o case "contexts": if err := dec.Decode(&s.Contexts); err != nil { @@ -78,13 +86,33 @@ func (s *CompletionProperty) UnmarshalJSON(data []byte) error { } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := 
json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -93,6 +121,9 @@ func (s *CompletionProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -101,7 +132,9 @@ func (s *CompletionProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -380,38 +413,86 @@ func (s *CompletionProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "max_input_length": - if err := dec.Decode(&s.MaxInputLength); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err 
:= strconv.Atoi(v) + if err != nil { + return err + } + s.MaxInputLength = &value + case float64: + f := int(v) + s.MaxInputLength = &f } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "preserve_position_increments": - if err := dec.Decode(&s.PreservePositionIncrements); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.PreservePositionIncrements = &value + case bool: + s.PreservePositionIncrements = &v } case "preserve_separators": - if err := dec.Decode(&s.PreserveSeparators); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.PreserveSeparators = &value + case bool: + s.PreserveSeparators = &v } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -420,7 +501,9 @@ func (s *CompletionProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -699,25 +782,50 @@ func (s *CompletionProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "search_analyzer": - if err := dec.Decode(&s.SearchAnalyzer); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + 
s.SearchAnalyzer = &o case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -730,6 +838,33 @@ func (s *CompletionProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s CompletionProperty) MarshalJSON() ([]byte, error) { + type innerCompletionProperty CompletionProperty + tmp := innerCompletionProperty{ + Analyzer: s.Analyzer, + Contexts: s.Contexts, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + MaxInputLength: s.MaxInputLength, + Meta: s.Meta, + PreservePositionIncrements: s.PreservePositionIncrements, + PreserveSeparators: s.PreserveSeparators, + Properties: s.Properties, + SearchAnalyzer: s.SearchAnalyzer, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + tmp.Type = "completion" + + return json.Marshal(tmp) +} + // NewCompletionProperty returns a CompletionProperty. 
func NewCompletionProperty() *CompletionProperty { r := &CompletionProperty{ @@ -738,7 +873,5 @@ func NewCompletionProperty() *CompletionProperty { Properties: make(map[string]Property, 0), } - r.Type = "completion" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionstats.go index 7ed8c1884..a1a21a5ec 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionstats.go @@ -16,17 +16,77 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CompletionStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L53-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L80-L90 type CompletionStats struct { - Fields map[string]FieldSizeUsage `json:"fields,omitempty"` - Size ByteSize `json:"size,omitempty"` - SizeInBytes int64 `json:"size_in_bytes"` + Fields map[string]FieldSizeUsage `json:"fields,omitempty"` + // Size Total amount of memory used for completion across all shards assigned to + // selected nodes. + Size ByteSize `json:"size,omitempty"` + // SizeInBytes Total amount, in bytes, of memory used for completion across all shards + // assigned to selected nodes. 
+ SizeInBytes int64 `json:"size_in_bytes"` +} + +func (s *CompletionStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]FieldSizeUsage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return err + } + + case "size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + } + } + return nil } // NewCompletionStats returns a CompletionStats. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionsuggest.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionsuggest.go index bd243cb0c..ece5442b8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionsuggest.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionsuggest.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CompletionSuggest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L48-L55 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L48-L55 type CompletionSuggest struct { Length int `json:"length"` Offset int `json:"offset"` @@ -30,6 +38,86 @@ type CompletionSuggest struct { Text string `json:"text"` } +func (s *CompletionSuggest) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Length = value + case float64: + f := int(v) + s.Length = f + } + + case "offset": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Offset = value + case float64: + f := int(v) + s.Offset = f + } + + case "options": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewCompletionSuggestOption() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Options = append(s.Options, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Options); err != nil { + return err + } + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + } + } + return nil +} + // NewCompletionSuggest returns a CompletionSuggest. 
func NewCompletionSuggest() *CompletionSuggest { r := &CompletionSuggest{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionsuggester.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionsuggester.go index 7c6f4ba27..3be4d29e6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionsuggester.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionsuggester.go @@ -16,22 +16,142 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CompletionSuggester type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L130-L136 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L160-L178 type CompletionSuggester struct { - Analyzer *string `json:"analyzer,omitempty"` - Contexts map[string][]CompletionContext `json:"contexts,omitempty"` - Field string `json:"field"` - Fuzzy *SuggestFuzziness `json:"fuzzy,omitempty"` - Prefix *string `json:"prefix,omitempty"` - Regex *string `json:"regex,omitempty"` - Size *int `json:"size,omitempty"` - SkipDuplicates *bool `json:"skip_duplicates,omitempty"` + // Analyzer The analyzer to analyze the suggest text with. + // Defaults to the search analyzer of the suggest field. + Analyzer *string `json:"analyzer,omitempty"` + // Contexts A value, geo point object, or a geo hash string to filter or boost the + // suggestion on. 
+ Contexts map[string][]CompletionContext `json:"contexts,omitempty"` + // Field The field to fetch the candidate suggestions from. + // Needs to be set globally or per suggestion. + Field string `json:"field"` + // Fuzzy Enables fuzziness, meaning you can have a typo in your search and still get + // results back. + Fuzzy *SuggestFuzziness `json:"fuzzy,omitempty"` + // Regex A regex query that expresses a prefix as a regular expression. + Regex *RegexOptions `json:"regex,omitempty"` + // Size The maximum corrections to be returned per suggest text token. + Size *int `json:"size,omitempty"` + // SkipDuplicates Whether duplicate suggestions should be filtered out. + SkipDuplicates *bool `json:"skip_duplicates,omitempty"` +} + +func (s *CompletionSuggester) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "contexts": + if s.Contexts == nil { + s.Contexts = make(map[string][]CompletionContext, 0) + } + rawMsg := make(map[string]json.RawMessage, 0) + dec.Decode(&rawMsg) + for key, value := range rawMsg { + switch { + case bytes.HasPrefix(value, []byte("\"")), bytes.HasPrefix(value, []byte("{")): + o := NewCompletionContext() + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return err + } + s.Contexts[key] = append(s.Contexts[key], *o) + default: + o := []CompletionContext{} + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return err + } + s.Contexts[key] = o + } + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "fuzzy": + if err := dec.Decode(&s.Fuzzy); err != nil { + return err + 
} + + case "regex": + if err := dec.Decode(&s.Regex); err != nil { + return err + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "skip_duplicates": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.SkipDuplicates = &value + case bool: + s.SkipDuplicates = &v + } + + } + } + return nil } // NewCompletionSuggester returns a CompletionSuggester. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionsuggestoption.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionsuggestoption.go index dbc9fa681..5eb67e8dd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionsuggestoption.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/completionsuggestoption.go @@ -16,17 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // CompletionSuggestOption type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L73-L84 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L73-L84 type CompletionSuggestOption struct { CollateMatch *bool `json:"collate_match,omitempty"` Contexts map[string][]Context `json:"contexts,omitempty"` @@ -40,6 +44,127 @@ type CompletionSuggestOption struct { Text string `json:"text"` } +func (s *CompletionSuggestOption) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collate_match": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.CollateMatch = &value + case bool: + s.CollateMatch = &v + } + + case "contexts": + if s.Contexts == nil { + s.Contexts = make(map[string][]Context, 0) + } + if err := dec.Decode(&s.Contexts); err != nil { + return err + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id_ = &o + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "_routing": + if err := dec.Decode(&s.Routing_); err != nil { + return err + } + + case "score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Score = &f + case float64: + f := 
Float64(v) + s.Score = &f + } + + case "_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Score_ = &f + case float64: + f := Float64(v) + s.Score_ = &f + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return err + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + } + } + return nil +} + // NewCompletionSuggestOption returns a CompletionSuggestOption. func NewCompletionSuggestOption() *CompletionSuggestOption { r := &CompletionSuggestOption{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/componenttemplatenode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/componenttemplatenode.go index 5218c8428..5f26df495 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/componenttemplatenode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/componenttemplatenode.go @@ -16,21 +16,59 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // ComponentTemplateNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/_types/ComponentTemplate.ts#L31-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/_types/ComponentTemplate.ts#L35-L40 type ComponentTemplateNode struct { - Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` - Template ComponentTemplateSummary `json:"template"` - Version *int64 `json:"version,omitempty"` + Meta_ Metadata `json:"_meta,omitempty"` + Template ComponentTemplateSummary `json:"template"` + Version *int64 `json:"version,omitempty"` +} + +func (s *ComponentTemplateNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return err + } + + case "template": + if err := dec.Decode(&s.Template); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil } // NewComponentTemplateNode returns a ComponentTemplateNode. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/componenttemplatesummary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/componenttemplatesummary.go index d0d4b9c13..4d981148a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/componenttemplatesummary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/componenttemplatesummary.go @@ -16,23 +16,83 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // ComponentTemplateSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/_types/ComponentTemplate.ts#L38-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/_types/ComponentTemplate.ts#L42-L54 type ComponentTemplateSummary struct { - Aliases map[string]AliasDefinition `json:"aliases,omitempty"` - Mappings *TypeMapping `json:"mappings,omitempty"` - Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` - Settings map[string]IndexSettings `json:"settings,omitempty"` - Version *int64 `json:"version,omitempty"` + Aliases map[string]AliasDefinition `json:"aliases,omitempty"` + Lifecycle *DataStreamLifecycleWithRollover `json:"lifecycle,omitempty"` + Mappings *TypeMapping `json:"mappings,omitempty"` + Meta_ Metadata `json:"_meta,omitempty"` + Settings map[string]IndexSettings `json:"settings,omitempty"` + Version *int64 `json:"version,omitempty"` +} + +func (s *ComponentTemplateSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aliases": + if s.Aliases == nil { + s.Aliases = make(map[string]AliasDefinition, 0) + } + if err := dec.Decode(&s.Aliases); err != nil { + return err + } + + case "lifecycle": + if err := dec.Decode(&s.Lifecycle); err != nil { + return err + } + + case "mappings": + if err := dec.Decode(&s.Mappings); err != nil { + return err + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return err + } + + case 
"settings": + if s.Settings == nil { + s.Settings = make(map[string]IndexSettings, 0) + } + if err := dec.Decode(&s.Settings); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil } // NewComponentTemplateSummary returns a ComponentTemplateSummary. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositeaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositeaggregate.go index 5bc800b17..322cbb3ce 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositeaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositeaggregate.go @@ -16,28 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // CompositeAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L617-L622 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L618-L623 type CompositeAggregate struct { - AfterKey map[string]FieldValue `json:"after_key,omitempty"` - Buckets BucketsCompositeBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + AfterKey CompositeAggregateKey `json:"after_key,omitempty"` + Buckets BucketsCompositeBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *CompositeAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -63,15 +63,17 @@ func (s *CompositeAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]CompositeBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []CompositeBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositeaggregatekey.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositeaggregatekey.go index 341c4f508..0d1d3117d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositeaggregatekey.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositeaggregatekey.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // CompositeAggregateKey type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L77-L77 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L118-L118 type CompositeAggregateKey map[string]FieldValue diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositeaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositeaggregation.go index cac2c9a65..cf4510d1e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositeaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositeaggregation.go @@ -16,25 +16,97 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // CompositeAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L79-L84 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L120-L136 type CompositeAggregation struct { - After map[string]FieldValue `json:"after,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Size *int `json:"size,omitempty"` + // After When paginating, use the `after_key` value returned in the previous response + // to retrieve the next page. + After CompositeAggregateKey `json:"after,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Size The number of composite buckets that should be returned. + Size *int `json:"size,omitempty"` + // Sources The value sources used to build composite buckets. + // Keys are returned in the order of the `sources` definition. 
Sources []map[string]CompositeAggregationSource `json:"sources,omitempty"` } +func (s *CompositeAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "after": + if err := dec.Decode(&s.After); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sources": + if err := dec.Decode(&s.Sources); err != nil { + return err + } + + } + } + return nil +} + // NewCompositeAggregation returns a CompositeAggregation. func NewCompositeAggregation() *CompositeAggregation { r := &CompositeAggregation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositeaggregationsource.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositeaggregationsource.go index f6cc25c52..fdbd5490c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositeaggregationsource.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositeaggregationsource.go @@ -16,18 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // CompositeAggregationSource type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L86-L91 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L138-L155 type CompositeAggregationSource struct { - DateHistogram *DateHistogramAggregation `json:"date_histogram,omitempty"` - GeotileGrid *GeoTileGridAggregation `json:"geotile_grid,omitempty"` - Histogram *HistogramAggregation `json:"histogram,omitempty"` - Terms *TermsAggregation `json:"terms,omitempty"` + // DateHistogram A date histogram aggregation. + DateHistogram *CompositeDateHistogramAggregation `json:"date_histogram,omitempty"` + // GeotileGrid A geotile grid aggregation. + GeotileGrid *CompositeGeoTileGridAggregation `json:"geotile_grid,omitempty"` + // Histogram A histogram aggregation. + Histogram *CompositeHistogramAggregation `json:"histogram,omitempty"` + // Terms A terms aggregation. + Terms *CompositeTermsAggregation `json:"terms,omitempty"` } // NewCompositeAggregationSource returns a CompositeAggregationSource. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositebucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositebucket.go index 47967c202..cbcb17201 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositebucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositebucket.go @@ -16,32 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // CompositeBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L624-L626 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L625-L627 type CompositeBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` - Key map[string]FieldValue `json:"key"` + Key CompositeAggregateKey `json:"key"` } func (s *CompositeBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -55,451 +54,19 @@ func (s *CompositeBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case 
"tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - 
return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - 
return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "key": @@ -507,6 +74,519 @@ func (s *CompositeBucket) UnmarshalJSON(data []byte) error { return err } + default: + + if value, ok := t.(string); ok { + if 
strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := 
NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + 
case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err 
!= nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } + } } return nil @@ -531,6 +611,7 @@ func (s CompositeBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositedatehistogramaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositedatehistogramaggregation.go new file mode 100644 index 000000000..b175089fc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositedatehistogramaggregation.go @@ -0,0 +1,152 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/missingorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/valuetype" +) + +// CompositeDateHistogramAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L174-L182 +type CompositeDateHistogramAggregation struct { + // CalendarInterval Either `calendar_interval` or `fixed_interval` must be present + CalendarInterval *string `json:"calendar_interval,omitempty"` + // Field Either `field` or `script` must be present + Field *string `json:"field,omitempty"` + // FixedInterval Either `calendar_interval` or `fixed_interval` must be present + FixedInterval *string `json:"fixed_interval,omitempty"` + Format *string `json:"format,omitempty"` + MissingBucket *bool `json:"missing_bucket,omitempty"` + MissingOrder *missingorder.MissingOrder `json:"missing_order,omitempty"` + Offset Duration `json:"offset,omitempty"` + Order *sortorder.SortOrder `json:"order,omitempty"` + // Script Either `field` or `script` must be present + Script Script `json:"script,omitempty"` + TimeZone *string `json:"time_zone,omitempty"` + ValueType 
*valuetype.ValueType `json:"value_type,omitempty"` +} + +func (s *CompositeDateHistogramAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "calendar_interval": + if err := dec.Decode(&s.CalendarInterval); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "fixed_interval": + if err := dec.Decode(&s.FixedInterval); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing_bucket": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.MissingBucket = &value + case bool: + s.MissingBucket = &v + } + + case "missing_order": + if err := dec.Decode(&s.MissingOrder); err != nil { + return err + } + + case "offset": + if err := dec.Decode(&s.Offset); err != nil { + return err + } + + case "order": + if err := dec.Decode(&s.Order); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return err + } + + case "value_type": + if err := dec.Decode(&s.ValueType); err != nil { + return err + } + + } + } + return nil +} + +// NewCompositeDateHistogramAggregation returns a CompositeDateHistogramAggregation. 
+func NewCompositeDateHistogramAggregation() *CompositeDateHistogramAggregation { + r := &CompositeDateHistogramAggregation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositegeotilegridaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositegeotilegridaggregation.go new file mode 100644 index 000000000..73e295835 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositegeotilegridaggregation.go @@ -0,0 +1,136 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/missingorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/valuetype" +) + +// CompositeGeoTileGridAggregation type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L184-L187 +type CompositeGeoTileGridAggregation struct { + Bounds GeoBounds `json:"bounds,omitempty"` + // Field Either `field` or `script` must be present + Field *string `json:"field,omitempty"` + MissingBucket *bool `json:"missing_bucket,omitempty"` + MissingOrder *missingorder.MissingOrder `json:"missing_order,omitempty"` + Order *sortorder.SortOrder `json:"order,omitempty"` + Precision *int `json:"precision,omitempty"` + // Script Either `field` or `script` must be present + Script Script `json:"script,omitempty"` + ValueType *valuetype.ValueType `json:"value_type,omitempty"` +} + +func (s *CompositeGeoTileGridAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bounds": + if err := dec.Decode(&s.Bounds); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "missing_bucket": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.MissingBucket = &value + case bool: + s.MissingBucket = &v + } + + case "missing_order": + if err := dec.Decode(&s.MissingOrder); err != nil { + return err + } + + case "order": + if err := dec.Decode(&s.Order); err != nil { + return err + } + + case "precision": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Precision = &value + case float64: + f := int(v) + s.Precision = &f + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "value_type": + if err := dec.Decode(&s.ValueType); err != nil 
{ + return err + } + + } + } + return nil +} + +// NewCompositeGeoTileGridAggregation returns a CompositeGeoTileGridAggregation. +func NewCompositeGeoTileGridAggregation() *CompositeGeoTileGridAggregation { + r := &CompositeGeoTileGridAggregation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositehistogramaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositehistogramaggregation.go new file mode 100644 index 000000000..0d6bcc49f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositehistogramaggregation.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/missingorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/valuetype" +) + +// CompositeHistogramAggregation type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L170-L172 +type CompositeHistogramAggregation struct { + // Field Either `field` or `script` must be present + Field *string `json:"field,omitempty"` + Interval Float64 `json:"interval"` + MissingBucket *bool `json:"missing_bucket,omitempty"` + MissingOrder *missingorder.MissingOrder `json:"missing_order,omitempty"` + Order *sortorder.SortOrder `json:"order,omitempty"` + // Script Either `field` or `script` must be present + Script Script `json:"script,omitempty"` + ValueType *valuetype.ValueType `json:"value_type,omitempty"` +} + +func (s *CompositeHistogramAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "interval": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Interval = f + case float64: + f := Float64(v) + s.Interval = f + } + + case "missing_bucket": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.MissingBucket = &value + case bool: + s.MissingBucket = &v + } + + case "missing_order": + if err := dec.Decode(&s.MissingOrder); err != nil { + return err + } + + case "order": + if err := dec.Decode(&s.Order); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "value_type": + if err := dec.Decode(&s.ValueType); err != nil { + return err + } + + } + } + return nil +} + +// NewCompositeHistogramAggregation returns a 
CompositeHistogramAggregation. +func NewCompositeHistogramAggregation() *CompositeHistogramAggregation { + r := &CompositeHistogramAggregation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositetermsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositetermsaggregation.go new file mode 100644 index 000000000..66d517987 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/compositetermsaggregation.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/missingorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/valuetype" +) + +// CompositeTermsAggregation type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L168-L168 +type CompositeTermsAggregation struct { + // Field Either `field` or `script` must be present + Field *string `json:"field,omitempty"` + MissingBucket *bool `json:"missing_bucket,omitempty"` + MissingOrder *missingorder.MissingOrder `json:"missing_order,omitempty"` + Order *sortorder.SortOrder `json:"order,omitempty"` + // Script Either `field` or `script` must be present + Script Script `json:"script,omitempty"` + ValueType *valuetype.ValueType `json:"value_type,omitempty"` +} + +func (s *CompositeTermsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "missing_bucket": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.MissingBucket = &value + case bool: + s.MissingBucket = &v + } + + case "missing_order": + if err := dec.Decode(&s.MissingOrder); err != nil { + return err + } + + case "order": + if err := dec.Decode(&s.Order); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "value_type": + if err := dec.Decode(&s.ValueType); err != nil { + return err + } + + } + } + return nil +} + +// NewCompositeTermsAggregation returns a CompositeTermsAggregation. 
+func NewCompositeTermsAggregation() *CompositeTermsAggregation { + r := &CompositeTermsAggregation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/conditiontokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/conditiontokenfilter.go index 2954d85ce..5029dc8ac 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/conditiontokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/conditiontokenfilter.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ConditionTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L180-L184 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L181-L185 type ConditionTokenFilter struct { Filter []string `json:"filter"` Script Script `json:"script"` @@ -30,11 +37,64 @@ type ConditionTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *ConditionTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := 
dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ConditionTokenFilter) MarshalJSON() ([]byte, error) { + type innerConditionTokenFilter ConditionTokenFilter + tmp := innerConditionTokenFilter{ + Filter: s.Filter, + Script: s.Script, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "condition" + + return json.Marshal(tmp) +} + // NewConditionTokenFilter returns a ConditionTokenFilter. func NewConditionTokenFilter() *ConditionTokenFilter { r := &ConditionTokenFilter{} - r.Type = "condition" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/configuration.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/configuration.go index 244434572..89f600237 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/configuration.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/configuration.go @@ -16,17 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // Configuration type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/_types/SnapshotLifecycle.ts#L99-L129 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/_types/SnapshotLifecycle.ts#L99-L129 type Configuration struct { // FeatureStates A list of feature states to be included in this snapshot. 
A list of features // available for inclusion in the snapshot and their descriptions be can be @@ -53,12 +57,100 @@ type Configuration struct { // Metadata Attaches arbitrary metadata to the snapshot, such as a record of who took the // snapshot, why it was taken, or any other useful data. Metadata must be less // than 1024 bytes. - Metadata map[string]json.RawMessage `json:"metadata,omitempty"` + Metadata Metadata `json:"metadata,omitempty"` // Partial If false, the entire snapshot will fail if one or more indices included in // the snapshot do not have all primary shards available. Partial *bool `json:"partial,omitempty"` } +func (s *Configuration) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_states": + if err := dec.Decode(&s.FeatureStates); err != nil { + return err + } + + case "ignore_unavailable": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreUnavailable = &value + case bool: + s.IgnoreUnavailable = &v + } + + case "include_global_state": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IncludeGlobalState = &value + case bool: + s.IncludeGlobalState = &v + } + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return err + } + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return err + } + + case "partial": + var tmp 
interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Partial = &value + case bool: + s.Partial = &v + } + + } + } + return nil +} + // NewConfiguration returns a Configuration. func NewConfiguration() *Configuration { r := &Configuration{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/configurations.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/configurations.go index 4885517b0..7cd46f72b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/configurations.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/configurations.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Configurations type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/_types/Phase.ts#L47-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/_types/Phase.ts#L50-L54 type Configurations struct { Forcemerge *ForceMergeConfiguration `json:"forcemerge,omitempty"` Rollover *RolloverConditions `json:"rollover,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/confusionmatrixitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/confusionmatrixitem.go index 0855e6343..d9ce12a71 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/confusionmatrixitem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/confusionmatrixitem.go @@ -16,13 +16,21 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ConfusionMatrixItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/evaluate_data_frame/types.ts#L84-L89 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/evaluate_data_frame/types.ts#L125-L130 type ConfusionMatrixItem struct { ActualClass string `json:"actual_class"` ActualClassDocCount int `json:"actual_class_doc_count"` @@ -30,6 +38,68 @@ type ConfusionMatrixItem struct { PredictedClasses []ConfusionMatrixPrediction `json:"predicted_classes"` } +func (s *ConfusionMatrixItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actual_class": + if err := dec.Decode(&s.ActualClass); err != nil { + return err + } + + case "actual_class_doc_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ActualClassDocCount = value + case float64: + f := int(v) + s.ActualClassDocCount = f + } + + case "other_predicted_class_doc_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.OtherPredictedClassDocCount = value + case float64: + f := int(v) + s.OtherPredictedClassDocCount = f + } + + case "predicted_classes": + if err := dec.Decode(&s.PredictedClasses); err != nil { + return err 
+ } + + } + } + return nil +} + // NewConfusionMatrixItem returns a ConfusionMatrixItem. func NewConfusionMatrixItem() *ConfusionMatrixItem { r := &ConfusionMatrixItem{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/confusionmatrixprediction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/confusionmatrixprediction.go index f3b5c2baf..427bbbf2f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/confusionmatrixprediction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/confusionmatrixprediction.go @@ -16,18 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ConfusionMatrixPrediction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/evaluate_data_frame/types.ts#L91-L94 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/evaluate_data_frame/types.ts#L132-L135 type ConfusionMatrixPrediction struct { Count int `json:"count"` PredictedClass string `json:"predicted_class"` } +func (s *ConfusionMatrixPrediction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "predicted_class": + if err := dec.Decode(&s.PredictedClass); err != nil { + return err + } + + } + } + return nil +} + // NewConfusionMatrixPrediction returns a ConfusionMatrixPrediction. func NewConfusionMatrixPrediction() *ConfusionMatrixPrediction { r := &ConfusionMatrixPrediction{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/confusionmatrixthreshold.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/confusionmatrixthreshold.go index 2fe06b63b..4bea3fb46 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/confusionmatrixthreshold.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/confusionmatrixthreshold.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ConfusionMatrixThreshold type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/evaluate_data_frame/types.ts#L96-L117 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/evaluate_data_frame/types.ts#L137-L158 type ConfusionMatrixThreshold struct { // FalseNegative False Negative FalseNegative int `json:"fn"` @@ -34,6 +42,90 @@ type ConfusionMatrixThreshold struct { TruePositive int `json:"tp"` } +func (s *ConfusionMatrixThreshold) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fn": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FalseNegative = value + case float64: + f := int(v) + s.FalseNegative = f + } + + case "fp": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FalsePositive = value + case float64: + f := int(v) + s.FalsePositive = f + } + + case "tn": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TrueNegative = value + case float64: + f := int(v) + s.TrueNegative = f + } + + case "tp": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err 
+ } + s.TruePositive = value + case float64: + f := int(v) + s.TruePositive = f + } + + } + } + return nil +} + // NewConfusionMatrixThreshold returns a ConfusionMatrixThreshold. func NewConfusionMatrixThreshold() *ConfusionMatrixThreshold { r := &ConfusionMatrixThreshold{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/connection.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/connection.go index 0a2040aed..91a668d05 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/connection.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/connection.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Connection type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/graph/_types/Connection.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/graph/_types/Connection.ts#L22-L27 type Connection struct { DocCount int64 `json:"doc_count"` Source int64 `json:"source"` @@ -30,6 +38,87 @@ type Connection struct { Weight Float64 `json:"weight"` } +func (s *Connection) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "source": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Source = value + case float64: + f := int64(v) + s.Source = f + } + + case "target": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Target = value + case float64: + f := int64(v) + s.Target = f + } + + case "weight": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Weight = f + case float64: + f := Float64(v) + s.Weight = f + } + + } + } + return nil +} + // NewConnection returns a Connection. 
func NewConnection() *Connection { r := &Connection{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/constantkeywordproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/constantkeywordproperty.go index fee131407..c141aea60 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/constantkeywordproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/constantkeywordproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // ConstantKeywordProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/specialized.ts#L44-L47 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/specialized.ts#L44-L47 type ConstantKeywordProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` @@ -45,6 +45,7 @@ type ConstantKeywordProperty struct { } func (s *ConstantKeywordProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -64,6 +65,9 @@ func (s *ConstantKeywordProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -72,7 +76,9 @@ func (s *ConstantKeywordProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -351,23 +357,42 @@ func (s *ConstantKeywordProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + 
s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -376,7 +401,9 @@ func (s *ConstantKeywordProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -655,9 +682,11 @@ func (s *ConstantKeywordProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -676,6 +705,24 @@ func (s *ConstantKeywordProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s ConstantKeywordProperty) MarshalJSON() ([]byte, error) { + type innerConstantKeywordProperty ConstantKeywordProperty + tmp := innerConstantKeywordProperty{ + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Type: s.Type, + Value: s.Value, + } + + tmp.Type = "constant_keyword" + + return json.Marshal(tmp) +} + // NewConstantKeywordProperty returns a ConstantKeywordProperty. 
func NewConstantKeywordProperty() *ConstantKeywordProperty { r := &ConstantKeywordProperty{ @@ -684,7 +731,5 @@ func NewConstantKeywordProperty() *ConstantKeywordProperty { Properties: make(map[string]Property, 0), } - r.Type = "constant_keyword" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/constantscorequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/constantscorequery.go index 772562349..26c4fe051 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/constantscorequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/constantscorequery.go @@ -16,17 +16,87 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ConstantScoreQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L42-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L69-L76 type ConstantScoreQuery struct { - Boost *float32 `json:"boost,omitempty"` - Filter *Query `json:"filter,omitempty"` - QueryName_ *string `json:"_name,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Filter Filter query you wish to run. Any returned documents must match this query. 
+ // Filter queries do not calculate relevance scores. + // To speed up performance, Elasticsearch automatically caches frequently used + // filter queries. + Filter *Query `json:"filter,omitempty"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *ConstantScoreQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil } // NewConstantScoreQuery returns a ConstantScoreQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/context.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/context.go index 63f8d256f..340295eca 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/context.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/context.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // string // GeoLocation // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L148-L153 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L225-L230 type Context interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/contextmethod.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/contextmethod.go index aa8630e7d..1d2b2435d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/contextmethod.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/contextmethod.go @@ -16,19 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ContextMethod type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/get_script_context/types.ts#L27-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/get_script_context/types.ts#L27-L31 type ContextMethod struct { Name string `json:"name"` Params []ContextMethodParam `json:"params"` ReturnType string `json:"return_type"` } +func (s *ContextMethod) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "params": + if err := dec.Decode(&s.Params); err != nil { + return err + } + + case "return_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ReturnType = o + + } + } + return nil +} + // NewContextMethod returns a ContextMethod. func NewContextMethod() *ContextMethod { r := &ContextMethod{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/contextmethodparam.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/contextmethodparam.go index e6379baf8..459f667a8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/contextmethodparam.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/contextmethodparam.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ContextMethodParam type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/get_script_context/types.ts#L33-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/get_script_context/types.ts#L33-L36 type ContextMethodParam struct { Name string `json:"name"` Type string `json:"type"` } +func (s *ContextMethodParam) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewContextMethodParam returns a ContextMethodParam. func NewContextMethodParam() *ContextMethodParam { r := &ContextMethodParam{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/convertprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/convertprocessor.go index 09c1504c1..66cb58483 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/convertprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/convertprocessor.go @@ -16,27 +16,150 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/converttype" ) // ConvertProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L147-L152 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L434-L454 type ConvertProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` - Type converttype.ConvertType `json:"type"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field whose value is to be converted. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. 
+ // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the converted value to. + // By default, the `field` is updated in-place. + TargetField *string `json:"target_field,omitempty"` + // Type The type to convert the existing value to. + Type converttype.ConvertType `json:"type"` +} + +func (s *ConvertProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + 
case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil } // NewConvertProcessor returns a ConvertProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/coordinatorstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/coordinatorstats.go index 3c985ea40..25fe34247 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/coordinatorstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/coordinatorstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CoordinatorStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/enrich/stats/types.ts#L29-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/enrich/stats/types.ts#L29-L35 type CoordinatorStats struct { ExecutedSearchesTotal int64 `json:"executed_searches_total"` NodeId string `json:"node_id"` @@ -31,6 +39,93 @@ type CoordinatorStats struct { RemoteRequestsTotal int64 `json:"remote_requests_total"` } +func (s *CoordinatorStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "executed_searches_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ExecutedSearchesTotal = value + case float64: + f := 
int64(v) + s.ExecutedSearchesTotal = f + } + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return err + } + + case "queue_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.QueueSize = value + case float64: + f := int(v) + s.QueueSize = f + } + + case "remote_requests_current": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.RemoteRequestsCurrent = value + case float64: + f := int(v) + s.RemoteRequestsCurrent = f + } + + case "remote_requests_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.RemoteRequestsTotal = value + case float64: + f := int64(v) + s.RemoteRequestsTotal = f + } + + } + } + return nil +} + // NewCoordinatorStats returns a CoordinatorStats. func NewCoordinatorStats() *CoordinatorStats { r := &CoordinatorStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/coordsgeobounds.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/coordsgeobounds.go index 5f02167d4..d2f132d65 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/coordsgeobounds.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/coordsgeobounds.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CoordsGeoBounds type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Geo.ts#L138-L143 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Geo.ts#L163-L168 type CoordsGeoBounds struct { Bottom Float64 `json:"bottom"` Left Float64 `json:"left"` @@ -30,6 +38,90 @@ type CoordsGeoBounds struct { Top Float64 `json:"top"` } +func (s *CoordsGeoBounds) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bottom": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Bottom = f + case float64: + f := Float64(v) + s.Bottom = f + } + + case "left": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Left = f + case float64: + f := Float64(v) + s.Left = f + } + + case "right": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Right = f + case float64: + f := Float64(v) + s.Right = f + } + + case "top": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Top = f + case float64: + f := Float64(v) + s.Top = f + } + + } + } + return nil +} + // NewCoordsGeoBounds returns a CoordsGeoBounds. 
func NewCoordsGeoBounds() *CoordsGeoBounds { r := &CoordsGeoBounds{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/coreknnquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/coreknnquery.go index 217a4fde7..3d3cf81f0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/coreknnquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/coreknnquery.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CoreKnnQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/knn_search/_types/Knn.ts#L24-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/knn_search/_types/Knn.ts#L24-L33 type CoreKnnQuery struct { // Field The name of the vector field to search against Field string `json:"field"` @@ -34,6 +42,66 @@ type CoreKnnQuery struct { QueryVector []float32 `json:"query_vector"` } +func (s *CoreKnnQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "k": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.K = value + case float64: + f := int64(v) + s.K = f + } + + case "num_candidates": + var tmp interface{} + 
dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumCandidates = value + case float64: + f := int64(v) + s.NumCandidates = f + } + + case "query_vector": + if err := dec.Decode(&s.QueryVector); err != nil { + return err + } + + } + } + return nil +} + // NewCoreKnnQuery returns a CoreKnnQuery. func NewCoreKnnQuery() *CoreKnnQuery { r := &CoreKnnQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/counter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/counter.go index 0190ea0fa..7ad2f9061 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/counter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/counter.go @@ -16,18 +16,76 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Counter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L35-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L35-L38 type Counter struct { Active int64 `json:"active"` Total int64 `json:"total"` } +func (s *Counter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Active = value + case float64: + f := int64(v) + s.Active = f + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + // NewCounter returns a Counter. func NewCounter() *Counter { r := &Counter{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/countrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/countrecord.go index 468773f90..73086eb7b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/countrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/countrecord.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CountRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/count/types.ts#L23-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/count/types.ts#L23-L39 type CountRecord struct { // Count the document count Count *string `json:"count,omitempty"` @@ -32,6 +40,48 @@ type CountRecord struct { Timestamp *string `json:"timestamp,omitempty"` } +func (s *CountRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count", "dc", "docs.count", "docsCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Count = &o + + case "epoch", "t", "time": + if err := dec.Decode(&s.Epoch); err != nil { + return err + } + + case "timestamp", "ts", "hms", "hhmmss": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + } + } + return nil +} + // NewCountRecord returns a CountRecord. func NewCountRecord() *CountRecord { r := &CountRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cpu.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cpu.go index 90e81478d..d45e827e9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cpu.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cpu.go @@ -16,13 +16,21 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Cpu type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L218-L227 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L539-L548 type Cpu struct { LoadAverage map[string]Float64 `json:"load_average,omitempty"` Percent *int `json:"percent,omitempty"` @@ -34,6 +42,80 @@ type Cpu struct { UserInMillis *int64 `json:"user_in_millis,omitempty"` } +func (s *Cpu) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "load_average": + if s.LoadAverage == nil { + s.LoadAverage = make(map[string]Float64, 0) + } + if err := dec.Decode(&s.LoadAverage); err != nil { + return err + } + + case "percent": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Percent = &value + case float64: + f := int(v) + s.Percent = &f + } + + case "sys": + if err := dec.Decode(&s.Sys); err != nil { + return err + } + + case "sys_in_millis": + if err := dec.Decode(&s.SysInMillis); err != nil { + return err + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return err + } + + case "total_in_millis": + if err := dec.Decode(&s.TotalInMillis); err != nil { + return err + } + + case "user": + if err := dec.Decode(&s.User); err != nil { + return err + } + + case 
"user_in_millis": + if err := dec.Decode(&s.UserInMillis); err != nil { + return err + } + + } + } + return nil +} + // NewCpu returns a Cpu. func NewCpu() *Cpu { r := &Cpu{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cpuacct.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cpuacct.go index d1119b656..568fab3e4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cpuacct.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cpuacct.go @@ -16,16 +16,64 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CpuAcct type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L194-L197 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L476-L485 type CpuAcct struct { + // ControlGroup The `cpuacct` control group to which the Elasticsearch process belongs. ControlGroup *string `json:"control_group,omitempty"` - UsageNanos *int64 `json:"usage_nanos,omitempty"` + // UsageNanos The total CPU time, in nanoseconds, consumed by all tasks in the same cgroup + // as the Elasticsearch process. 
+ UsageNanos *int64 `json:"usage_nanos,omitempty"` +} + +func (s *CpuAcct) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "control_group": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ControlGroup = &o + + case "usage_nanos": + if err := dec.Decode(&s.UsageNanos); err != nil { + return err + } + + } + } + return nil } // NewCpuAcct returns a CpuAcct. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/createdstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/createdstatus.go index 3081e0d5e..1b5d1e4f0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/createdstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/createdstatus.go @@ -16,17 +16,59 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CreatedStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/CreatedStatus.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/CreatedStatus.ts#L20-L22 type CreatedStatus struct { Created bool `json:"created"` } +func (s *CreatedStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "created": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Created = value + case bool: + s.Created = v + } + + } + } + return nil +} + // NewCreatedStatus returns a CreatedStatus. func NewCreatedStatus() *CreatedStatus { r := &CreatedStatus{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/createoperation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/createoperation.go new file mode 100644 index 000000000..ddf1bac09 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/createoperation.go @@ -0,0 +1,170 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" +) + +// CreateOperation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/bulk/types.ts#L130-L130 +type CreateOperation struct { + // DynamicTemplates A map from the full name of fields to the name of dynamic templates. + // Defaults to an empty map. + // If a name matches a dynamic template, then that template will be applied + // regardless of other match predicates defined in the template. + // If a field is already defined in the mapping, then this parameter won’t be + // used. + DynamicTemplates map[string]string `json:"dynamic_templates,omitempty"` + // Id_ The document ID. + Id_ *string `json:"_id,omitempty"` + IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` + IfSeqNo *int64 `json:"if_seq_no,omitempty"` + // Index_ Name of the index or index alias to perform the action on. + Index_ *string `json:"_index,omitempty"` + // Pipeline ID of the pipeline to use to preprocess incoming documents. + // If the index has a default ingest pipeline specified, then setting the value + // to `_none` disables the default ingest pipeline for this request. 
+ // If a final pipeline is configured it will always run, regardless of the value + // of this parameter. + Pipeline *string `json:"pipeline,omitempty"` + // RequireAlias If `true`, the request’s actions must target an index alias. + RequireAlias *bool `json:"require_alias,omitempty"` + // Routing Custom value used to route operations to a specific shard. + Routing *string `json:"routing,omitempty"` + Version *int64 `json:"version,omitempty"` + VersionType *versiontype.VersionType `json:"version_type,omitempty"` +} + +func (s *CreateOperation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dynamic_templates": + if s.DynamicTemplates == nil { + s.DynamicTemplates = make(map[string]string, 0) + } + if err := dec.Decode(&s.DynamicTemplates); err != nil { + return err + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "if_primary_term": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IfPrimaryTerm = &value + case float64: + f := int64(v) + s.IfPrimaryTerm = &f + } + + case "if_seq_no": + if err := dec.Decode(&s.IfSeqNo); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "pipeline": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pipeline = &o + + case "require_alias": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.RequireAlias = &value + case bool: + s.RequireAlias = &v + } + + case "routing": + if err := 
dec.Decode(&s.Routing); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return err + } + + } + } + return nil +} + +// NewCreateOperation returns a CreateOperation. +func NewCreateOperation() *CreateOperation { + r := &CreateOperation{ + DynamicTemplates: make(map[string]string, 0), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/csvprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/csvprocessor.go index ca0a3d0cd..5e206db30 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/csvprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/csvprocessor.go @@ -16,30 +16,205 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // CsvProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L154-L162 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L456-L489 type CsvProcessor struct { - Description *string `json:"description,omitempty"` - EmptyValue json.RawMessage `json:"empty_value,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Quote *string `json:"quote,omitempty"` - Separator *string `json:"separator,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetFields []string `json:"target_fields"` - Trim *bool `json:"trim,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // EmptyValue Value used to fill empty fields. + // Empty fields are skipped if this is not provided. + // An empty field is one with no value (2 consecutive separators) or empty + // quotes (`""`). + EmptyValue json.RawMessage `json:"empty_value,omitempty"` + // Field The field to extract data from. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Quote Quote used in CSV, has to be single character string. 
+ Quote *string `json:"quote,omitempty"` + // Separator Separator used in CSV, has to be single character string. + Separator *string `json:"separator,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetFields The array of fields to assign extracted values to. + TargetFields []string `json:"target_fields"` + // Trim Trim whitespaces in unquoted fields. + Trim *bool `json:"trim,omitempty"` +} + +func (s *CsvProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "empty_value": + if err := dec.Decode(&s.EmptyValue); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "quote": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != 
nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Quote = &o + + case "separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Separator = &o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.TargetFields = append(s.TargetFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.TargetFields); err != nil { + return err + } + } + + case "trim": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Trim = &value + case bool: + s.Trim = &v + } + + } + } + return nil } // NewCsvProcessor returns a CsvProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cumulativecardinalityaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cumulativecardinalityaggregate.go index 319d2e86e..1930855e3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cumulativecardinalityaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cumulativecardinalityaggregate.go @@ -16,21 +16,77 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // CumulativeCardinalityAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L738-L746 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L747-L755 type CumulativeCardinalityAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Value int64 `json:"value"` - ValueAsString *string `json:"value_as_string,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Value int64 `json:"value"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *CumulativeCardinalityAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "value": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Value = value + case float64: + f := int64(v) + s.Value = f + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil } // NewCumulativeCardinalityAggregate returns a CumulativeCardinalityAggregate. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cumulativecardinalityaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cumulativecardinalityaggregation.go index 94d1bffde..7127934d6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cumulativecardinalityaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cumulativecardinalityaggregation.go @@ -16,33 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // CumulativeCardinalityAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L161-L161 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L192-L192 type CumulativeCardinalityAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. 
+ Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` } func (s *CumulativeCardinalityAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -62,9 +67,16 @@ func (s *CumulativeCardinalityAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -77,9 +89,16 @@ func (s *CumulativeCardinalityAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cumulativesumaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cumulativesumaggregation.go index ccf82ecbd..825b5174a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cumulativesumaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/cumulativesumaggregation.go @@ -16,33 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // CumulativeSumAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L163-L163 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L194-L194 type CumulativeSumAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. 
+ GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` } func (s *CumulativeSumAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -62,9 +67,16 @@ func (s *CumulativeSumAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -77,9 +89,16 @@ func (s *CumulativeSumAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/currentnode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/currentnode.go index 590a74579..1dce5ca2e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/currentnode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/currentnode.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CurrentNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/allocation_explain/types.ts#L78-L84 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/allocation_explain/types.ts#L78-L84 type CurrentNode struct { Attributes map[string]string `json:"attributes"` Id string `json:"id"` @@ -31,6 +39,65 @@ type CurrentNode struct { WeightRanking int `json:"weight_ranking"` } +func (s *CurrentNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return err + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return err + } + + case "weight_ranking": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.WeightRanking = value + case float64: + f := int(v) + s.WeightRanking = f + } + + } + } + return nil +} + // NewCurrentNode returns a CurrentNode. func NewCurrentNode() *CurrentNode { r := &CurrentNode{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/customanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/customanalyzer.go index 9662b73d9..5aff98972 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/customanalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/customanalyzer.go @@ -16,13 +16,21 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CustomAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/analyzers.ts#L28-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/analyzers.ts#L28-L35 type CustomAnalyzer struct { CharFilter []string `json:"char_filter,omitempty"` Filter []string `json:"filter,omitempty"` @@ -32,11 +40,105 @@ type CustomAnalyzer struct { Type string `json:"type,omitempty"` } +func (s *CustomAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "char_filter": + if err := dec.Decode(&s.CharFilter); err != nil { + return err + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return err + } + + case "position_increment_gap": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PositionIncrementGap = &value + case float64: + f := int(v) + s.PositionIncrementGap = &f + } + + case "position_offset_gap": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PositionOffsetGap = &value + case float64: + f := int(v) + s.PositionOffsetGap = &f + } + + case "tokenizer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + 
o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tokenizer = o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s CustomAnalyzer) MarshalJSON() ([]byte, error) { + type innerCustomAnalyzer CustomAnalyzer + tmp := innerCustomAnalyzer{ + CharFilter: s.CharFilter, + Filter: s.Filter, + PositionIncrementGap: s.PositionIncrementGap, + PositionOffsetGap: s.PositionOffsetGap, + Tokenizer: s.Tokenizer, + Type: s.Type, + } + + tmp.Type = "custom" + + return json.Marshal(tmp) +} + // NewCustomAnalyzer returns a CustomAnalyzer. func NewCustomAnalyzer() *CustomAnalyzer { r := &CustomAnalyzer{} - r.Type = "custom" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/customcategorizetextanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/customcategorizetextanalyzer.go index 65c794930..8e1ed639a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/customcategorizetextanalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/customcategorizetextanalyzer.go @@ -16,19 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // CustomCategorizeTextAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L508-L512 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L1108-L1112 type CustomCategorizeTextAnalyzer struct { CharFilter []string `json:"char_filter,omitempty"` Filter []string `json:"filter,omitempty"` Tokenizer *string `json:"tokenizer,omitempty"` } +func (s *CustomCategorizeTextAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "char_filter": + if err := dec.Decode(&s.CharFilter); err != nil { + return err + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return err + } + + case "tokenizer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tokenizer = &o + + } + } + return nil +} + // NewCustomCategorizeTextAnalyzer returns a CustomCategorizeTextAnalyzer. func NewCustomCategorizeTextAnalyzer() *CustomCategorizeTextAnalyzer { r := &CustomCategorizeTextAnalyzer{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/customnormalizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/customnormalizer.go index a2cca0dab..d50fefe23 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/customnormalizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/customnormalizer.go @@ -16,24 +16,40 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "encoding/json" +) + // CustomNormalizer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/normalizers.ts#L30-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/normalizers.ts#L30-L34 type CustomNormalizer struct { CharFilter []string `json:"char_filter,omitempty"` Filter []string `json:"filter,omitempty"` Type string `json:"type,omitempty"` } +// MarshalJSON override marshalling to include literal value +func (s CustomNormalizer) MarshalJSON() ([]byte, error) { + type innerCustomNormalizer CustomNormalizer + tmp := innerCustomNormalizer{ + CharFilter: s.CharFilter, + Filter: s.Filter, + Type: s.Type, + } + + tmp.Type = "custom" + + return json.Marshal(tmp) +} + // NewCustomNormalizer returns a CustomNormalizer. func NewCustomNormalizer() *CustomNormalizer { r := &CustomNormalizer{} - r.Type = "custom" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dailyschedule.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dailyschedule.go index 66ec103a5..ba6e04031 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dailyschedule.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dailyschedule.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // DailySchedule type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Schedule.ts#L33-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Schedule.ts#L33-L35 type DailySchedule struct { At []ScheduleTimeOfDay `json:"at"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/danglingindex.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/danglingindex.go index b3de53c0f..ca1135184 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/danglingindex.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/danglingindex.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DanglingIndex type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponse.ts#L29-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponse.ts#L29-L34 type DanglingIndex struct { CreationDateMillis int64 `json:"creation_date_millis"` IndexName string `json:"index_name"` @@ -30,6 +38,71 @@ type DanglingIndex struct { NodeIds []string `json:"node_ids"` } +func (s *DanglingIndex) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "creation_date_millis": + if err := dec.Decode(&s.CreationDateMillis); err != nil { + return err + } + + case "index_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexName = o + + case "index_uuid": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexUuid = o + + case "node_ids": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.NodeIds = append(s.NodeIds, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.NodeIds); err != nil { + return err + } + } + + } + } + return nil +} + // NewDanglingIndex returns a DanglingIndex. 
func NewDanglingIndex() *DanglingIndex { r := &DanglingIndex{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datacounts.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datacounts.go index ed0c08e93..3863ab07e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datacounts.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datacounts.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataCounts type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Job.ts#L129-L149 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Job.ts#L352-L372 type DataCounts struct { BucketCount int64 `json:"bucket_count"` EarliestRecordTimestamp *int64 `json:"earliest_record_timestamp,omitempty"` @@ -45,6 +53,301 @@ type DataCounts struct { SparseBucketCount int64 `json:"sparse_bucket_count"` } +func (s *DataCounts) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bucket_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BucketCount = value + case float64: + f := int64(v) + s.BucketCount = f + } + + case "earliest_record_timestamp": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case 
string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.EarliestRecordTimestamp = &value + case float64: + f := int64(v) + s.EarliestRecordTimestamp = &f + } + + case "empty_bucket_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.EmptyBucketCount = value + case float64: + f := int64(v) + s.EmptyBucketCount = f + } + + case "input_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.InputBytes = value + case float64: + f := int64(v) + s.InputBytes = f + } + + case "input_field_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.InputFieldCount = value + case float64: + f := int64(v) + s.InputFieldCount = f + } + + case "input_record_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.InputRecordCount = value + case float64: + f := int64(v) + s.InputRecordCount = f + } + + case "invalid_date_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.InvalidDateCount = value + case float64: + f := int64(v) + s.InvalidDateCount = f + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return err + } + + case "last_data_time": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LastDataTime = &value + case float64: + f := int64(v) + s.LastDataTime = &f + } + + case "latest_bucket_timestamp": + var tmp interface{} 
+ dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LatestBucketTimestamp = &value + case float64: + f := int64(v) + s.LatestBucketTimestamp = &f + } + + case "latest_empty_bucket_timestamp": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LatestEmptyBucketTimestamp = &value + case float64: + f := int64(v) + s.LatestEmptyBucketTimestamp = &f + } + + case "latest_record_timestamp": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LatestRecordTimestamp = &value + case float64: + f := int64(v) + s.LatestRecordTimestamp = &f + } + + case "latest_sparse_bucket_timestamp": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LatestSparseBucketTimestamp = &value + case float64: + f := int64(v) + s.LatestSparseBucketTimestamp = &f + } + + case "log_time": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LogTime = &value + case float64: + f := int64(v) + s.LogTime = &f + } + + case "missing_field_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MissingFieldCount = value + case float64: + f := int64(v) + s.MissingFieldCount = f + } + + case "out_of_order_timestamp_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.OutOfOrderTimestampCount = value + case float64: + f := int64(v) + 
s.OutOfOrderTimestampCount = f + } + + case "processed_field_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ProcessedFieldCount = value + case float64: + f := int64(v) + s.ProcessedFieldCount = f + } + + case "processed_record_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ProcessedRecordCount = value + case float64: + f := int64(v) + s.ProcessedRecordCount = f + } + + case "sparse_bucket_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SparseBucketCount = value + case float64: + f := int64(v) + s.SparseBucketCount = f + } + + } + } + return nil +} + // NewDataCounts returns a DataCounts. func NewDataCounts() *DataCounts { r := &DataCounts{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datadescription.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datadescription.go index 016c17e9b..5d65767f4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datadescription.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datadescription.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataDescription type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Job.ts#L151-L167 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Job.ts#L374-L390 type DataDescription struct { FieldDelimiter *string `json:"field_delimiter,omitempty"` // Format Only JSON format is supported at this time. @@ -41,6 +49,67 @@ type DataDescription struct { TimeFormat *string `json:"time_format,omitempty"` } +func (s *DataDescription) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field_delimiter": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FieldDelimiter = &o + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "time_field": + if err := dec.Decode(&s.TimeField); err != nil { + return err + } + + case "time_format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TimeFormat = &o + + } + } + return nil +} + // NewDataDescription returns a DataDescription. 
func NewDataDescription() *DataDescription { r := &DataDescription{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataemailattachment.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataemailattachment.go index afca64a4b..6360b7940 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataemailattachment.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataemailattachment.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // DataEmailAttachment type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L234-L236 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L234-L236 type DataEmailAttachment struct { Format *dataattachmentformat.DataAttachmentFormat `json:"format,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedauthorization.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedauthorization.go index e4173256c..61f1caacb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedauthorization.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedauthorization.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DatafeedAuthorization type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Authorization.ts#L31-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Authorization.ts#L31-L43 type DatafeedAuthorization struct { // ApiKey If an API key was used for the most recent update to the datafeed, its name // and identifier are listed in the response. @@ -35,6 +43,48 @@ type DatafeedAuthorization struct { ServiceAccount *string `json:"service_account,omitempty"` } +func (s *DatafeedAuthorization) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + if err := dec.Decode(&s.ApiKey); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "service_account": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ServiceAccount = &o + + } + } + return nil +} + // NewDatafeedAuthorization returns a DatafeedAuthorization. 
func NewDatafeedAuthorization() *DatafeedAuthorization { r := &DatafeedAuthorization{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedconfig.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedconfig.go index 1732ff6a8..aa25d5b32 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedconfig.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedconfig.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DatafeedConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Datafeed.ts#L60-L117 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Datafeed.ts#L60-L117 type DatafeedConfig struct { // Aggregations If set, the datafeed performs aggregation searches. Support for aggregations // is limited and should be used only with low cardinality data. @@ -81,7 +89,7 @@ type DatafeedConfig struct { // performance when there are multiple jobs running on the same node. QueryDelay Duration `json:"query_delay,omitempty"` // RuntimeMappings Specifies runtime fields for the datafeed search. - RuntimeMappings map[string]RuntimeField `json:"runtime_mappings,omitempty"` + RuntimeMappings RuntimeFields `json:"runtime_mappings,omitempty"` // ScriptFields Specifies scripts that evaluate custom expressions and returns script fields // to the datafeed. The detector configuration objects in a job can contain // functions that use these script fields. 
@@ -92,6 +100,124 @@ type DatafeedConfig struct { ScrollSize *int `json:"scroll_size,omitempty"` } +func (s *DatafeedConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations", "aggs": + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregations, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return err + } + + case "chunking_config": + if err := dec.Decode(&s.ChunkingConfig); err != nil { + return err + } + + case "datafeed_id": + if err := dec.Decode(&s.DatafeedId); err != nil { + return err + } + + case "delayed_data_check_config": + if err := dec.Decode(&s.DelayedDataCheckConfig); err != nil { + return err + } + + case "frequency": + if err := dec.Decode(&s.Frequency); err != nil { + return err + } + + case "indices", "indexes": + if err := dec.Decode(&s.Indices); err != nil { + return err + } + + case "indices_options": + if err := dec.Decode(&s.IndicesOptions); err != nil { + return err + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return err + } + + case "max_empty_searches": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxEmptySearches = &value + case float64: + f := int(v) + s.MaxEmptySearches = &f + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "query_delay": + if err := dec.Decode(&s.QueryDelay); err != nil { + return err + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return err + } + + case "script_fields": + if s.ScriptFields == nil { + s.ScriptFields = make(map[string]ScriptField, 0) + } + if err := dec.Decode(&s.ScriptFields); err != nil { + return err + } + + case "scroll_size": + + var tmp interface{} + 
dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ScrollSize = &value + case float64: + f := int(v) + s.ScrollSize = &f + } + + } + } + return nil +} + // NewDatafeedConfig returns a DatafeedConfig. func NewDatafeedConfig() *DatafeedConfig { r := &DatafeedConfig{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedrunningstate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedrunningstate.go index 391a00eca..fc2fd863d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedrunningstate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedrunningstate.go @@ -16,17 +16,85 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DatafeedRunningState type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Datafeed.ts#L158-L162 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Datafeed.ts#L198-L212 type DatafeedRunningState struct { - RealTimeConfigured bool `json:"real_time_configured"` - RealTimeRunning bool `json:"real_time_running"` - SearchInterval *RunningStateSearchInterval `json:"search_interval,omitempty"` + // RealTimeConfigured Indicates if the datafeed is "real-time"; meaning that the datafeed has no + // configured `end` time. + RealTimeConfigured bool `json:"real_time_configured"` + // RealTimeRunning Indicates whether the datafeed has finished running on the available past + // data. 
+ // For datafeeds without a configured `end` time, this means that the datafeed + // is now running on "real-time" data. + RealTimeRunning bool `json:"real_time_running"` + // SearchInterval Provides the latest time interval the datafeed has searched. + SearchInterval *RunningStateSearchInterval `json:"search_interval,omitempty"` +} + +func (s *DatafeedRunningState) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "real_time_configured": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.RealTimeConfigured = value + case bool: + s.RealTimeConfigured = v + } + + case "real_time_running": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.RealTimeRunning = value + case bool: + s.RealTimeRunning = v + } + + case "search_interval": + if err := dec.Decode(&s.SearchInterval); err != nil { + return err + } + + } + } + return nil } // NewDatafeedRunningState returns a DatafeedRunningState. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeeds.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeeds.go index 74245d6cb..7c8af5981 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeeds.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeeds.go @@ -16,17 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Datafeeds type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/info/types.ts#L40-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/info/types.ts#L40-L42 type Datafeeds struct { ScrollSize int `json:"scroll_size"` } +func (s *Datafeeds) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "scroll_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ScrollSize = value + case float64: + f := int(v) + s.ScrollSize = f + } + + } + } + return nil +} + // NewDatafeeds returns a Datafeeds. func NewDatafeeds() *Datafeeds { r := &Datafeeds{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedsrecord.go index 4ff2edd0e..3c7aee3d3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedsrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedsrecord.go @@ -16,44 +16,216 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/datafeedstate" ) // DatafeedsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/ml_datafeeds/types.ts#L22-L83 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/ml_datafeeds/types.ts#L22-L87 type DatafeedsRecord struct { - // AssignmentExplanation why the datafeed is or is not assigned to a node + // AssignmentExplanation For started datafeeds only, contains messages relating to the selection of a + // node. AssignmentExplanation *string `json:"assignment_explanation,omitempty"` - // BucketsCount bucket count + // BucketsCount The number of buckets processed. BucketsCount *string `json:"buckets.count,omitempty"` - // Id the datafeed_id + // Id The datafeed identifier. Id *string `json:"id,omitempty"` - // NodeAddress network address of the assigned node + // NodeAddress The network address of the assigned node. + // For started datafeeds only, this information pertains to the node upon which + // the datafeed is started. NodeAddress *string `json:"node.address,omitempty"` - // NodeEphemeralId ephemeral id of the assigned node + // NodeEphemeralId The ephemeral identifier of the assigned node. + // For started datafeeds only, this information pertains to the node upon which + // the datafeed is started. NodeEphemeralId *string `json:"node.ephemeral_id,omitempty"` - // NodeId id of the assigned node + // NodeId The unique identifier of the assigned node. 
+ // For started datafeeds only, this information pertains to the node upon which + // the datafeed is started. NodeId *string `json:"node.id,omitempty"` - // NodeName name of the assigned node + // NodeName The name of the assigned node. + // For started datafeeds only, this information pertains to the node upon which + // the datafeed is started. NodeName *string `json:"node.name,omitempty"` - // SearchBucketAvg the average search time per bucket (millisecond) + // SearchBucketAvg The average search time per bucket, in milliseconds. SearchBucketAvg *string `json:"search.bucket_avg,omitempty"` - // SearchCount number of searches ran by the datafeed + // SearchCount The number of searches run by the datafeed. SearchCount *string `json:"search.count,omitempty"` - // SearchExpAvgHour the exponential average search time per hour (millisecond) + // SearchExpAvgHour The exponential average search time per hour, in milliseconds. SearchExpAvgHour *string `json:"search.exp_avg_hour,omitempty"` - // SearchTime the total search time + // SearchTime The total time the datafeed spent searching, in milliseconds. SearchTime *string `json:"search.time,omitempty"` - // State the datafeed state + // State The status of the datafeed. 
State *datafeedstate.DatafeedState `json:"state,omitempty"` } +func (s *DatafeedsRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "assignment_explanation", "ae": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AssignmentExplanation = &o + + case "buckets.count", "bc", "bucketsCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BucketsCount = &o + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = &o + + case "node.address", "na", "nodeAddress": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeAddress = &o + + case "node.ephemeral_id", "ne", "nodeEphemeralId": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeEphemeralId = &o + + case "node.id", "ni", "nodeId": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeId = &o + + case "node.name", "nn", "nodeName": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeName = &o + + 
case "search.bucket_avg", "sba", "searchBucketAvg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchBucketAvg = &o + + case "search.count", "sc", "searchCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchCount = &o + + case "search.exp_avg_hour", "seah", "searchExpAvgHour": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchExpAvgHour = &o + + case "search.time", "st", "searchTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchTime = &o + + case "state", "s": + if err := dec.Decode(&s.State); err != nil { + return err + } + + } + } + return nil +} + // NewDatafeedsRecord returns a DatafeedsRecord. func NewDatafeedsRecord() *DatafeedsRecord { r := &DatafeedsRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedstats.go index 20ae3ebb6..587080436 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedstats.go @@ -16,24 +16,101 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/datafeedstate" ) // DatafeedStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Datafeed.ts#L140-L147 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Datafeed.ts#L140-L169 type DatafeedStats struct { - AssignmentExplanation *string `json:"assignment_explanation,omitempty"` - DatafeedId string `json:"datafeed_id"` - Node *DiscoveryNode `json:"node,omitempty"` - RunningState *DatafeedRunningState `json:"running_state,omitempty"` - State datafeedstate.DatafeedState `json:"state"` - TimingStats DatafeedTimingStats `json:"timing_stats"` + // AssignmentExplanation For started datafeeds only, contains messages relating to the selection of a + // node. + AssignmentExplanation *string `json:"assignment_explanation,omitempty"` + // DatafeedId A numerical character string that uniquely identifies the datafeed. + // This identifier can contain lowercase alphanumeric characters (a-z and 0-9), + // hyphens, and underscores. + // It must start and end with alphanumeric characters. + DatafeedId string `json:"datafeed_id"` + // Node For started datafeeds only, this information pertains to the node upon which + // the datafeed is started. + Node *DiscoveryNode `json:"node,omitempty"` + // RunningState An object containing the running state for this datafeed. + // It is only provided if the datafeed is started. 
+ RunningState *DatafeedRunningState `json:"running_state,omitempty"` + // State The status of the datafeed, which can be one of the following values: + // `starting`, `started`, `stopping`, `stopped`. + State datafeedstate.DatafeedState `json:"state"` + // TimingStats An object that provides statistical information about timing aspect of this + // datafeed. + TimingStats DatafeedTimingStats `json:"timing_stats"` +} + +func (s *DatafeedStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "assignment_explanation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AssignmentExplanation = &o + + case "datafeed_id": + if err := dec.Decode(&s.DatafeedId); err != nil { + return err + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return err + } + + case "running_state": + if err := dec.Decode(&s.RunningState); err != nil { + return err + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return err + } + + case "timing_stats": + if err := dec.Decode(&s.TimingStats); err != nil { + return err + } + + } + } + return nil } // NewDatafeedStats returns a DatafeedStats. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedtimingstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedtimingstats.go index f10d362b4..51fb332fd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedtimingstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datafeedtimingstats.go @@ -16,20 +16,104 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DatafeedTimingStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Datafeed.ts#L149-L156 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Datafeed.ts#L171-L196 type DatafeedTimingStats struct { - AverageSearchTimePerBucketMs Float64 `json:"average_search_time_per_bucket_ms,omitempty"` - BucketCount int64 `json:"bucket_count"` + // AverageSearchTimePerBucketMs The average search time per bucket, in milliseconds. + AverageSearchTimePerBucketMs Float64 `json:"average_search_time_per_bucket_ms,omitempty"` + // BucketCount The number of buckets processed. + BucketCount int64 `json:"bucket_count"` + // ExponentialAverageSearchTimePerHourMs The exponential average search time per hour, in milliseconds. ExponentialAverageSearchTimePerHourMs Float64 `json:"exponential_average_search_time_per_hour_ms"` - JobId string `json:"job_id"` - SearchCount int64 `json:"search_count"` - TotalSearchTimeMs Float64 `json:"total_search_time_ms"` + // JobId Identifier for the anomaly detection job. + JobId string `json:"job_id"` + // SearchCount The number of searches run by the datafeed. + SearchCount int64 `json:"search_count"` + // TotalSearchTimeMs The total time the datafeed spent searching, in milliseconds. 
+ TotalSearchTimeMs Float64 `json:"total_search_time_ms"` +} + +func (s *DatafeedTimingStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "average_search_time_per_bucket_ms": + if err := dec.Decode(&s.AverageSearchTimePerBucketMs); err != nil { + return err + } + + case "bucket_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BucketCount = value + case float64: + f := int64(v) + s.BucketCount = f + } + + case "exponential_average_search_time_per_hour_ms": + if err := dec.Decode(&s.ExponentialAverageSearchTimePerHourMs); err != nil { + return err + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return err + } + + case "search_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SearchCount = value + case float64: + f := int64(v) + s.SearchCount = f + } + + case "total_search_time_ms": + if err := dec.Decode(&s.TotalSearchTimeMs); err != nil { + return err + } + + } + } + return nil } // NewDatafeedTimingStats returns a DatafeedTimingStats. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysis.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysis.go index e78f4b751..018f96c49 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysis.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysis.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeAnalysis type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L134-L213 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L134-L213 type DataframeAnalysis struct { // Alpha Advanced configuration option. Machine learning uses loss guided tree // growing, which means that the decision trees grow where the regularized loss @@ -139,6 +147,275 @@ type DataframeAnalysis struct { TrainingPercent Percentage `json:"training_percent,omitempty"` } +func (s *DataframeAnalysis) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alpha": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Alpha = &f + case float64: + f := Float64(v) + s.Alpha = &f + } + + case "dependent_variable": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DependentVariable = o + + case "downsample_factor": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.DownsampleFactor = &f + case float64: + f := Float64(v) + s.DownsampleFactor = 
&f + } + + case "early_stopping_enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.EarlyStoppingEnabled = &value + case bool: + s.EarlyStoppingEnabled = &v + } + + case "eta": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Eta = &f + case float64: + f := Float64(v) + s.Eta = &f + } + + case "eta_growth_rate_per_tree": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.EtaGrowthRatePerTree = &f + case float64: + f := Float64(v) + s.EtaGrowthRatePerTree = &f + } + + case "feature_bag_fraction": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.FeatureBagFraction = &f + case float64: + f := Float64(v) + s.FeatureBagFraction = &f + } + + case "feature_processors": + if err := dec.Decode(&s.FeatureProcessors); err != nil { + return err + } + + case "gamma": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Gamma = &f + case float64: + f := Float64(v) + s.Gamma = &f + } + + case "lambda": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Lambda = &f + case float64: + f := Float64(v) + s.Lambda = &f + } + + case "max_optimization_rounds_per_hyperparameter": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil 
{ + return err + } + s.MaxOptimizationRoundsPerHyperparameter = &value + case float64: + f := int(v) + s.MaxOptimizationRoundsPerHyperparameter = &f + } + + case "max_trees", "maximum_number_trees": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxTrees = &value + case float64: + f := int(v) + s.MaxTrees = &f + } + + case "num_top_feature_importance_values": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumTopFeatureImportanceValues = &value + case float64: + f := int(v) + s.NumTopFeatureImportanceValues = &f + } + + case "prediction_field_name": + if err := dec.Decode(&s.PredictionFieldName); err != nil { + return err + } + + case "randomize_seed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.RandomizeSeed = &f + case float64: + f := Float64(v) + s.RandomizeSeed = &f + } + + case "soft_tree_depth_limit": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SoftTreeDepthLimit = &value + case float64: + f := int(v) + s.SoftTreeDepthLimit = &f + } + + case "soft_tree_depth_tolerance": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.SoftTreeDepthTolerance = &f + case float64: + f := Float64(v) + s.SoftTreeDepthTolerance = &f + } + + case "training_percent": + if err := dec.Decode(&s.TrainingPercent); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeAnalysis returns a DataframeAnalysis. 
func NewDataframeAnalysis() *DataframeAnalysis { r := &DataframeAnalysis{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisanalyzedfields.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisanalyzedfields.go index 0e3ff1f85..f93349ff3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisanalyzedfields.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisanalyzedfields.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // DataframeAnalysisAnalyzedFields type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L238-L244 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L238-L244 type DataframeAnalysisAnalyzedFields struct { // Excludes An array of strings that defines the fields that will be included in the // analysis. 
@@ -33,6 +40,41 @@ type DataframeAnalysisAnalyzedFields struct { Includes []string `json:"includes"` } +func (s *DataframeAnalysisAnalyzedFields) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Includes) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "excludes": + if err := dec.Decode(&s.Excludes); err != nil { + return err + } + + case "includes": + if err := dec.Decode(&s.Includes); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeAnalysisAnalyzedFields returns a DataframeAnalysisAnalyzedFields. func NewDataframeAnalysisAnalyzedFields() *DataframeAnalysisAnalyzedFields { r := &DataframeAnalysisAnalyzedFields{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisclassification.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisclassification.go index 0836e38ee..46f5052aa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisclassification.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisclassification.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeAnalysisClassification type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L227-L236 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L227-L236 type DataframeAnalysisClassification struct { // Alpha Advanced configuration option. Machine learning uses loss guided tree // growing, which means that the decision trees grow where the regularized loss @@ -148,6 +156,303 @@ type DataframeAnalysisClassification struct { TrainingPercent Percentage `json:"training_percent,omitempty"` } +func (s *DataframeAnalysisClassification) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alpha": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Alpha = &f + case float64: + f := Float64(v) + s.Alpha = &f + } + + case "class_assignment_objective": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClassAssignmentObjective = &o + + case "dependent_variable": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DependentVariable = o + + case "downsample_factor": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.DownsampleFactor = &f + case float64: + f := Float64(v) + s.DownsampleFactor = &f + } + + case 
"early_stopping_enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.EarlyStoppingEnabled = &value + case bool: + s.EarlyStoppingEnabled = &v + } + + case "eta": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Eta = &f + case float64: + f := Float64(v) + s.Eta = &f + } + + case "eta_growth_rate_per_tree": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.EtaGrowthRatePerTree = &f + case float64: + f := Float64(v) + s.EtaGrowthRatePerTree = &f + } + + case "feature_bag_fraction": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.FeatureBagFraction = &f + case float64: + f := Float64(v) + s.FeatureBagFraction = &f + } + + case "feature_processors": + if err := dec.Decode(&s.FeatureProcessors); err != nil { + return err + } + + case "gamma": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Gamma = &f + case float64: + f := Float64(v) + s.Gamma = &f + } + + case "lambda": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Lambda = &f + case float64: + f := Float64(v) + s.Lambda = &f + } + + case "max_optimization_rounds_per_hyperparameter": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err 
+ } + s.MaxOptimizationRoundsPerHyperparameter = &value + case float64: + f := int(v) + s.MaxOptimizationRoundsPerHyperparameter = &f + } + + case "max_trees", "maximum_number_trees": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxTrees = &value + case float64: + f := int(v) + s.MaxTrees = &f + } + + case "num_top_classes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumTopClasses = &value + case float64: + f := int(v) + s.NumTopClasses = &f + } + + case "num_top_feature_importance_values": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumTopFeatureImportanceValues = &value + case float64: + f := int(v) + s.NumTopFeatureImportanceValues = &f + } + + case "prediction_field_name": + if err := dec.Decode(&s.PredictionFieldName); err != nil { + return err + } + + case "randomize_seed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.RandomizeSeed = &f + case float64: + f := Float64(v) + s.RandomizeSeed = &f + } + + case "soft_tree_depth_limit": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SoftTreeDepthLimit = &value + case float64: + f := int(v) + s.SoftTreeDepthLimit = &f + } + + case "soft_tree_depth_tolerance": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.SoftTreeDepthTolerance = &f + case float64: + f := Float64(v) + s.SoftTreeDepthTolerance = &f + } + + 
case "training_percent": + if err := dec.Decode(&s.TrainingPercent); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeAnalysisClassification returns a DataframeAnalysisClassification. func NewDataframeAnalysisClassification() *DataframeAnalysisClassification { r := &DataframeAnalysisClassification{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysiscontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysiscontainer.go index 0f45a7b93..6fbcf5745 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysiscontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysiscontainer.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // DataframeAnalysisContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L84-L101 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L84-L101 type DataframeAnalysisContainer struct { // Classification The configuration information necessary to perform classification. 
Classification *DataframeAnalysisClassification `json:"classification,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessor.go index 1af6504f3..f30e86db5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessor.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // DataframeAnalysisFeatureProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L246-L258 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L246-L258 type DataframeAnalysisFeatureProcessor struct { // FrequencyEncoding The configuration information necessary to perform frequency encoding. 
FrequencyEncoding *DataframeAnalysisFeatureProcessorFrequencyEncoding `json:"frequency_encoding,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessorfrequencyencoding.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessorfrequencyencoding.go index d73cc797e..3449f0945 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessorfrequencyencoding.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessorfrequencyencoding.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // DataframeAnalysisFeatureProcessorFrequencyEncoding type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L260-L267 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L260-L267 type DataframeAnalysisFeatureProcessorFrequencyEncoding struct { // FeatureName The resulting feature name. 
FeatureName string `json:"feature_name"` @@ -32,6 +39,44 @@ type DataframeAnalysisFeatureProcessorFrequencyEncoding struct { FrequencyMap map[string]Float64 `json:"frequency_map"` } +func (s *DataframeAnalysisFeatureProcessorFrequencyEncoding) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_name": + if err := dec.Decode(&s.FeatureName); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "frequency_map": + if s.FrequencyMap == nil { + s.FrequencyMap = make(map[string]Float64, 0) + } + if err := dec.Decode(&s.FrequencyMap); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeAnalysisFeatureProcessorFrequencyEncoding returns a DataframeAnalysisFeatureProcessorFrequencyEncoding. func NewDataframeAnalysisFeatureProcessorFrequencyEncoding() *DataframeAnalysisFeatureProcessorFrequencyEncoding { r := &DataframeAnalysisFeatureProcessorFrequencyEncoding{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessormultiencoding.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessormultiencoding.go index 4a2c18ae6..2ec65a861 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessormultiencoding.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessormultiencoding.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // DataframeAnalysisFeatureProcessorMultiEncoding type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L269-L272 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L269-L272 type DataframeAnalysisFeatureProcessorMultiEncoding struct { // Processors The ordered array of custom processors to execute. Must be more than 1. Processors []int `json:"processors"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessorngramencoding.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessorngramencoding.go index 48293991e..b7007fa08 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessorngramencoding.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessorngramencoding.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeAnalysisFeatureProcessorNGramEncoding type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L274-L286 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L274-L286 type DataframeAnalysisFeatureProcessorNGramEncoding struct { Custom *bool `json:"custom,omitempty"` // FeaturePrefix The feature name prefix. Defaults to ngram__. @@ -40,6 +48,94 @@ type DataframeAnalysisFeatureProcessorNGramEncoding struct { Start *int `json:"start,omitempty"` } +func (s *DataframeAnalysisFeatureProcessorNGramEncoding) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "custom": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Custom = &value + case bool: + s.Custom = &v + } + + case "feature_prefix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeaturePrefix = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Length = &value + case float64: + f := int(v) + s.Length = &f + } + + case "n_grams": + if err := dec.Decode(&s.NGrams); err != nil { + return err + } + + case "start": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Start = &value + case float64: + f := int(v) + s.Start = &f + } + + } + } + return 
nil +} + // NewDataframeAnalysisFeatureProcessorNGramEncoding returns a DataframeAnalysisFeatureProcessorNGramEncoding. func NewDataframeAnalysisFeatureProcessorNGramEncoding() *DataframeAnalysisFeatureProcessorNGramEncoding { r := &DataframeAnalysisFeatureProcessorNGramEncoding{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessoronehotencoding.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessoronehotencoding.go index efa436169..574747a45 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessoronehotencoding.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessoronehotencoding.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeAnalysisFeatureProcessorOneHotEncoding type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L288-L293 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L288-L293 type DataframeAnalysisFeatureProcessorOneHotEncoding struct { // Field The name of the field to encode. 
Field string `json:"field"` @@ -30,6 +38,43 @@ type DataframeAnalysisFeatureProcessorOneHotEncoding struct { HotMap string `json:"hot_map"` } +func (s *DataframeAnalysisFeatureProcessorOneHotEncoding) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "hot_map": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.HotMap = o + + } + } + return nil +} + // NewDataframeAnalysisFeatureProcessorOneHotEncoding returns a DataframeAnalysisFeatureProcessorOneHotEncoding. func NewDataframeAnalysisFeatureProcessorOneHotEncoding() *DataframeAnalysisFeatureProcessorOneHotEncoding { r := &DataframeAnalysisFeatureProcessorOneHotEncoding{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessortargetmeanencoding.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessortargetmeanencoding.go index 56fddb050..6f4d21002 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessortargetmeanencoding.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisfeatureprocessortargetmeanencoding.go @@ -16,17 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // DataframeAnalysisFeatureProcessorTargetMeanEncoding type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L295-L304 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L295-L304 type DataframeAnalysisFeatureProcessorTargetMeanEncoding struct { // DefaultValue The default value if field value is not found in the target_map. DefaultValue int `json:"default_value"` @@ -38,6 +42,60 @@ type DataframeAnalysisFeatureProcessorTargetMeanEncoding struct { TargetMap map[string]json.RawMessage `json:"target_map"` } +func (s *DataframeAnalysisFeatureProcessorTargetMeanEncoding) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "default_value": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DefaultValue = value + case float64: + f := int(v) + s.DefaultValue = f + } + + case "feature_name": + if err := dec.Decode(&s.FeatureName); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "target_map": + if s.TargetMap == nil { + s.TargetMap = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.TargetMap); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeAnalysisFeatureProcessorTargetMeanEncoding returns a 
DataframeAnalysisFeatureProcessorTargetMeanEncoding. func NewDataframeAnalysisFeatureProcessorTargetMeanEncoding() *DataframeAnalysisFeatureProcessorTargetMeanEncoding { r := &DataframeAnalysisFeatureProcessorTargetMeanEncoding{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisoutlierdetection.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisoutlierdetection.go index bd1aec48e..78477e07e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisoutlierdetection.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisoutlierdetection.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeAnalysisOutlierDetection type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L103-L132 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L103-L132 type DataframeAnalysisOutlierDetection struct { // ComputeFeatureInfluence Specifies whether the feature influence calculation is enabled. 
ComputeFeatureInfluence *bool `json:"compute_feature_influence,omitempty"` @@ -50,6 +58,114 @@ type DataframeAnalysisOutlierDetection struct { StandardizationEnabled *bool `json:"standardization_enabled,omitempty"` } +func (s *DataframeAnalysisOutlierDetection) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compute_feature_influence": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.ComputeFeatureInfluence = &value + case bool: + s.ComputeFeatureInfluence = &v + } + + case "feature_influence_threshold": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.FeatureInfluenceThreshold = &f + case float64: + f := Float64(v) + s.FeatureInfluenceThreshold = &f + } + + case "method": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Method = &o + + case "n_neighbors": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NNeighbors = &value + case float64: + f := int(v) + s.NNeighbors = &f + } + + case "outlier_fraction": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.OutlierFraction = &f + case float64: + f := Float64(v) + s.OutlierFraction = &f + } + + case "standardization_enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseBool(v) + if err != nil { + return err + } + s.StandardizationEnabled = &value + case bool: + s.StandardizationEnabled = &v + } + + } + } + return nil +} + // NewDataframeAnalysisOutlierDetection returns a DataframeAnalysisOutlierDetection. func NewDataframeAnalysisOutlierDetection() *DataframeAnalysisOutlierDetection { r := &DataframeAnalysisOutlierDetection{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisregression.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisregression.go index c78e94c31..a7d01d91d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisregression.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalysisregression.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeAnalysisRegression type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L215-L225 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L215-L225 type DataframeAnalysisRegression struct { // Alpha Advanced configuration option. 
Machine learning uses loss guided tree // growing, which means that the decision trees grow where the regularized loss @@ -145,6 +153,303 @@ type DataframeAnalysisRegression struct { TrainingPercent Percentage `json:"training_percent,omitempty"` } +func (s *DataframeAnalysisRegression) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alpha": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Alpha = &f + case float64: + f := Float64(v) + s.Alpha = &f + } + + case "dependent_variable": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DependentVariable = o + + case "downsample_factor": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.DownsampleFactor = &f + case float64: + f := Float64(v) + s.DownsampleFactor = &f + } + + case "early_stopping_enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.EarlyStoppingEnabled = &value + case bool: + s.EarlyStoppingEnabled = &v + } + + case "eta": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Eta = &f + case float64: + f := Float64(v) + s.Eta = &f + } + + case "eta_growth_rate_per_tree": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 
64) + if err != nil { + return err + } + f := Float64(value) + s.EtaGrowthRatePerTree = &f + case float64: + f := Float64(v) + s.EtaGrowthRatePerTree = &f + } + + case "feature_bag_fraction": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.FeatureBagFraction = &f + case float64: + f := Float64(v) + s.FeatureBagFraction = &f + } + + case "feature_processors": + if err := dec.Decode(&s.FeatureProcessors); err != nil { + return err + } + + case "gamma": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Gamma = &f + case float64: + f := Float64(v) + s.Gamma = &f + } + + case "lambda": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Lambda = &f + case float64: + f := Float64(v) + s.Lambda = &f + } + + case "loss_function": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LossFunction = &o + + case "loss_function_parameter": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.LossFunctionParameter = &f + case float64: + f := Float64(v) + s.LossFunctionParameter = &f + } + + case "max_optimization_rounds_per_hyperparameter": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxOptimizationRoundsPerHyperparameter = &value + case float64: + f := int(v) + s.MaxOptimizationRoundsPerHyperparameter = &f + 
} + + case "max_trees", "maximum_number_trees": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxTrees = &value + case float64: + f := int(v) + s.MaxTrees = &f + } + + case "num_top_feature_importance_values": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumTopFeatureImportanceValues = &value + case float64: + f := int(v) + s.NumTopFeatureImportanceValues = &f + } + + case "prediction_field_name": + if err := dec.Decode(&s.PredictionFieldName); err != nil { + return err + } + + case "randomize_seed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.RandomizeSeed = &f + case float64: + f := Float64(v) + s.RandomizeSeed = &f + } + + case "soft_tree_depth_limit": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SoftTreeDepthLimit = &value + case float64: + f := int(v) + s.SoftTreeDepthLimit = &f + } + + case "soft_tree_depth_tolerance": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.SoftTreeDepthTolerance = &f + case float64: + f := Float64(v) + s.SoftTreeDepthTolerance = &f + } + + case "training_percent": + if err := dec.Decode(&s.TrainingPercent); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeAnalysisRegression returns a DataframeAnalysisRegression. 
func NewDataframeAnalysisRegression() *DataframeAnalysisRegression { r := &DataframeAnalysisRegression{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalytics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalytics.go index 2a5d63d69..0960eb961 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalytics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalytics.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dataframestate" ) // DataframeAnalytics type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L324-L341 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L324-L344 type DataframeAnalytics struct { // AnalysisStats An object containing information about the analysis job. 
AnalysisStats *DataframeAnalyticsStatsContainer `json:"analysis_stats,omitempty"` @@ -51,6 +57,73 @@ type DataframeAnalytics struct { State dataframestate.DataframeState `json:"state"` } +func (s *DataframeAnalytics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analysis_stats": + if err := dec.Decode(&s.AnalysisStats); err != nil { + return err + } + + case "assignment_explanation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AssignmentExplanation = &o + + case "data_counts": + if err := dec.Decode(&s.DataCounts); err != nil { + return err + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "memory_usage": + if err := dec.Decode(&s.MemoryUsage); err != nil { + return err + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return err + } + + case "progress": + if err := dec.Decode(&s.Progress); err != nil { + return err + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeAnalytics returns a DataframeAnalytics. func NewDataframeAnalytics() *DataframeAnalytics { r := &DataframeAnalytics{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsauthorization.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsauthorization.go index 20260e3e5..af8d63624 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsauthorization.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsauthorization.go @@ -16,13 +16,21 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeAnalyticsAuthorization type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Authorization.ts#L45-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Authorization.ts#L45-L57 type DataframeAnalyticsAuthorization struct { // ApiKey If an API key was used for the most recent update to the job, its name and // identifier are listed in the response. @@ -35,6 +43,48 @@ type DataframeAnalyticsAuthorization struct { ServiceAccount *string `json:"service_account,omitempty"` } +func (s *DataframeAnalyticsAuthorization) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + if err := dec.Decode(&s.ApiKey); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "service_account": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ServiceAccount = &o + + } + } + return nil +} + // NewDataframeAnalyticsAuthorization returns a DataframeAnalyticsAuthorization. 
func NewDataframeAnalyticsAuthorization() *DataframeAnalyticsAuthorization { r := &DataframeAnalyticsAuthorization{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsdestination.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsdestination.go index 4d6f390ce..c6262033d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsdestination.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsdestination.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // DataframeAnalyticsDestination type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L77-L82 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L77-L82 type DataframeAnalyticsDestination struct { // Index Defines the destination index to store the results of the data frame // analytics job. 
@@ -32,6 +39,36 @@ type DataframeAnalyticsDestination struct { ResultsField *string `json:"results_field,omitempty"` } +func (s *DataframeAnalyticsDestination) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "results_field": + if err := dec.Decode(&s.ResultsField); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeAnalyticsDestination returns a DataframeAnalyticsDestination. func NewDataframeAnalyticsDestination() *DataframeAnalyticsDestination { r := &DataframeAnalyticsDestination{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsfieldselection.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsfieldselection.go index df4c08ba7..c5d9260eb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsfieldselection.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsfieldselection.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeAnalyticsFieldSelection type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L55-L68 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L55-L68 type DataframeAnalyticsFieldSelection struct { // FeatureType The feature type of this field for the analysis. May be categorical or // numerical. @@ -39,6 +47,88 @@ type DataframeAnalyticsFieldSelection struct { Reason *string `json:"reason,omitempty"` } +func (s *DataframeAnalyticsFieldSelection) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeatureType = &o + + case "is_included": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsIncluded = value + case bool: + s.IsIncluded = v + } + + case "is_required": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsRequired = value + case bool: + s.IsRequired = v + } + + case "mapping_types": + if err := dec.Decode(&s.MappingTypes); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + } + } + return nil +} + // NewDataframeAnalyticsFieldSelection 
returns a DataframeAnalyticsFieldSelection. func NewDataframeAnalyticsFieldSelection() *DataframeAnalyticsFieldSelection { r := &DataframeAnalyticsFieldSelection{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsmemoryestimation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsmemoryestimation.go index 6f0f0057f..7eca046c4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsmemoryestimation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsmemoryestimation.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeAnalyticsMemoryEstimation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L70-L75 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L70-L75 type DataframeAnalyticsMemoryEstimation struct { // ExpectedMemoryWithDisk Estimated memory usage under the assumption that overflowing to disk is // allowed during data frame analytics. 
expected_memory_with_disk is usually @@ -34,6 +42,50 @@ type DataframeAnalyticsMemoryEstimation struct { ExpectedMemoryWithoutDisk string `json:"expected_memory_without_disk"` } +func (s *DataframeAnalyticsMemoryEstimation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expected_memory_with_disk": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ExpectedMemoryWithDisk = o + + case "expected_memory_without_disk": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ExpectedMemoryWithoutDisk = o + + } + } + return nil +} + // NewDataframeAnalyticsMemoryEstimation returns a DataframeAnalyticsMemoryEstimation. func NewDataframeAnalyticsMemoryEstimation() *DataframeAnalyticsMemoryEstimation { r := &DataframeAnalyticsMemoryEstimation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsrecord.go index 3b385f68a..01c5f8026 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsrecord.go @@ -16,48 +16,220 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataFrameAnalyticsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/ml_data_frame_analytics/types.ts#L22-L102 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/ml_data_frame_analytics/types.ts#L22-L102 type DataFrameAnalyticsRecord struct { - // AssignmentExplanation why the job is or is not assigned to a node + // AssignmentExplanation Messages related to the selection of a node. AssignmentExplanation *string `json:"assignment_explanation,omitempty"` - // CreateTime job creation time + // CreateTime The time when the job was created. CreateTime *string `json:"create_time,omitempty"` - // Description description + // Description A description of the job. Description *string `json:"description,omitempty"` - // DestIndex destination index + // DestIndex The name of the destination index. DestIndex *string `json:"dest_index,omitempty"` - // FailureReason failure reason + // FailureReason Messages about the reason why the job failed. FailureReason *string `json:"failure_reason,omitempty"` - // Id the id + // Id The identifier for the job. Id *string `json:"id,omitempty"` - // ModelMemoryLimit model memory limit + // ModelMemoryLimit The approximate maximum amount of memory resources that are permitted for the + // job. ModelMemoryLimit *string `json:"model_memory_limit,omitempty"` - // NodeAddress network address of the assigned node + // NodeAddress The network address of the assigned node. 
NodeAddress *string `json:"node.address,omitempty"` - // NodeEphemeralId ephemeral id of the assigned node + // NodeEphemeralId The ephemeral identifier of the assigned node. NodeEphemeralId *string `json:"node.ephemeral_id,omitempty"` - // NodeId id of the assigned node + // NodeId The unique identifier of the assigned node. NodeId *string `json:"node.id,omitempty"` - // NodeName name of the assigned node + // NodeName The name of the assigned node. NodeName *string `json:"node.name,omitempty"` - // Progress progress + // Progress The progress report for the job by phase. Progress *string `json:"progress,omitempty"` - // SourceIndex source index + // SourceIndex The name of the source index. SourceIndex *string `json:"source_index,omitempty"` - // State job state + // State The current status of the job. State *string `json:"state,omitempty"` - // Type analysis type + // Type The type of analysis that the job performs. Type *string `json:"type,omitempty"` - // Version the version of Elasticsearch when the analytics was created + // Version The version of Elasticsearch when the job was created. 
Version *string `json:"version,omitempty"` } +func (s *DataFrameAnalyticsRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "assignment_explanation", "ae", "assignmentExplanation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AssignmentExplanation = &o + + case "create_time", "ct", "createTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CreateTime = &o + + case "description", "d": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "dest_index", "di", "destIndex": + if err := dec.Decode(&s.DestIndex); err != nil { + return err + } + + case "failure_reason", "fr", "failureReason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FailureReason = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "model_memory_limit", "mml", "modelMemoryLimit": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelMemoryLimit = &o + + case "node.address", "na", "nodeAddress": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeAddress = &o 
+ + case "node.ephemeral_id", "ne", "nodeEphemeralId": + if err := dec.Decode(&s.NodeEphemeralId); err != nil { + return err + } + + case "node.id", "ni", "nodeId": + if err := dec.Decode(&s.NodeId); err != nil { + return err + } + + case "node.name", "nn", "nodeName": + if err := dec.Decode(&s.NodeName); err != nil { + return err + } + + case "progress", "p": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Progress = &o + + case "source_index", "si", "sourceIndex": + if err := dec.Decode(&s.SourceIndex); err != nil { + return err + } + + case "state", "s": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.State = &o + + case "type", "t": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + case "version", "v": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewDataFrameAnalyticsRecord returns a DataFrameAnalyticsRecord. func NewDataFrameAnalyticsRecord() *DataFrameAnalyticsRecord { r := &DataFrameAnalyticsRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticssource.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticssource.go index c640987e0..110c0dd10 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticssource.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticssource.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // DataframeAnalyticsSource type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L39-L53 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L39-L53 type DataframeAnalyticsSource struct { // Index Index or indices on which to perform the analysis. It can be a single index // or index pattern as well as an array of indices or patterns. NOTE: If your @@ -37,13 +44,64 @@ type DataframeAnalyticsSource struct { Query *Query `json:"query,omitempty"` // RuntimeMappings Definitions of runtime fields that will become part of the mapping of the // destination index. - RuntimeMappings map[string]RuntimeField `json:"runtime_mappings,omitempty"` + RuntimeMappings RuntimeFields `json:"runtime_mappings,omitempty"` // Source_ Specify `includes` and/or `excludes patterns to select which fields will be // present in the destination. Fields that are excluded cannot be included in // the analysis. 
Source_ *DataframeAnalysisAnalyzedFields `json:"_source,omitempty"` } +func (s *DataframeAnalyticsSource) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Index = append(s.Index, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Index); err != nil { + return err + } + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return err + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeAnalyticsSource returns a DataframeAnalyticsSource. func NewDataframeAnalyticsSource() *DataframeAnalyticsSource { r := &DataframeAnalyticsSource{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatscontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatscontainer.go index fa8faaa8a..19128519e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatscontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatscontainer.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // DataframeAnalyticsStatsContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L370-L378 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L373-L381 type DataframeAnalyticsStatsContainer struct { // ClassificationStats An object containing information about the classification analysis job. ClassificationStats *DataframeAnalyticsStatsHyperparameters `json:"classification_stats,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatsdatacounts.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatsdatacounts.go index 8ff14fdbd..de7888ae8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatsdatacounts.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatsdatacounts.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeAnalyticsStatsDataCounts type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L361-L368 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L364-L371 type DataframeAnalyticsStatsDataCounts struct { // SkippedDocsCount The number of documents that are skipped during the analysis because they // contained values that are not supported by the analysis. 
For example, outlier @@ -37,6 +45,74 @@ type DataframeAnalyticsStatsDataCounts struct { TrainingDocsCount int `json:"training_docs_count"` } +func (s *DataframeAnalyticsStatsDataCounts) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "skipped_docs_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SkippedDocsCount = value + case float64: + f := int(v) + s.SkippedDocsCount = f + } + + case "test_docs_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TestDocsCount = value + case float64: + f := int(v) + s.TestDocsCount = f + } + + case "training_docs_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TrainingDocsCount = value + case float64: + f := int(v) + s.TrainingDocsCount = f + } + + } + } + return nil +} + // NewDataframeAnalyticsStatsDataCounts returns a DataframeAnalyticsStatsDataCounts. func NewDataframeAnalyticsStatsDataCounts() *DataframeAnalyticsStatsDataCounts { r := &DataframeAnalyticsStatsDataCounts{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatshyperparameters.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatshyperparameters.go index f50ae32dd..0e580f814 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatshyperparameters.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatshyperparameters.go @@ -16,22 +16,91 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeAnalyticsStatsHyperparameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L380-L387 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L383-L402 type DataframeAnalyticsStatsHyperparameters struct { + // Hyperparameters An object containing the parameters of the classification analysis job. Hyperparameters Hyperparameters `json:"hyperparameters"` // Iteration The number of iterations on the analysis. - Iteration int `json:"iteration"` - Timestamp int64 `json:"timestamp"` - TimingStats TimingStats `json:"timing_stats"` + Iteration int `json:"iteration"` + // Timestamp The timestamp when the statistics were reported in milliseconds since the + // epoch. + Timestamp int64 `json:"timestamp"` + // TimingStats An object containing time statistics about the data frame analytics job. + TimingStats TimingStats `json:"timing_stats"` + // ValidationLoss An object containing information about validation loss. 
ValidationLoss ValidationLoss `json:"validation_loss"` } +func (s *DataframeAnalyticsStatsHyperparameters) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "hyperparameters": + if err := dec.Decode(&s.Hyperparameters); err != nil { + return err + } + + case "iteration": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Iteration = value + case float64: + f := int(v) + s.Iteration = f + } + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + case "timing_stats": + if err := dec.Decode(&s.TimingStats); err != nil { + return err + } + + case "validation_loss": + if err := dec.Decode(&s.ValidationLoss); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeAnalyticsStatsHyperparameters returns a DataframeAnalyticsStatsHyperparameters. func NewDataframeAnalyticsStatsHyperparameters() *DataframeAnalyticsStatsHyperparameters { r := &DataframeAnalyticsStatsHyperparameters{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatsmemoryusage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatsmemoryusage.go index cb735f17b..72ebbb445 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatsmemoryusage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatsmemoryusage.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeAnalyticsStatsMemoryUsage type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L350-L359 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L353-L362 type DataframeAnalyticsStatsMemoryUsage struct { // MemoryReestimateBytes This value is present when the status is hard_limit and it is a new estimate // of how much memory the job needs. @@ -35,6 +43,73 @@ type DataframeAnalyticsStatsMemoryUsage struct { Timestamp *int64 `json:"timestamp,omitempty"` } +func (s *DataframeAnalyticsStatsMemoryUsage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "memory_reestimate_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MemoryReestimateBytes = &value + case float64: + f := int64(v) + s.MemoryReestimateBytes = &f + } + + case "peak_usage_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PeakUsageBytes = value + case float64: + f := int64(v) + s.PeakUsageBytes = f + } + + case "status": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) 
+ } + s.Status = o + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeAnalyticsStatsMemoryUsage returns a DataframeAnalyticsStatsMemoryUsage. func NewDataframeAnalyticsStatsMemoryUsage() *DataframeAnalyticsStatsMemoryUsage { r := &DataframeAnalyticsStatsMemoryUsage{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatsoutlierdetection.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatsoutlierdetection.go index 44705a15c..2ed7619ae 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatsoutlierdetection.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatsoutlierdetection.go @@ -16,17 +16,64 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // DataframeAnalyticsStatsOutlierDetection type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L389-L393 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L404-L417 type DataframeAnalyticsStatsOutlierDetection struct { - Parameters OutlierDetectionParameters `json:"parameters"` - Timestamp int64 `json:"timestamp"` - TimingStats TimingStats `json:"timing_stats"` + // Parameters The list of job parameters specified by the user or determined by algorithmic + // heuristics. 
+ Parameters OutlierDetectionParameters `json:"parameters"` + // Timestamp The timestamp when the statistics were reported in milliseconds since the + // epoch. + Timestamp int64 `json:"timestamp"` + // TimingStats An object containing time statistics about the data frame analytics job. + TimingStats TimingStats `json:"timing_stats"` +} + +func (s *DataframeAnalyticsStatsOutlierDetection) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "parameters": + if err := dec.Decode(&s.Parameters); err != nil { + return err + } + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + case "timing_stats": + if err := dec.Decode(&s.TimingStats); err != nil { + return err + } + + } + } + return nil } // NewDataframeAnalyticsStatsOutlierDetection returns a DataframeAnalyticsStatsOutlierDetection. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatsprogress.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatsprogress.go index 4c485d463..6deea8f65 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatsprogress.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticsstatsprogress.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeAnalyticsStatsProgress type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L343-L348 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L346-L351 type DataframeAnalyticsStatsProgress struct { // Phase Defines the phase of the data frame analytics job. Phase string `json:"phase"` @@ -31,6 +39,54 @@ type DataframeAnalyticsStatsProgress struct { ProgressPercent int `json:"progress_percent"` } +func (s *DataframeAnalyticsStatsProgress) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "phase": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Phase = o + + case "progress_percent": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ProgressPercent = value + case float64: + f := int(v) + s.ProgressPercent = f + } + + } + } + return nil +} + // NewDataframeAnalyticsStatsProgress returns a DataframeAnalyticsStatsProgress. func NewDataframeAnalyticsStatsProgress() *DataframeAnalyticsStatsProgress { r := &DataframeAnalyticsStatsProgress{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticssummary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticssummary.go index 331284270..c65ca9b52 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticssummary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeanalyticssummary.go @@ -16,13 +16,21 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeAnalyticsSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L306-L322 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L306-L322 type DataframeAnalyticsSummary struct { AllowLazyStart *bool `json:"allow_lazy_start,omitempty"` Analysis DataframeAnalysisContainer `json:"analysis"` @@ -41,6 +49,120 @@ type DataframeAnalyticsSummary struct { Version *string `json:"version,omitempty"` } +func (s *DataframeAnalyticsSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_lazy_start": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowLazyStart = &value + case bool: + s.AllowLazyStart = &v + } + + case "analysis": + if err := dec.Decode(&s.Analysis); err != nil { + return err + } + + case "analyzed_fields": + if err := dec.Decode(&s.AnalyzedFields); err != nil { + return err + } + + case "authorization": + if err := dec.Decode(&s.Authorization); err != nil { + return err + } + + case "create_time": + if err := dec.Decode(&s.CreateTime); err != nil { + return err + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, 
err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "dest": + if err := dec.Decode(&s.Dest); err != nil { + return err + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "max_num_threads": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxNumThreads = &value + case float64: + f := int(v) + s.MaxNumThreads = &f + } + + case "model_memory_limit": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelMemoryLimit = &o + + case "source": + if err := dec.Decode(&s.Source); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeAnalyticsSummary returns a DataframeAnalyticsSummary. func NewDataframeAnalyticsSummary() *DataframeAnalyticsSummary { r := &DataframeAnalyticsSummary{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummary.go index 512459190..6843b3ac9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummary.go @@ -16,19 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // DataframeClassificationSummary type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/evaluate_data_frame/types.ts#L31-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/evaluate_data_frame/types.ts#L44-L66 type DataframeClassificationSummary struct { - Accuracy *DataframeClassificationSummaryAccuracy `json:"accuracy,omitempty"` - AucRoc *DataframeEvaluationSummaryAucRoc `json:"auc_roc,omitempty"` + // Accuracy Accuracy of predictions (per-class and overall). + Accuracy *DataframeClassificationSummaryAccuracy `json:"accuracy,omitempty"` + // AucRoc The AUC ROC (area under the curve of the receiver operating characteristic) + // score and optionally the curve. + // It is calculated for a specific class (provided as "class_name") treated as + // positive. + AucRoc *DataframeEvaluationSummaryAucRoc `json:"auc_roc,omitempty"` + // MulticlassConfusionMatrix Multiclass confusion matrix. MulticlassConfusionMatrix *DataframeClassificationSummaryMulticlassConfusionMatrix `json:"multiclass_confusion_matrix,omitempty"` - Precision *DataframeClassificationSummaryPrecision `json:"precision,omitempty"` - Recall *DataframeClassificationSummaryRecall `json:"recall,omitempty"` + // Precision Precision of predictions (per-class and average). + Precision *DataframeClassificationSummaryPrecision `json:"precision,omitempty"` + // Recall Recall of predictions (per-class and average). + Recall *DataframeClassificationSummaryRecall `json:"recall,omitempty"` } // NewDataframeClassificationSummary returns a DataframeClassificationSummary. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummaryaccuracy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummaryaccuracy.go index 5b5807efe..b8a25951b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummaryaccuracy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummaryaccuracy.go @@ -16,18 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeClassificationSummaryAccuracy type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/evaluate_data_frame/types.ts#L70-L73 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/evaluate_data_frame/types.ts#L111-L114 type DataframeClassificationSummaryAccuracy struct { Classes []DataframeEvaluationClass `json:"classes"` OverallAccuracy Float64 `json:"overall_accuracy"` } +func (s *DataframeClassificationSummaryAccuracy) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classes": + if err := dec.Decode(&s.Classes); err != nil { + return err + } + + case "overall_accuracy": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.OverallAccuracy = f + case 
float64: + f := Float64(v) + s.OverallAccuracy = f + } + + } + } + return nil +} + // NewDataframeClassificationSummaryAccuracy returns a DataframeClassificationSummaryAccuracy. func NewDataframeClassificationSummaryAccuracy() *DataframeClassificationSummaryAccuracy { r := &DataframeClassificationSummaryAccuracy{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummarymulticlassconfusionmatrix.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummarymulticlassconfusionmatrix.go index d0aee4355..c76595d69 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummarymulticlassconfusionmatrix.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummarymulticlassconfusionmatrix.go @@ -16,18 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeClassificationSummaryMulticlassConfusionMatrix type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/evaluate_data_frame/types.ts#L79-L82 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/evaluate_data_frame/types.ts#L120-L123 type DataframeClassificationSummaryMulticlassConfusionMatrix struct { ConfusionMatrix []ConfusionMatrixItem `json:"confusion_matrix"` OtherActualClassCount int `json:"other_actual_class_count"` } +func (s *DataframeClassificationSummaryMulticlassConfusionMatrix) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "confusion_matrix": + if err := dec.Decode(&s.ConfusionMatrix); err != nil { + return err + } + + case "other_actual_class_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.OtherActualClassCount = value + case float64: + f := int(v) + s.OtherActualClassCount = f + } + + } + } + return nil +} + // NewDataframeClassificationSummaryMulticlassConfusionMatrix returns a DataframeClassificationSummaryMulticlassConfusionMatrix. 
func NewDataframeClassificationSummaryMulticlassConfusionMatrix() *DataframeClassificationSummaryMulticlassConfusionMatrix { r := &DataframeClassificationSummaryMulticlassConfusionMatrix{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummaryprecision.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummaryprecision.go index 23ca7e4f2..35050e345 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummaryprecision.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummaryprecision.go @@ -16,18 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeClassificationSummaryPrecision type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/evaluate_data_frame/types.ts#L60-L63 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/evaluate_data_frame/types.ts#L101-L104 type DataframeClassificationSummaryPrecision struct { AvgPrecision Float64 `json:"avg_precision"` Classes []DataframeEvaluationClass `json:"classes"` } +func (s *DataframeClassificationSummaryPrecision) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg_precision": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.AvgPrecision = f + case float64: + f := Float64(v) + s.AvgPrecision = f + } + + case "classes": + if err := dec.Decode(&s.Classes); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeClassificationSummaryPrecision returns a DataframeClassificationSummaryPrecision. func NewDataframeClassificationSummaryPrecision() *DataframeClassificationSummaryPrecision { r := &DataframeClassificationSummaryPrecision{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummaryrecall.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummaryrecall.go index 2b3bc9bac..aadded161 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummaryrecall.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeclassificationsummaryrecall.go @@ -16,18 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeClassificationSummaryRecall type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/evaluate_data_frame/types.ts#L65-L68 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/evaluate_data_frame/types.ts#L106-L109 type DataframeClassificationSummaryRecall struct { AvgRecall Float64 `json:"avg_recall"` Classes []DataframeEvaluationClass `json:"classes"` } +func (s *DataframeClassificationSummaryRecall) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg_recall": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.AvgRecall = f + case float64: + f := Float64(v) + s.AvgRecall = f + } + + case "classes": + if err := dec.Decode(&s.Classes); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeClassificationSummaryRecall returns a DataframeClassificationSummaryRecall. 
func NewDataframeClassificationSummaryRecall() *DataframeClassificationSummaryRecall { r := &DataframeClassificationSummaryRecall{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationclass.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationclass.go index 42ab2696b..8fafd60fb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationclass.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationclass.go @@ -16,18 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeEvaluationClass type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/evaluate_data_frame/types.ts#L75-L77 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/evaluate_data_frame/types.ts#L116-L118 type DataframeEvaluationClass struct { ClassName string `json:"class_name"` Value Float64 `json:"value"` } +func (s *DataframeEvaluationClass) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "class_name": + if err := dec.Decode(&s.ClassName); err != nil { + return err + } + + case "value": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Value = f + case float64: + f := 
Float64(v) + s.Value = f + } + + } + } + return nil +} + // NewDataframeEvaluationClass returns a DataframeEvaluationClass. func NewDataframeEvaluationClass() *DataframeEvaluationClass { r := &DataframeEvaluationClass{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationclassification.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationclassification.go index 438e1d57a..89a207406 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationclassification.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationclassification.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // DataframeEvaluationClassification type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeEvaluation.ts#L35-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeEvaluation.ts#L35-L44 type DataframeEvaluationClassification struct { // ActualField The field of the index which contains the ground truth. The data type of this // field can be boolean or integer. 
If the data type is integer, the value has @@ -39,6 +46,46 @@ type DataframeEvaluationClassification struct { TopClassesField *string `json:"top_classes_field,omitempty"` } +func (s *DataframeEvaluationClassification) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actual_field": + if err := dec.Decode(&s.ActualField); err != nil { + return err + } + + case "metrics": + if err := dec.Decode(&s.Metrics); err != nil { + return err + } + + case "predicted_field": + if err := dec.Decode(&s.PredictedField); err != nil { + return err + } + + case "top_classes_field": + if err := dec.Decode(&s.TopClassesField); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeEvaluationClassification returns a DataframeEvaluationClassification. func NewDataframeEvaluationClassification() *DataframeEvaluationClassification { r := &DataframeEvaluationClassification{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationclassificationmetrics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationclassificationmetrics.go index a19d01142..609e65a71 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationclassificationmetrics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationclassificationmetrics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // DataframeEvaluationClassificationMetrics type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeEvaluation.ts#L73-L78 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeEvaluation.ts#L73-L78 type DataframeEvaluationClassificationMetrics struct { // Accuracy Accuracy of predictions (per-class and overall). Accuracy map[string]json.RawMessage `json:"accuracy,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationclassificationmetricsaucroc.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationclassificationmetricsaucroc.go index d937c427e..4384fc142 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationclassificationmetricsaucroc.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationclassificationmetricsaucroc.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeEvaluationClassificationMetricsAucRoc type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeEvaluation.ts#L85-L90 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeEvaluation.ts#L85-L90 type DataframeEvaluationClassificationMetricsAucRoc struct { // ClassName Name of the only class that is treated as positive during AUC ROC // calculation. 
Other classes are treated as negative ("one-vs-all" strategy). @@ -34,6 +42,45 @@ type DataframeEvaluationClassificationMetricsAucRoc struct { IncludeCurve *bool `json:"include_curve,omitempty"` } +func (s *DataframeEvaluationClassificationMetricsAucRoc) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "class_name": + if err := dec.Decode(&s.ClassName); err != nil { + return err + } + + case "include_curve": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IncludeCurve = &value + case bool: + s.IncludeCurve = &v + } + + } + } + return nil +} + // NewDataframeEvaluationClassificationMetricsAucRoc returns a DataframeEvaluationClassificationMetricsAucRoc. func NewDataframeEvaluationClassificationMetricsAucRoc() *DataframeEvaluationClassificationMetricsAucRoc { r := &DataframeEvaluationClassificationMetricsAucRoc{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationcontainer.go index 026281d46..9276d419a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationcontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationcontainer.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // DataframeEvaluationContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeEvaluation.ts#L25-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeEvaluation.ts#L25-L33 type DataframeEvaluationContainer struct { // Classification Classification evaluation evaluates the results of a classification analysis // which outputs a prediction that identifies to which of the classes each diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationmetrics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationmetrics.go index 8cbc7efb4..be0821604 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationmetrics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationmetrics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // DataframeEvaluationMetrics type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeEvaluation.ts#L64-L71 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeEvaluation.ts#L64-L71 type DataframeEvaluationMetrics struct { // AucRoc The AUC ROC (area under the curve of the receiver operating characteristic) // score and optionally the curve. 
It is calculated for a specific class diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationoutlierdetection.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationoutlierdetection.go index 8510eeea2..c39facfaa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationoutlierdetection.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationoutlierdetection.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // DataframeEvaluationOutlierDetection type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeEvaluation.ts#L46-L53 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeEvaluation.ts#L46-L53 type DataframeEvaluationOutlierDetection struct { // ActualField The field of the index which contains the ground truth. The data type of this // field can be boolean or integer. 
If the data type is integer, the value has @@ -36,6 +43,41 @@ type DataframeEvaluationOutlierDetection struct { PredictedProbabilityField string `json:"predicted_probability_field"` } +func (s *DataframeEvaluationOutlierDetection) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actual_field": + if err := dec.Decode(&s.ActualField); err != nil { + return err + } + + case "metrics": + if err := dec.Decode(&s.Metrics); err != nil { + return err + } + + case "predicted_probability_field": + if err := dec.Decode(&s.PredictedProbabilityField); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeEvaluationOutlierDetection returns a DataframeEvaluationOutlierDetection. func NewDataframeEvaluationOutlierDetection() *DataframeEvaluationOutlierDetection { r := &DataframeEvaluationOutlierDetection{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationoutlierdetectionmetrics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationoutlierdetectionmetrics.go index 19372587b..a524adc69 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationoutlierdetectionmetrics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationoutlierdetectionmetrics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // DataframeEvaluationOutlierDetectionMetrics type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeEvaluation.ts#L80-L83 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeEvaluation.ts#L80-L83 type DataframeEvaluationOutlierDetectionMetrics struct { // AucRoc The AUC ROC (area under the curve of the receiver operating characteristic) // score and optionally the curve. It is calculated for a specific class diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationregression.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationregression.go index 6ed1079a4..9e21597fe 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationregression.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationregression.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // DataframeEvaluationRegression type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeEvaluation.ts#L55-L62 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeEvaluation.ts#L55-L62 type DataframeEvaluationRegression struct { // ActualField The field of the index which contains the ground truth. The data type of this // field must be numerical. 
@@ -36,6 +43,41 @@ type DataframeEvaluationRegression struct { PredictedField string `json:"predicted_field"` } +func (s *DataframeEvaluationRegression) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actual_field": + if err := dec.Decode(&s.ActualField); err != nil { + return err + } + + case "metrics": + if err := dec.Decode(&s.Metrics); err != nil { + return err + } + + case "predicted_field": + if err := dec.Decode(&s.PredictedField); err != nil { + return err + } + + } + } + return nil +} + // NewDataframeEvaluationRegression returns a DataframeEvaluationRegression. func NewDataframeEvaluationRegression() *DataframeEvaluationRegression { r := &DataframeEvaluationRegression{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationregressionmetrics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationregressionmetrics.go index 7c4fbf6fa..548204aa3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationregressionmetrics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationregressionmetrics.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // DataframeEvaluationRegressionMetrics type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeEvaluation.ts#L92-L110 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeEvaluation.ts#L92-L110 type DataframeEvaluationRegressionMetrics struct { // Huber Pseudo Huber loss function. Huber *DataframeEvaluationRegressionMetricsHuber `json:"huber,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationregressionmetricshuber.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationregressionmetricshuber.go index 8ee5b214e..2dfa77926 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationregressionmetricshuber.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationregressionmetricshuber.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeEvaluationRegressionMetricsHuber type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeEvaluation.ts#L117-L120 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeEvaluation.ts#L117-L120 type DataframeEvaluationRegressionMetricsHuber struct { // Delta Approximates 1/2 (prediction - actual)2 for values much less than delta and // approximates a straight line with slope delta for values much larger than @@ -30,6 +38,42 @@ type DataframeEvaluationRegressionMetricsHuber struct { Delta *Float64 `json:"delta,omitempty"` } +func (s *DataframeEvaluationRegressionMetricsHuber) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "delta": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Delta = &f + case float64: + f := Float64(v) + s.Delta = &f + } + + } + } + return nil +} + // NewDataframeEvaluationRegressionMetricsHuber returns a DataframeEvaluationRegressionMetricsHuber. func NewDataframeEvaluationRegressionMetricsHuber() *DataframeEvaluationRegressionMetricsHuber { r := &DataframeEvaluationRegressionMetricsHuber{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationregressionmetricsmsle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationregressionmetricsmsle.go index 4164beeba..dda22a3fd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationregressionmetricsmsle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationregressionmetricsmsle.go @@ -16,19 +16,63 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeEvaluationRegressionMetricsMsle type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeEvaluation.ts#L112-L115 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeEvaluation.ts#L112-L115 type DataframeEvaluationRegressionMetricsMsle struct { // Offset Defines the transition point at which you switch from minimizing quadratic // error to minimizing quadratic log error. Defaults to 1. Offset *Float64 `json:"offset,omitempty"` } +func (s *DataframeEvaluationRegressionMetricsMsle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "offset": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Offset = &f + case float64: + f := Float64(v) + s.Offset = &f + } + + } + } + return nil +} + // NewDataframeEvaluationRegressionMetricsMsle returns a DataframeEvaluationRegressionMetricsMsle. 
func NewDataframeEvaluationRegressionMetricsMsle() *DataframeEvaluationRegressionMetricsMsle { r := &DataframeEvaluationRegressionMetricsMsle{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationsummaryaucroc.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationsummaryaucroc.go index 561da2b9d..8c20a48d8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationsummaryaucroc.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationsummaryaucroc.go @@ -16,18 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeEvaluationSummaryAucRoc type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/evaluate_data_frame/types.ts#L50-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/evaluate_data_frame/types.ts#L91-L93 type DataframeEvaluationSummaryAucRoc struct { Curve []DataframeEvaluationSummaryAucRocCurveItem `json:"curve,omitempty"` Value Float64 `json:"value"` } +func (s *DataframeEvaluationSummaryAucRoc) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "curve": + if err := dec.Decode(&s.Curve); err != nil { + return err + } + + case "value": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Value = f + case float64: + f := Float64(v) + s.Value = f + } + + } + } + return nil +} + // NewDataframeEvaluationSummaryAucRoc returns a DataframeEvaluationSummaryAucRoc. func NewDataframeEvaluationSummaryAucRoc() *DataframeEvaluationSummaryAucRoc { r := &DataframeEvaluationSummaryAucRoc{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationsummaryaucroccurveitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationsummaryaucroccurveitem.go index 4d3ef3633..032993bdb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationsummaryaucroccurveitem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationsummaryaucroccurveitem.go @@ -16,19 +16,95 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeEvaluationSummaryAucRocCurveItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/evaluate_data_frame/types.ts#L54-L58 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/evaluate_data_frame/types.ts#L95-L99 type DataframeEvaluationSummaryAucRocCurveItem struct { Fpr Float64 `json:"fpr"` Threshold Float64 `json:"threshold"` Tpr Float64 `json:"tpr"` } +func (s *DataframeEvaluationSummaryAucRocCurveItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fpr": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Fpr = f + case float64: + f := Float64(v) + s.Fpr = f + } + + case "threshold": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Threshold = f + case float64: + f := Float64(v) + s.Threshold = f + } + + case "tpr": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Tpr = f + case float64: + f := Float64(v) + s.Tpr = f + } + + } + } + return nil +} + // NewDataframeEvaluationSummaryAucRocCurveItem returns a 
DataframeEvaluationSummaryAucRocCurveItem. func NewDataframeEvaluationSummaryAucRocCurveItem() *DataframeEvaluationSummaryAucRocCurveItem { r := &DataframeEvaluationSummaryAucRocCurveItem{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationvalue.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationvalue.go index 1cac7d408..b7de343e0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationvalue.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeevaluationvalue.go @@ -16,17 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframeEvaluationValue type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/evaluate_data_frame/types.ts#L46-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/evaluate_data_frame/types.ts#L87-L89 type DataframeEvaluationValue struct { Value Float64 `json:"value"` } +func (s *DataframeEvaluationValue) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "value": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Value = f + case float64: + f := Float64(v) + s.Value = f + } + + } + } + return nil +} + // NewDataframeEvaluationValue returns a DataframeEvaluationValue. func NewDataframeEvaluationValue() *DataframeEvaluationValue { r := &DataframeEvaluationValue{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeoutlierdetectionsummary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeoutlierdetectionsummary.go index 2c6cf4559..653dd6e23 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeoutlierdetectionsummary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeoutlierdetectionsummary.go @@ -16,18 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // DataframeOutlierDetectionSummary type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/evaluate_data_frame/types.ts#L24-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/evaluate_data_frame/types.ts#L24-L42 type DataframeOutlierDetectionSummary struct { - AucRoc *DataframeEvaluationSummaryAucRoc `json:"auc_roc,omitempty"` + // AucRoc The AUC ROC (area under the curve of the receiver operating characteristic) + // score and optionally the curve. + AucRoc *DataframeEvaluationSummaryAucRoc `json:"auc_roc,omitempty"` + // ConfusionMatrix Set the different thresholds of the outlier score at where the metrics (`tp` + // - true positive, `fp` - false positive, `tn` - true negative, `fn` - false + // negative) are calculated. ConfusionMatrix map[string]ConfusionMatrixThreshold `json:"confusion_matrix,omitempty"` - Precision map[string]Float64 `json:"precision,omitempty"` - Recall map[string]Float64 `json:"recall,omitempty"` + // Precision Set the different thresholds of the outlier score at where the metric is + // calculated. + Precision map[string]Float64 `json:"precision,omitempty"` + // Recall Set the different thresholds of the outlier score at where the metric is + // calculated. + Recall map[string]Float64 `json:"recall,omitempty"` } // NewDataframeOutlierDetectionSummary returns a DataframeOutlierDetectionSummary. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframepreviewconfig.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframepreviewconfig.go index 5ac89e18b..5ee033e6c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframepreviewconfig.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframepreviewconfig.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataframePreviewConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/preview_data_frame_analytics/types.ts#L27-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/preview_data_frame_analytics/types.ts#L27-L33 type DataframePreviewConfig struct { Analysis DataframeAnalysisContainer `json:"analysis"` AnalyzedFields *DataframeAnalysisAnalyzedFields `json:"analyzed_fields,omitempty"` @@ -31,6 +39,69 @@ type DataframePreviewConfig struct { Source DataframeAnalyticsSource `json:"source"` } +func (s *DataframePreviewConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analysis": + if err := dec.Decode(&s.Analysis); err != nil { + return err + } + + case "analyzed_fields": + if err := dec.Decode(&s.AnalyzedFields); err != nil { + return err + } + + case "max_num_threads": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxNumThreads = &value + case float64: + f := int(v) + s.MaxNumThreads = &f + } + + case "model_memory_limit": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelMemoryLimit = &o + + case "source": + if err := dec.Decode(&s.Source); err != nil { + return err + } + + } + } + return nil +} + // 
NewDataframePreviewConfig returns a DataframePreviewConfig. func NewDataframePreviewConfig() *DataframePreviewConfig { r := &DataframePreviewConfig{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeregressionsummary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeregressionsummary.go index 64d200e9d..ebbe02072 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeregressionsummary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dataframeregressionsummary.go @@ -16,17 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // DataframeRegressionSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/evaluate_data_frame/types.ts#L39-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/evaluate_data_frame/types.ts#L68-L85 type DataframeRegressionSummary struct { - Huber *DataframeEvaluationValue `json:"huber,omitempty"` - Mse *DataframeEvaluationValue `json:"mse,omitempty"` - Msle *DataframeEvaluationValue `json:"msle,omitempty"` + // Huber Pseudo Huber loss function. + Huber *DataframeEvaluationValue `json:"huber,omitempty"` + // Mse Average squared difference between the predicted values and the actual + // (`ground truth`) value. + Mse *DataframeEvaluationValue `json:"mse,omitempty"` + // Msle Average squared difference between the logarithm of the predicted values and + // the logarithm of the actual (`ground truth`) value. 
+ Msle *DataframeEvaluationValue `json:"msle,omitempty"` + // RSquared Proportion of the variance in the dependent variable that is predictable from + // the independent variables. RSquared *DataframeEvaluationValue `json:"r_squared,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datapathstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datapathstats.go index 2188c419d..247a08723 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datapathstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datapathstats.go @@ -16,15 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataPathStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L229-L246 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L550-L594 type DataPathStats struct { - Available *string `json:"available,omitempty"` + // Available Total amount of disk space available to this Java virtual machine on this + // file store. + Available *string `json:"available,omitempty"` + // AvailableInBytes Total number of bytes available to this Java virtual machine on this file + // store. 
AvailableInBytes *int64 `json:"available_in_bytes,omitempty"` DiskQueue *string `json:"disk_queue,omitempty"` DiskReadSize *string `json:"disk_read_size,omitempty"` @@ -33,13 +45,253 @@ type DataPathStats struct { DiskWriteSize *string `json:"disk_write_size,omitempty"` DiskWriteSizeInBytes *int64 `json:"disk_write_size_in_bytes,omitempty"` DiskWrites *int64 `json:"disk_writes,omitempty"` - Free *string `json:"free,omitempty"` - FreeInBytes *int64 `json:"free_in_bytes,omitempty"` - Mount *string `json:"mount,omitempty"` - Path *string `json:"path,omitempty"` - Total *string `json:"total,omitempty"` - TotalInBytes *int64 `json:"total_in_bytes,omitempty"` - Type *string `json:"type,omitempty"` + // Free Total amount of unallocated disk space in the file store. + Free *string `json:"free,omitempty"` + // FreeInBytes Total number of unallocated bytes in the file store. + FreeInBytes *int64 `json:"free_in_bytes,omitempty"` + // Mount Mount point of the file store (for example: `/dev/sda2`). + Mount *string `json:"mount,omitempty"` + // Path Path to the file store. + Path *string `json:"path,omitempty"` + // Total Total size of the file store. + Total *string `json:"total,omitempty"` + // TotalInBytes Total size of the file store in bytes. + TotalInBytes *int64 `json:"total_in_bytes,omitempty"` + // Type Type of the file store (ex: ext4). 
+ Type *string `json:"type,omitempty"` +} + +func (s *DataPathStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Available = &o + + case "available_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.AvailableInBytes = &value + case float64: + f := int64(v) + s.AvailableInBytes = &f + } + + case "disk_queue": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DiskQueue = &o + + case "disk_read_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DiskReadSize = &o + + case "disk_read_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DiskReadSizeInBytes = &value + case float64: + f := int64(v) + s.DiskReadSizeInBytes = &f + } + + case "disk_reads": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DiskReads = &value + case float64: + f := int64(v) + s.DiskReads = &f + } + + case "disk_write_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = 
string(tmp[:]) + } + s.DiskWriteSize = &o + + case "disk_write_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DiskWriteSizeInBytes = &value + case float64: + f := int64(v) + s.DiskWriteSizeInBytes = &f + } + + case "disk_writes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DiskWrites = &value + case float64: + f := int64(v) + s.DiskWrites = &f + } + + case "free": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Free = &o + + case "free_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.FreeInBytes = &value + case float64: + f := int64(v) + s.FreeInBytes = &f + } + + case "mount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Mount = &o + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = &o + + case "total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Total = &o + + case "total_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalInBytes = &value + case float64: + f := int64(v) + s.TotalInBytes = &f + 
} + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + } + } + return nil } // NewDataPathStats returns a DataPathStats. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastream.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastream.go index 0f130d5c3..de4f52d13 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastream.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastream.go @@ -16,32 +16,202 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/healthstatus" ) // DataStream type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/DataStream.ts#L31-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/DataStream.ts#L32-L96 type DataStream struct { - AllowCustomRouting *bool `json:"allow_custom_routing,omitempty"` - Generation int `json:"generation"` - Hidden bool `json:"hidden"` - IlmPolicy *string `json:"ilm_policy,omitempty"` - Indices []DataStreamIndex `json:"indices"` - Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` - Name string `json:"name"` - Replicated *bool `json:"replicated,omitempty"` - Status healthstatus.HealthStatus `json:"status"` - System *bool `json:"system,omitempty"` - Template string `json:"template"` - TimestampField DataStreamTimestampField `json:"timestamp_field"` + // AllowCustomRouting If `true`, the data stream allows custom routing on write request. + AllowCustomRouting *bool `json:"allow_custom_routing,omitempty"` + // Generation Current generation for the data stream. This number acts as a cumulative + // count of the stream’s rollovers, starting at 1. + Generation int `json:"generation"` + // Hidden If `true`, the data stream is hidden. + Hidden bool `json:"hidden"` + // IlmPolicy Name of the current ILM lifecycle policy in the stream’s matching index + // template. + // This lifecycle policy is set in the `index.lifecycle.name` setting. + // If the template does not include a lifecycle policy, this property is not + // included in the response. + // NOTE: A data stream’s backing indices may be assigned different lifecycle + // policies. To retrieve the lifecycle policy for individual backing indices, + // use the get index settings API. + IlmPolicy *string `json:"ilm_policy,omitempty"` + // Indices Array of objects containing information about the data stream’s backing + // indices. 
+ // The last item in this array contains information about the stream’s current + // write index. + Indices []DataStreamIndex `json:"indices"` + // Lifecycle Contains the configuration for the data lifecycle management of this data + // stream. + Lifecycle *DataStreamLifecycleWithRollover `json:"lifecycle,omitempty"` + // Meta_ Custom metadata for the stream, copied from the `_meta` object of the + // stream’s matching index template. + // If empty, the response omits this property. + Meta_ Metadata `json:"_meta,omitempty"` + // Name Name of the data stream. + Name string `json:"name"` + // Replicated If `true`, the data stream is created and managed by cross-cluster + // replication and the local cluster can not write into this data stream or + // change its mappings. + Replicated *bool `json:"replicated,omitempty"` + // Status Health status of the data stream. + // This health status is based on the state of the primary and replica shards of + // the stream’s backing indices. + Status healthstatus.HealthStatus `json:"status"` + // System If `true`, the data stream is created and managed by an Elastic stack + // component and cannot be modified through normal user interaction. + System *bool `json:"system,omitempty"` + // Template Name of the index template used to create the data stream’s backing indices. + // The template’s index pattern must match the name of this data stream. + Template string `json:"template"` + // TimestampField Information about the `@timestamp` field in the data stream. 
+ TimestampField DataStreamTimestampField `json:"timestamp_field"` +} + +func (s *DataStream) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_custom_routing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowCustomRouting = &value + case bool: + s.AllowCustomRouting = &v + } + + case "generation": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Generation = value + case float64: + f := int(v) + s.Generation = f + } + + case "hidden": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Hidden = value + case bool: + s.Hidden = v + } + + case "ilm_policy": + if err := dec.Decode(&s.IlmPolicy); err != nil { + return err + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return err + } + + case "lifecycle": + if err := dec.Decode(&s.Lifecycle); err != nil { + return err + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "replicated": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Replicated = &value + case bool: + s.Replicated = &v + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "system": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.System = &value + case 
bool: + s.System = &v + } + + case "template": + if err := dec.Decode(&s.Template); err != nil { + return err + } + + case "timestamp_field": + if err := dec.Decode(&s.TimestampField); err != nil { + return err + } + + } + } + return nil } // NewDataStream returns a DataStream. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamindex.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamindex.go index b1b6b986b..7de62a167 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamindex.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamindex.go @@ -16,18 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // DataStreamIndex type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/DataStream.ts#L52-L55 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/DataStream.ts#L105-L114 type DataStreamIndex struct { + // IndexName Name of the backing index. IndexName string `json:"index_name"` + // IndexUuid Universally unique identifier (UUID) for the index. 
IndexUuid string `json:"index_uuid"` } +func (s *DataStreamIndex) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index_name": + if err := dec.Decode(&s.IndexName); err != nil { + return err + } + + case "index_uuid": + if err := dec.Decode(&s.IndexUuid); err != nil { + return err + } + + } + } + return nil +} + // NewDataStreamIndex returns a DataStreamIndex. func NewDataStreamIndex() *DataStreamIndex { r := &DataStreamIndex{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamlifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamlifecycle.go new file mode 100644 index 000000000..8ed15fccd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamlifecycle.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + +// DataStreamLifecycle type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/DataStreamLifecycle.ts#L25-L31 +type DataStreamLifecycle struct { + DataRetention Duration `json:"data_retention,omitempty"` + Downsampling *DataStreamLifecycleDownsampling `json:"downsampling,omitempty"` +} + +func (s *DataStreamLifecycle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data_retention": + if err := dec.Decode(&s.DataRetention); err != nil { + return err + } + + case "downsampling": + if err := dec.Decode(&s.Downsampling); err != nil { + return err + } + + } + } + return nil +} + +// NewDataStreamLifecycle returns a DataStreamLifecycle. +func NewDataStreamLifecycle() *DataStreamLifecycle { + r := &DataStreamLifecycle{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamlifecycledownsampling.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamlifecycledownsampling.go new file mode 100644 index 000000000..37dc6d3e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamlifecycledownsampling.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +// DataStreamLifecycleDownsampling type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/DataStreamLifecycleDownsampling.ts#L22-L27 +type DataStreamLifecycleDownsampling struct { + // Rounds The list of downsampling rounds to execute as part of this downsampling + // configuration + Rounds []DownsamplingRound `json:"rounds"` +} + +// NewDataStreamLifecycleDownsampling returns a DataStreamLifecycleDownsampling. +func NewDataStreamLifecycleDownsampling() *DataStreamLifecycleDownsampling { + r := &DataStreamLifecycleDownsampling{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamlifecycleexplain.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamlifecycleexplain.go new file mode 100644 index 000000000..df70ca183 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamlifecycleexplain.go @@ -0,0 +1,132 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// DataStreamLifecycleExplain type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/explain_data_lifecycle/IndicesExplainDataLifecycleResponse.ts#L31-L41 +type DataStreamLifecycleExplain struct { + Error *string `json:"error,omitempty"` + GenerationTime Duration `json:"generation_time,omitempty"` + Index string `json:"index"` + IndexCreationDateMillis *int64 `json:"index_creation_date_millis,omitempty"` + Lifecycle *DataStreamLifecycleWithRollover `json:"lifecycle,omitempty"` + ManagedByLifecycle bool `json:"managed_by_lifecycle"` + RolloverDateMillis *int64 `json:"rollover_date_millis,omitempty"` + TimeSinceIndexCreation Duration `json:"time_since_index_creation,omitempty"` + TimeSinceRollover Duration `json:"time_since_rollover,omitempty"` +} + +func (s *DataStreamLifecycleExplain) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + var tmp json.RawMessage + if err := 
dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Error = &o + + case "generation_time": + if err := dec.Decode(&s.GenerationTime); err != nil { + return err + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "index_creation_date_millis": + if err := dec.Decode(&s.IndexCreationDateMillis); err != nil { + return err + } + + case "lifecycle": + if err := dec.Decode(&s.Lifecycle); err != nil { + return err + } + + case "managed_by_lifecycle": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.ManagedByLifecycle = value + case bool: + s.ManagedByLifecycle = v + } + + case "rollover_date_millis": + if err := dec.Decode(&s.RolloverDateMillis); err != nil { + return err + } + + case "time_since_index_creation": + if err := dec.Decode(&s.TimeSinceIndexCreation); err != nil { + return err + } + + case "time_since_rollover": + if err := dec.Decode(&s.TimeSinceRollover); err != nil { + return err + } + + } + } + return nil +} + +// NewDataStreamLifecycleExplain returns a DataStreamLifecycleExplain. +func NewDataStreamLifecycleExplain() *DataStreamLifecycleExplain { + r := &DataStreamLifecycleExplain{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamlifecyclerolloverconditions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamlifecyclerolloverconditions.go new file mode 100644 index 000000000..98099aadc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamlifecyclerolloverconditions.go @@ -0,0 +1,169 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// DataStreamLifecycleRolloverConditions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/DataStreamLifecycle.ts#L57-L69 +type DataStreamLifecycleRolloverConditions struct { + MaxAge *string `json:"max_age,omitempty"` + MaxDocs *int64 `json:"max_docs,omitempty"` + MaxPrimaryShardDocs *int64 `json:"max_primary_shard_docs,omitempty"` + MaxPrimaryShardSize ByteSize `json:"max_primary_shard_size,omitempty"` + MaxSize ByteSize `json:"max_size,omitempty"` + MinAge Duration `json:"min_age,omitempty"` + MinDocs *int64 `json:"min_docs,omitempty"` + MinPrimaryShardDocs *int64 `json:"min_primary_shard_docs,omitempty"` + MinPrimaryShardSize ByteSize `json:"min_primary_shard_size,omitempty"` + MinSize ByteSize `json:"min_size,omitempty"` +} + +func (s *DataStreamLifecycleRolloverConditions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_age": + var tmp 
json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxAge = &o + + case "max_docs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MaxDocs = &value + case float64: + f := int64(v) + s.MaxDocs = &f + } + + case "max_primary_shard_docs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MaxPrimaryShardDocs = &value + case float64: + f := int64(v) + s.MaxPrimaryShardDocs = &f + } + + case "max_primary_shard_size": + if err := dec.Decode(&s.MaxPrimaryShardSize); err != nil { + return err + } + + case "max_size": + if err := dec.Decode(&s.MaxSize); err != nil { + return err + } + + case "min_age": + if err := dec.Decode(&s.MinAge); err != nil { + return err + } + + case "min_docs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MinDocs = &value + case float64: + f := int64(v) + s.MinDocs = &f + } + + case "min_primary_shard_docs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MinPrimaryShardDocs = &value + case float64: + f := int64(v) + s.MinPrimaryShardDocs = &f + } + + case "min_primary_shard_size": + if err := dec.Decode(&s.MinPrimaryShardSize); err != nil { + return err + } + + case "min_size": + if err := dec.Decode(&s.MinSize); err != nil { + return err + } + + } + } + return nil +} + +// NewDataStreamLifecycleRolloverConditions returns a DataStreamLifecycleRolloverConditions. 
+func NewDataStreamLifecycleRolloverConditions() *DataStreamLifecycleRolloverConditions { + r := &DataStreamLifecycleRolloverConditions{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamlifecyclewithrollover.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamlifecyclewithrollover.go new file mode 100644 index 000000000..35da6727d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamlifecyclewithrollover.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + +// DataStreamLifecycleWithRollover type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/DataStreamLifecycle.ts#L33-L55 +type DataStreamLifecycleWithRollover struct { + // DataRetention If defined, every document added to this data stream will be stored at least + // for this time frame. 
+ // Any time after this duration the document could be deleted. + // When empty, every document in this data stream will be stored indefinitely. + DataRetention Duration `json:"data_retention,omitempty"` + // Downsampling The downsampling configuration to execute for the managed backing index after + // rollover. + Downsampling *DataStreamLifecycleDownsampling `json:"downsampling,omitempty"` + // Rollover The conditions which will trigger the rollover of a backing index as + // configured by the cluster setting `cluster.lifecycle.default.rollover`. + // This property is an implementation detail and it will only be retrieved when + // the query param `include_defaults` is set to true. + // The contents of this field are subject to change. + Rollover *DataStreamLifecycleRolloverConditions `json:"rollover,omitempty"` +} + +func (s *DataStreamLifecycleWithRollover) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data_retention": + if err := dec.Decode(&s.DataRetention); err != nil { + return err + } + + case "downsampling": + if err := dec.Decode(&s.Downsampling); err != nil { + return err + } + + case "rollover": + if err := dec.Decode(&s.Rollover); err != nil { + return err + } + + } + } + return nil +} + +// NewDataStreamLifecycleWithRollover returns a DataStreamLifecycleWithRollover. 
+func NewDataStreamLifecycleWithRollover() *DataStreamLifecycleWithRollover { + r := &DataStreamLifecycleWithRollover{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamnames.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamnames.go index bfb559a2e..6c7901150 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamnames.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamnames.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // DataStreamNames type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L86-L86 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L88-L88 type DataStreamNames []string diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreams.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreams.go index e35cb4448..0456ba117 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreams.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreams.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataStreams type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L81-L84 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L81-L84 type DataStreams struct { Available bool `json:"available"` DataStreams int64 `json:"data_streams"` @@ -30,6 +38,84 @@ type DataStreams struct { IndicesCount int64 `json:"indices_count"` } +func (s *DataStreams) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "data_streams": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DataStreams = value + case float64: + f := int64(v) + s.DataStreams = f + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "indices_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndicesCount = value + case float64: + f := int64(v) + s.IndicesCount = f + } + + } + } + return nil +} + // NewDataStreams returns a DataStreams. 
func NewDataStreams() *DataStreams { r := &DataStreams{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamsstatsitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamsstatsitem.go index 71a753f4e..890354f33 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamsstatsitem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamsstatsitem.go @@ -16,19 +16,106 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataStreamsStatsItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L36-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/data_streams_stats/IndicesDataStreamsStatsResponse.ts#L45-L65 type DataStreamsStatsItem struct { - BackingIndices int `json:"backing_indices"` - DataStream string `json:"data_stream"` - MaximumTimestamp int64 `json:"maximum_timestamp"` - StoreSize ByteSize `json:"store_size,omitempty"` - StoreSizeBytes int `json:"store_size_bytes"` + // BackingIndices Current number of backing indices for the data stream. + BackingIndices int `json:"backing_indices"` + // DataStream Name of the data stream. + DataStream string `json:"data_stream"` + // MaximumTimestamp The data stream’s highest `@timestamp` value, converted to milliseconds since + // the Unix epoch. + // NOTE: This timestamp is provided as a best effort. 
+ // The data stream may contain `@timestamp` values higher than this if one or + // more of the following conditions are met: + // The stream contains closed backing indices; + // Backing indices with a lower generation contain higher `@timestamp` values. + MaximumTimestamp int64 `json:"maximum_timestamp"` + // StoreSize Total size of all shards for the data stream’s backing indices. + // This parameter is only returned if the `human` query parameter is `true`. + StoreSize ByteSize `json:"store_size,omitempty"` + // StoreSizeBytes Total size, in bytes, of all shards for the data stream’s backing indices. + StoreSizeBytes int `json:"store_size_bytes"` +} + +func (s *DataStreamsStatsItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "backing_indices": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.BackingIndices = value + case float64: + f := int(v) + s.BackingIndices = f + } + + case "data_stream": + if err := dec.Decode(&s.DataStream); err != nil { + return err + } + + case "maximum_timestamp": + if err := dec.Decode(&s.MaximumTimestamp); err != nil { + return err + } + + case "store_size": + if err := dec.Decode(&s.StoreSize); err != nil { + return err + } + + case "store_size_bytes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.StoreSizeBytes = value + case float64: + f := int(v) + s.StoreSizeBytes = f + } + + } + } + return nil } // NewDataStreamsStatsItem returns a DataStreamsStatsItem. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamtimestamp.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamtimestamp.go index e0da4a7a4..645738062 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamtimestamp.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamtimestamp.go @@ -16,17 +16,59 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataStreamTimestamp type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/TypeMapping.ts#L57-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/TypeMapping.ts#L58-L60 type DataStreamTimestamp struct { Enabled bool `json:"enabled"` } +func (s *DataStreamTimestamp) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + // NewDataStreamTimestamp returns a DataStreamTimestamp. 
func NewDataStreamTimestamp() *DataStreamTimestamp { r := &DataStreamTimestamp{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamtimestampfield.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamtimestampfield.go index f14bd5ca8..911880446 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamtimestampfield.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamtimestampfield.go @@ -16,17 +16,52 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // DataStreamTimestampField type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/DataStream.ts#L48-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/DataStream.ts#L98-L103 type DataStreamTimestampField struct { + // Name Name of the timestamp field for the data stream, which must be `@timestamp`. + // The `@timestamp` field must be included in every document indexed to the data + // stream. Name string `json:"name"` } +func (s *DataStreamTimestampField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + // NewDataStreamTimestampField returns a DataStreamTimestampField. 
func NewDataStreamTimestampField() *DataStreamTimestampField { r := &DataStreamTimestampField{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamvisibility.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamvisibility.go index dced4a286..0ee6b6065 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamvisibility.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamvisibility.go @@ -16,17 +16,59 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataStreamVisibility type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/DataStream.ts#L57-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/DataStream.ts#L116-L118 type DataStreamVisibility struct { Hidden *bool `json:"hidden,omitempty"` } +func (s *DataStreamVisibility) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "hidden": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Hidden = &value + case bool: + s.Hidden = &v + } + + } + } + return nil +} + // NewDataStreamVisibility returns a DataStreamVisibility. 
func NewDataStreamVisibility() *DataStreamVisibility { r := &DataStreamVisibility{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamwithlifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamwithlifecycle.go new file mode 100644 index 000000000..8614f7145 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datastreamwithlifecycle.go @@ -0,0 +1,73 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + +// DataStreamWithLifecycle type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/get_data_lifecycle/IndicesGetDataLifecycleResponse.ts#L27-L30 +type DataStreamWithLifecycle struct { + Lifecycle *DataStreamLifecycle `json:"lifecycle,omitempty"` + Name string `json:"name"` +} + +func (s *DataStreamWithLifecycle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lifecycle": + if err := dec.Decode(&s.Lifecycle); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + +// NewDataStreamWithLifecycle returns a DataStreamWithLifecycle. +func NewDataStreamWithLifecycle() *DataStreamWithLifecycle { + r := &DataStreamWithLifecycle{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datatierphasestatistics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datatierphasestatistics.go index 61cffdafa..ca0cd75ce 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datatierphasestatistics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datatierphasestatistics.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataTierPhaseStatistics type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L86-L97 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L86-L97 type DataTierPhaseStatistics struct { DocCount int64 `json:"doc_count"` IndexCount int64 `json:"index_count"` @@ -36,6 +44,176 @@ type DataTierPhaseStatistics struct { TotalSizeBytes int64 `json:"total_size_bytes"` } +func (s *DataTierPhaseStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "index_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndexCount = value + case float64: + f := int64(v) + s.IndexCount = f + } + + case "node_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NodeCount = value + case float64: + f := int64(v) + s.NodeCount = f + } + + case "primary_shard_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PrimaryShardCount = value + case float64: + f := int64(v) + s.PrimaryShardCount = f + } + + case "primary_shard_size_avg_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) 
+ if err != nil { + return err + } + s.PrimaryShardSizeAvgBytes = value + case float64: + f := int64(v) + s.PrimaryShardSizeAvgBytes = f + } + + case "primary_shard_size_mad_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PrimaryShardSizeMadBytes = value + case float64: + f := int64(v) + s.PrimaryShardSizeMadBytes = f + } + + case "primary_shard_size_median_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PrimaryShardSizeMedianBytes = value + case float64: + f := int64(v) + s.PrimaryShardSizeMedianBytes = f + } + + case "primary_size_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PrimarySizeBytes = value + case float64: + f := int64(v) + s.PrimarySizeBytes = f + } + + case "total_shard_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalShardCount = value + case float64: + f := int64(v) + s.TotalShardCount = f + } + + case "total_size_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalSizeBytes = value + case float64: + f := int64(v) + s.TotalSizeBytes = f + } + + } + } + return nil +} + // NewDataTierPhaseStatistics returns a DataTierPhaseStatistics. 
func NewDataTierPhaseStatistics() *DataTierPhaseStatistics { r := &DataTierPhaseStatistics{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datatiers.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datatiers.go index ab133b583..32a340d48 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datatiers.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datatiers.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DataTiers type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L333-L340 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L339-L349 type DataTiers struct { Available bool `json:"available"` DataCold DataTierPhaseStatistics `json:"data_cold"` @@ -33,6 +41,79 @@ type DataTiers struct { Enabled bool `json:"enabled"` } +func (s *DataTiers) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "data_cold": + if err := dec.Decode(&s.DataCold); err != nil { + return err + } + + case "data_content": + if err := dec.Decode(&s.DataContent); err != nil { + 
return err + } + + case "data_frozen": + if err := dec.Decode(&s.DataFrozen); err != nil { + return err + } + + case "data_hot": + if err := dec.Decode(&s.DataHot); err != nil { + return err + } + + case "data_warm": + if err := dec.Decode(&s.DataWarm); err != nil { + return err + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + // NewDataTiers returns a DataTiers. func NewDataTiers() *DataTiers { r := &DataTiers{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datedecayfunction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datedecayfunction.go index acb2f4eeb..5c6f4e727 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datedecayfunction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datedecayfunction.go @@ -16,23 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode" - "encoding/json" "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode" ) // DateDecayFunction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L92-L94 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L186-L188 type DateDecayFunction struct { - DateDecayFunction map[string]DecayPlacementDateMathDuration `json:"-"` - MultiValueMode *multivaluemode.MultiValueMode `json:"multi_value_mode,omitempty"` + DateDecayFunction map[string]DecayPlacementDateMathDuration `json:"DateDecayFunction,omitempty"` + // MultiValueMode Determines how the distance is calculated when a field used for computing the + // decay contains multiple values. + MultiValueMode *multivaluemode.MultiValueMode `json:"multi_value_mode,omitempty"` } // MarhsalJSON overrides marshalling for types with additional properties @@ -54,6 +56,7 @@ func (s DateDecayFunction) MarshalJSON() ([]byte, error) { for key, value := range s.DateDecayFunction { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "DateDecayFunction") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datedistancefeaturequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datedistancefeaturequery.go index a20e0c1a4..5b0010ee5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datedistancefeaturequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datedistancefeaturequery.go @@ -16,21 +16,114 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DateDistanceFeatureQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L51-L54 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L67-L70 type DateDistanceFeatureQuery struct { - Boost *float32 `json:"boost,omitempty"` - Field string `json:"field"` - Origin string `json:"origin"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Field Name of the field used to calculate distances. This field must meet the + // following criteria: + // be a `date`, `date_nanos` or `geo_point` field; + // have an `index` mapping parameter value of `true`, which is the default; + // have an `doc_values` mapping parameter value of `true`, which is the default. + Field string `json:"field"` + // Origin Date or point of origin used to calculate distances. + // If the `field` value is a `date` or `date_nanos` field, the `origin` value + // must be a date. + // Date Math, such as `now-1h`, is supported. + // If the field value is a `geo_point` field, the `origin` value must be a + // geopoint. + Origin string `json:"origin"` + // Pivot Distance from the `origin` at which relevance scores receive half of the + // `boost` value. 
+ // If the `field` value is a `date` or `date_nanos` field, the `pivot` value + // must be a time unit, such as `1h` or `10d`. If the `field` value is a + // `geo_point` field, the `pivot` value must be a distance unit, such as `1km` + // or `12m`. Pivot Duration `json:"pivot"` QueryName_ *string `json:"_name,omitempty"` } +func (s *DateDistanceFeatureQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "origin": + if err := dec.Decode(&s.Origin); err != nil { + return err + } + + case "pivot": + if err := dec.Decode(&s.Pivot); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + // NewDateDistanceFeatureQuery returns a DateDistanceFeatureQuery. func NewDateDistanceFeatureQuery() *DateDistanceFeatureQuery { r := &DateDistanceFeatureQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datehistogramaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datehistogramaggregate.go index 5554cf921..8bfbd280c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datehistogramaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datehistogramaggregate.go @@ -16,27 +16,27 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // DateHistogramAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L347-L348 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L348-L349 type DateHistogramAggregate struct { Buckets BucketsDateHistogramBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *DateHistogramAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *DateHistogramAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]DateHistogramBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []DateHistogramBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datehistogramaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datehistogramaggregation.go index d635037aa..26cd5b720 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datehistogramaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datehistogramaggregation.go @@ -16,45 +16,69 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/calendarinterval" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/calendarinterval" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" ) // DateHistogramAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L93-L110 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L189-L247 type DateHistogramAggregation struct { + // CalendarInterval Calendar-aware interval. + // Can be specified using the unit name, such as `month`, or as a single unit + // quantity, such as `1M`. 
CalendarInterval *calendarinterval.CalendarInterval `json:"calendar_interval,omitempty"` - ExtendedBounds *ExtendedBoundsFieldDateMath `json:"extended_bounds,omitempty"` - Field *string `json:"field,omitempty"` - FixedInterval Duration `json:"fixed_interval,omitempty"` - Format *string `json:"format,omitempty"` - HardBounds *ExtendedBoundsFieldDateMath `json:"hard_bounds,omitempty"` - Interval Duration `json:"interval,omitempty"` - Keyed *bool `json:"keyed,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - MinDocCount *int `json:"min_doc_count,omitempty"` - Missing DateTime `json:"missing,omitempty"` - Name *string `json:"name,omitempty"` - Offset Duration `json:"offset,omitempty"` - Order AggregateOrder `json:"order,omitempty"` - Params map[string]json.RawMessage `json:"params,omitempty"` - Script Script `json:"script,omitempty"` - TimeZone *string `json:"time_zone,omitempty"` + // ExtendedBounds Enables extending the bounds of the histogram beyond the data itself. + ExtendedBounds *ExtendedBoundsFieldDateMath `json:"extended_bounds,omitempty"` + // Field The date field whose values are use to build a histogram. + Field *string `json:"field,omitempty"` + // FixedInterval Fixed intervals: a fixed number of SI units and never deviate, regardless of + // where they fall on the calendar. + FixedInterval Duration `json:"fixed_interval,omitempty"` + // Format The date format used to format `key_as_string` in the response. + // If no `format` is specified, the first date format specified in the field + // mapping is used. + Format *string `json:"format,omitempty"` + // HardBounds Limits the histogram to specified bounds. + HardBounds *ExtendedBoundsFieldDateMath `json:"hard_bounds,omitempty"` + Interval Duration `json:"interval,omitempty"` + // Keyed Set to `true` to associate a unique string key with each bucket and return + // the ranges as a hash rather than an array. 
+ Keyed *bool `json:"keyed,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // MinDocCount Only returns buckets that have `min_doc_count` number of documents. + // By default, all buckets between the first bucket that matches documents and + // the last one are returned. + MinDocCount *int `json:"min_doc_count,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing DateTime `json:"missing,omitempty"` + Name *string `json:"name,omitempty"` + // Offset Changes the start value of each bucket by the specified positive (`+`) or + // negative offset (`-`) duration. + Offset Duration `json:"offset,omitempty"` + // Order The sort order of the returned buckets. + Order AggregateOrder `json:"order,omitempty"` + Params map[string]json.RawMessage `json:"params,omitempty"` + Script Script `json:"script,omitempty"` + // TimeZone Time zone used for bucketing and rounding. + // Defaults to Coordinated Universal Time (UTC). 
+ TimeZone *string `json:"time_zone,omitempty"` } func (s *DateHistogramAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -89,9 +113,16 @@ func (s *DateHistogramAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "hard_bounds": if err := dec.Decode(&s.HardBounds); err != nil { @@ -104,8 +135,17 @@ func (s *DateHistogramAggregation) UnmarshalJSON(data []byte) error { } case "keyed": - if err := dec.Decode(&s.Keyed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Keyed = &value + case bool: + s.Keyed = &v } case "meta": @@ -114,8 +154,19 @@ func (s *DateHistogramAggregation) UnmarshalJSON(data []byte) error { } case "min_doc_count": - if err := dec.Decode(&s.MinDocCount); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinDocCount = &value + case float64: + f := int(v) + s.MinDocCount = &f } case "missing": @@ -124,9 +175,16 @@ func (s *DateHistogramAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o case "offset": if err := dec.Decode(&s.Offset); err != nil { @@ -140,19 +198,24 @@ func (s *DateHistogramAggregation) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': 
o := make(map[string]sortorder.SortOrder, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Order = o - case '[': o := make([]map[string]sortorder.SortOrder, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Order = o } case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } if err := dec.Decode(&s.Params); err != nil { return err } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datehistogrambucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datehistogrambucket.go index 37dd53f13..25f01d45b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datehistogrambucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datehistogrambucket.go @@ -16,25 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // DateHistogramBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L350-L353 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L351-L354 type DateHistogramBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -43,6 +41,7 @@ type DateHistogramBucket struct { } func (s *DateHistogramBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -56,451 +55,19 @@ func (s *DateHistogramBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if 
err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := 
NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != 
nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); 
err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "key": @@ -509,9 +76,529 @@ func (s *DateHistogramBucket) UnmarshalJSON(data []byte) error { } case "key_as_string": - if err := dec.Decode(&s.KeyAsString); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeyAsString = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = 
make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := 
NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := 
NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + 
case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil 
{ + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } } } @@ -537,6 +624,7 @@ func (s DateHistogramBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datehistogramgrouping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datehistogramgrouping.go index cca3eaa18..cdb6df130 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datehistogramgrouping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datehistogramgrouping.go @@ -16,21 +16,105 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DateHistogramGrouping type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/_types/Groupings.ts#L30-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/_types/Groupings.ts#L42-L73 type DateHistogramGrouping struct { + // CalendarInterval The interval of time buckets to be generated when rolling up. CalendarInterval Duration `json:"calendar_interval,omitempty"` - Delay Duration `json:"delay,omitempty"` - Field string `json:"field"` - FixedInterval Duration `json:"fixed_interval,omitempty"` - Format *string `json:"format,omitempty"` - Interval Duration `json:"interval,omitempty"` - TimeZone *string `json:"time_zone,omitempty"` + // Delay How long to wait before rolling up new documents. + // By default, the indexer attempts to roll up all data that is available. + // However, it is not uncommon for data to arrive out of order. + // The indexer is unable to deal with data that arrives after a time-span has + // been rolled up. + // You need to specify a delay that matches the longest period of time you + // expect out-of-order data to arrive. + Delay Duration `json:"delay,omitempty"` + // Field The date field that is to be rolled up. + Field string `json:"field"` + // FixedInterval The interval of time buckets to be generated when rolling up. + FixedInterval Duration `json:"fixed_interval,omitempty"` + Format *string `json:"format,omitempty"` + Interval Duration `json:"interval,omitempty"` + // TimeZone Defines what `time_zone` the rollup documents are stored as. + // Unlike raw data, which can shift timezones on the fly, rolled documents have + // to be stored with a specific timezone. + // By default, rollup documents are stored in `UTC`. 
+ TimeZone *string `json:"time_zone,omitempty"` +} + +func (s *DateHistogramGrouping) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "calendar_interval": + if err := dec.Decode(&s.CalendarInterval); err != nil { + return err + } + + case "delay": + if err := dec.Decode(&s.Delay); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "fixed_interval": + if err := dec.Decode(&s.FixedInterval); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "interval": + if err := dec.Decode(&s.Interval); err != nil { + return err + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return err + } + + } + } + return nil } // NewDateHistogramGrouping returns a DateHistogramGrouping. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dateindexnameprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dateindexnameprocessor.go index a93273f8c..610396b3f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dateindexnameprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dateindexnameprocessor.go @@ -16,31 +16,205 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DateIndexNameProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L164-L177 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L491-L529 type DateIndexNameProcessor struct { + // DateFormats An array of the expected date formats for parsing dates / timestamps in the + // document being preprocessed. + // Can be a java time pattern or one of the following formats: ISO8601, UNIX, + // UNIX_MS, or TAI64N. DateFormats []string `json:"date_formats"` // DateRounding How to round the date when formatting the date into the index name. Valid // values are: // `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and // `s` (second). // Supports template snippets. - DateRounding string `json:"date_rounding"` - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IndexNameFormat *string `json:"index_name_format,omitempty"` - IndexNamePrefix *string `json:"index_name_prefix,omitempty"` - Locale *string `json:"locale,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Tag *string `json:"tag,omitempty"` - Timezone *string `json:"timezone,omitempty"` + DateRounding string `json:"date_rounding"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to get the date or timestamp from. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IndexNameFormat The format to be used when printing the parsed date into the index name. 
+ // A valid java time pattern is expected here. + // Supports template snippets. + IndexNameFormat *string `json:"index_name_format,omitempty"` + // IndexNamePrefix A prefix of the index name to be prepended before the printed date. + // Supports template snippets. + IndexNamePrefix *string `json:"index_name_prefix,omitempty"` + // Locale The locale to use when parsing the date from the document being preprocessed, + // relevant when parsing month names or week days. + Locale *string `json:"locale,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // Timezone The timezone to use when parsing the date and when date math index supports + // resolves expressions into concrete index names. + Timezone *string `json:"timezone,omitempty"` +} + +func (s *DateIndexNameProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "date_formats": + if err := dec.Decode(&s.DateFormats); err != nil { + return err + } + + case "date_rounding": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DateRounding = o + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = 
string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "index_name_format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexNameFormat = &o + + case "index_name_prefix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexNamePrefix = &o + + case "locale": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Locale = &o + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "timezone": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Timezone = &o + + } + } + return nil } // NewDateIndexNameProcessor returns a DateIndexNameProcessor. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datenanosproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datenanosproperty.go index ab6bffecf..28f8b96db 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datenanosproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datenanosproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // DateNanosProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L73-L81 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L73-L81 type DateNanosProperty struct { Boost *Float64 `json:"boost,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -54,6 +54,7 @@ type DateNanosProperty struct { } func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -68,18 +69,49 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case 
"copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -88,6 +120,9 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -96,7 +131,9 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -375,33 +412,74 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: 
+ value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } @@ -412,11 +490,25 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { } case "precision_step": - if err := dec.Decode(&s.PrecisionStep); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PrecisionStep = &value + case float64: + f := int(v) + s.PrecisionStep = &f } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -425,7 +517,9 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -704,20 +798,38 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); 
err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -730,6 +842,33 @@ func (s *DateNanosProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s DateNanosProperty) MarshalJSON() ([]byte, error) { + type innerDateNanosProperty DateNanosProperty + tmp := innerDateNanosProperty{ + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + Format: s.Format, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + PrecisionStep: s.PrecisionStep, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + tmp.Type = "date_nanos" + + return json.Marshal(tmp) +} + // NewDateNanosProperty returns a DateNanosProperty. 
func NewDateNanosProperty() *DateNanosProperty { r := &DateNanosProperty{ @@ -738,7 +877,5 @@ func NewDateNanosProperty() *DateNanosProperty { Properties: make(map[string]Property, 0), } - r.Type = "date_nanos" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dateprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dateprocessor.go index 22d0fed13..bb5661979 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dateprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dateprocessor.go @@ -16,24 +16,163 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DateProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L179-L185 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L531-L558 type DateProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - Formats []string `json:"formats"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - Locale *string `json:"locale,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` - Timezone *string `json:"timezone,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. 
+ Description *string `json:"description,omitempty"` + // Field The field to get the date from. + Field string `json:"field"` + // Formats An array of the expected date formats. + // Can be a java time pattern or one of the following formats: ISO8601, UNIX, + // UNIX_MS, or TAI64N. + Formats []string `json:"formats"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // Locale The locale to use when parsing the date, relevant when parsing month names or + // week days. + // Supports template snippets. + Locale *string `json:"locale,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field that will hold the parsed date. + TargetField *string `json:"target_field,omitempty"` + // Timezone The timezone to use when parsing the date. + // Supports template snippets. 
+ Timezone *string `json:"timezone,omitempty"` +} + +func (s *DateProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "formats": + if err := dec.Decode(&s.Formats); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "locale": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Locale = &o + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + case "timezone": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Timezone = &o + + } + } + return nil } // 
NewDateProcessor returns a DateProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dateproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dateproperty.go index e77e06c4d..6fd294882 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dateproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dateproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // DateProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L61-L71 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L61-L71 type DateProperty struct { Boost *Float64 `json:"boost,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -56,6 +56,7 @@ type DateProperty struct { } func (s *DateProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -70,18 +71,49 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -95,6 +127,9 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ 
-103,7 +138,9 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -382,38 +419,86 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "locale": - if err := dec.Decode(&s.Locale); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Locale = &o case "meta": + if 
s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } @@ -424,11 +509,25 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { } case "precision_step": - if err := dec.Decode(&s.PrecisionStep); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PrecisionStep = &value + case float64: + f := int(v) + s.PrecisionStep = &f } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -437,7 +536,9 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -716,20 +817,38 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -742,6 +861,35 @@ func (s *DateProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func 
(s DateProperty) MarshalJSON() ([]byte, error) { + type innerDateProperty DateProperty + tmp := innerDateProperty{ + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fielddata: s.Fielddata, + Fields: s.Fields, + Format: s.Format, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Locale: s.Locale, + Meta: s.Meta, + NullValue: s.NullValue, + PrecisionStep: s.PrecisionStep, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + tmp.Type = "date" + + return json.Marshal(tmp) +} + // NewDateProperty returns a DateProperty. func NewDateProperty() *DateProperty { r := &DateProperty{ @@ -750,7 +898,5 @@ func NewDateProperty() *DateProperty { Properties: make(map[string]Property, 0), } - r.Type = "date" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangeaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangeaggregate.go index 506152002..3a661303d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangeaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangeaggregate.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // DateRangeAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L542-L547 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L543-L548 type DateRangeAggregate struct { - Buckets BucketsRangeBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsRangeBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *DateRangeAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *DateRangeAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]RangeBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []RangeBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangeaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangeaggregation.go index 97cbc7c31..48483a8f4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangeaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangeaggregation.go @@ -16,26 +16,121 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // DateRangeAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L131-L138 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L268-L294 type DateRangeAggregation struct { - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` - Keyed *bool `json:"keyed,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Missing Missing `json:"missing,omitempty"` - Name *string `json:"name,omitempty"` - Ranges []DateRangeExpression `json:"ranges,omitempty"` - TimeZone *string `json:"time_zone,omitempty"` + // Field The date field whose values are use to build ranges. + Field *string `json:"field,omitempty"` + // Format The date format used to format `from` and `to` in the response. + Format *string `json:"format,omitempty"` + // Keyed Set to `true` to associate a unique string key with each bucket and returns + // the ranges as a hash rather than an array. + Keyed *bool `json:"keyed,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Name *string `json:"name,omitempty"` + // Ranges Array of date ranges. + Ranges []DateRangeExpression `json:"ranges,omitempty"` + // TimeZone Time zone used to convert dates from another time zone to UTC. 
+ TimeZone *string `json:"time_zone,omitempty"` +} + +func (s *DateRangeAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "keyed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "ranges": + if err := dec.Decode(&s.Ranges); err != nil { + return err + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return err + } + + } + } + return nil } // NewDateRangeAggregation returns a DateRangeAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangeexpression.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangeexpression.go index 0bfd055cc..482ba0450 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangeexpression.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangeexpression.go @@ -16,17 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DateRangeExpression type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L149-L153 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L305-L318 type DateRangeExpression struct { + // From Start of the range (inclusive). From FieldDateMath `json:"from,omitempty"` - Key *string `json:"key,omitempty"` - To FieldDateMath `json:"to,omitempty"` + // Key Custom key to return the range with. + Key *string `json:"key,omitempty"` + // To End of the range (exclusive). + To FieldDateMath `json:"to,omitempty"` +} + +func (s *DateRangeExpression) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "from": + if err := dec.Decode(&s.From); err != nil { + return err + } + + case "key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = &o + + case "to": + if err := dec.Decode(&s.To); err != nil { + return err + } + + } + } + return nil } // NewDateRangeExpression returns a DateRangeExpression. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangeproperty.go index 5879dae34..7fba14fff 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangeproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangeproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // DateRangeProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/range.ts#L29-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/range.ts#L29-L32 type DateRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -52,6 +52,7 @@ type DateRangeProperty struct { } func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -66,23 +67,63 @@ func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case 
"coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -91,6 +132,9 @@ func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -99,7 +143,9 @@ func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -378,33 +424,68 @@ func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + 
o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -413,7 +494,9 @@ func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -692,20 +775,38 @@ func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v 
:= tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -718,6 +819,31 @@ func (s *DateRangeProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s DateRangeProperty) MarshalJSON() ([]byte, error) { + type innerDateRangeProperty DateRangeProperty + tmp := innerDateRangeProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + Format: s.Format, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + tmp.Type = "date_range" + + return json.Marshal(tmp) +} + // NewDateRangeProperty returns a DateRangeProperty. func NewDateRangeProperty() *DateRangeProperty { r := &DateRangeProperty{ @@ -726,7 +852,5 @@ func NewDateRangeProperty() *DateRangeProperty { Properties: make(map[string]Property, 0), } - r.Type = "date_range" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangequery.go index edf87eced..8bca56d14 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/daterangequery.go @@ -16,29 +16,141 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation" ) // DateRangeQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/term.ts#L72-L81 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/term.ts#L116-L143 type DateRangeQuery struct { - Boost *float32 `json:"boost,omitempty"` - Format *string `json:"format,omitempty"` - From string `json:"from,omitempty"` - Gt *string `json:"gt,omitempty"` - Gte *string `json:"gte,omitempty"` - Lt *string `json:"lt,omitempty"` - Lte *string `json:"lte,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Relation *rangerelation.RangeRelation `json:"relation,omitempty"` - TimeZone *string `json:"time_zone,omitempty"` - To string `json:"to,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Format Date format used to convert `date` values in the query. + Format *string `json:"format,omitempty"` + From string `json:"from,omitempty"` + // Gt Greater than. + Gt *string `json:"gt,omitempty"` + // Gte Greater than or equal to. + Gte *string `json:"gte,omitempty"` + // Lt Less than. + Lt *string `json:"lt,omitempty"` + // Lte Less than or equal to. + Lte *string `json:"lte,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Relation Indicates how the range query matches values for `range` fields. + Relation *rangerelation.RangeRelation `json:"relation,omitempty"` + // TimeZone Coordinated Universal Time (UTC) offset or IANA time zone used to convert + // `date` values in the query to UTC. 
+ TimeZone *string `json:"time_zone,omitempty"` + To string `json:"to,omitempty"` +} + +func (s *DateRangeQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "format": + if err := dec.Decode(&s.Format); err != nil { + return err + } + + case "from": + if err := dec.Decode(&s.From); err != nil { + return err + } + + case "gt": + if err := dec.Decode(&s.Gt); err != nil { + return err + } + + case "gte": + if err := dec.Decode(&s.Gte); err != nil { + return err + } + + case "lt": + if err := dec.Decode(&s.Lt); err != nil { + return err + } + + case "lte": + if err := dec.Decode(&s.Lte); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "relation": + if err := dec.Decode(&s.Relation); err != nil { + return err + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return err + } + + case "to": + if err := dec.Decode(&s.To); err != nil { + return err + } + + } + } + return nil } // NewDateRangeQuery returns a DateRangeQuery. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datetime.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datetime.go index 21607e7d5..d655b713e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datetime.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/datetime.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // string // int64 // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Time.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Time.ts#L22-L27 type DateTime interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/decayfunction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/decayfunction.go index ced987fe1..943df0d83 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/decayfunction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/decayfunction.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,5 +26,5 @@ package types // NumericDecayFunction // GeoDecayFunction // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L100-L105 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L194-L199 type DecayFunction interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/decayplacementdatemathduration.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/decayplacementdatemathduration.go index 84e33e398..dcf485de3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/decayplacementdatemathduration.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/decayplacementdatemathduration.go @@ -16,18 +16,84 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DecayPlacementDateMathDuration type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L77-L82 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L153-L172 type DecayPlacementDateMathDuration struct { - Decay *Float64 `json:"decay,omitempty"` + // Decay Defines how documents are scored at the distance given at scale. + Decay *Float64 `json:"decay,omitempty"` + // Offset If defined, the decay function will only compute the decay function for + // documents with a distance greater than the defined `offset`. Offset Duration `json:"offset,omitempty"` - Origin *string `json:"origin,omitempty"` - Scale Duration `json:"scale,omitempty"` + // Origin The point of origin used for calculating distance. Must be given as a number + // for numeric field, date for date fields and geo point for geo fields. + Origin *string `json:"origin,omitempty"` + // Scale Defines the distance from origin + offset at which the computed score will + // equal `decay` parameter. 
+ Scale Duration `json:"scale,omitempty"` +} + +func (s *DecayPlacementDateMathDuration) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decay": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Decay = &f + case float64: + f := Float64(v) + s.Decay = &f + } + + case "offset": + if err := dec.Decode(&s.Offset); err != nil { + return err + } + + case "origin": + if err := dec.Decode(&s.Origin); err != nil { + return err + } + + case "scale": + if err := dec.Decode(&s.Scale); err != nil { + return err + } + + } + } + return nil } // NewDecayPlacementDateMathDuration returns a DecayPlacementDateMathDuration. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/decayplacementdoubledouble.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/decayplacementdoubledouble.go index 19e41150e..6fbf094e6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/decayplacementdoubledouble.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/decayplacementdoubledouble.go @@ -16,18 +16,117 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DecayPlacementdoubledouble type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L77-L82 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L153-L172 type DecayPlacementdoubledouble struct { - Decay *Float64 `json:"decay,omitempty"` + // Decay Defines how documents are scored at the distance given at scale. + Decay *Float64 `json:"decay,omitempty"` + // Offset If defined, the decay function will only compute the decay function for + // documents with a distance greater than the defined `offset`. Offset *Float64 `json:"offset,omitempty"` + // Origin The point of origin used for calculating distance. Must be given as a number + // for numeric field, date for date fields and geo point for geo fields. Origin *Float64 `json:"origin,omitempty"` - Scale *Float64 `json:"scale,omitempty"` + // Scale Defines the distance from origin + offset at which the computed score will + // equal `decay` parameter. 
+ Scale *Float64 `json:"scale,omitempty"` +} + +func (s *DecayPlacementdoubledouble) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decay": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Decay = &f + case float64: + f := Float64(v) + s.Decay = &f + } + + case "offset": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Offset = &f + case float64: + f := Float64(v) + s.Offset = &f + } + + case "origin": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Origin = &f + case float64: + f := Float64(v) + s.Origin = &f + } + + case "scale": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Scale = &f + case float64: + f := Float64(v) + s.Scale = &f + } + + } + } + return nil } // NewDecayPlacementdoubledouble returns a DecayPlacementdoubledouble. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/decayplacementgeolocationdistance.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/decayplacementgeolocationdistance.go index 41a784de5..641648fab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/decayplacementgeolocationdistance.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/decayplacementgeolocationdistance.go @@ -16,18 +16,84 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DecayPlacementGeoLocationDistance type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L77-L82 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L153-L172 type DecayPlacementGeoLocationDistance struct { - Decay *Float64 `json:"decay,omitempty"` - Offset *string `json:"offset,omitempty"` + // Decay Defines how documents are scored at the distance given at scale. + Decay *Float64 `json:"decay,omitempty"` + // Offset If defined, the decay function will only compute the decay function for + // documents with a distance greater than the defined `offset`. + Offset *string `json:"offset,omitempty"` + // Origin The point of origin used for calculating distance. Must be given as a number + // for numeric field, date for date fields and geo point for geo fields. Origin GeoLocation `json:"origin,omitempty"` - Scale *string `json:"scale,omitempty"` + // Scale Defines the distance from origin + offset at which the computed score will + // equal `decay` parameter. 
+ Scale *string `json:"scale,omitempty"` +} + +func (s *DecayPlacementGeoLocationDistance) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decay": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Decay = &f + case float64: + f := Float64(v) + s.Decay = &f + } + + case "offset": + if err := dec.Decode(&s.Offset); err != nil { + return err + } + + case "origin": + if err := dec.Decode(&s.Origin); err != nil { + return err + } + + case "scale": + if err := dec.Decode(&s.Scale); err != nil { + return err + } + + } + } + return nil } // NewDecayPlacementGeoLocationDistance returns a DecayPlacementGeoLocationDistance. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/defaults.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/defaults.go index 11c1926f6..18ec36837 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/defaults.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/defaults.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Defaults type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/info/types.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/info/types.ts#L24-L27 type Defaults struct { AnomalyDetectors AnomalyDetectors `json:"anomaly_detectors"` Datafeeds Datafeeds `json:"datafeeds"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/definition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/definition.go index 6ada3be36..1312f9d75 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/definition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/definition.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Definition type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model/types.ts#L24-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model/types.ts#L24-L29 type Definition struct { // Preprocessors Collection of preprocessors Preprocessors []Preprocessor `json:"preprocessors,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/delayeddatacheckconfig.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/delayeddatacheckconfig.go index 6737e95a1..853e706bd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/delayeddatacheckconfig.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/delayeddatacheckconfig.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DelayedDataCheckConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Datafeed.ts#L119-L130 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Datafeed.ts#L119-L130 type DelayedDataCheckConfig struct { // CheckWindow The window of time that is searched for late data. This window of time ends // with the latest finalized bucket. 
@@ -35,6 +43,45 @@ type DelayedDataCheckConfig struct { Enabled bool `json:"enabled"` } +func (s *DelayedDataCheckConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "check_window": + if err := dec.Decode(&s.CheckWindow); err != nil { + return err + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + // NewDelayedDataCheckConfig returns a DelayedDataCheckConfig. func NewDelayedDataCheckConfig() *DelayedDataCheckConfig { r := &DelayedDataCheckConfig{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/deleteoperation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/deleteoperation.go new file mode 100644 index 000000000..8d455946a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/deleteoperation.go @@ -0,0 +1,119 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" +) + +// DeleteOperation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/bulk/types.ts#L134-L134 +type DeleteOperation struct { + // Id_ The document ID. + Id_ *string `json:"_id,omitempty"` + IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` + IfSeqNo *int64 `json:"if_seq_no,omitempty"` + // Index_ Name of the index or index alias to perform the action on. + Index_ *string `json:"_index,omitempty"` + // Routing Custom value used to route operations to a specific shard. + Routing *string `json:"routing,omitempty"` + Version *int64 `json:"version,omitempty"` + VersionType *versiontype.VersionType `json:"version_type,omitempty"` +} + +func (s *DeleteOperation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "if_primary_term": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IfPrimaryTerm = &value + case float64: + f := int64(v) + s.IfPrimaryTerm = &f + } + + case "if_seq_no": + if err := dec.Decode(&s.IfSeqNo); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + 
return err + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return err + } + + } + } + return nil +} + +// NewDeleteOperation returns a DeleteOperation. +func NewDeleteOperation() *DeleteOperation { + r := &DeleteOperation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/delimitedpayloadtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/delimitedpayloadtokenfilter.go index 79cf1bbc8..a7dcdd753 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/delimitedpayloadtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/delimitedpayloadtokenfilter.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/delimitedpayloadencoding" ) // DelimitedPayloadTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L67-L71 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L68-L72 type DelimitedPayloadTokenFilter struct { Delimiter *string `json:"delimiter,omitempty"` Encoding *delimitedpayloadencoding.DelimitedPayloadEncoding `json:"encoding,omitempty"` @@ -34,11 +40,71 @@ type DelimitedPayloadTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *DelimitedPayloadTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "delimiter": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Delimiter = &o + + case "encoding": + if err := dec.Decode(&s.Encoding); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DelimitedPayloadTokenFilter) MarshalJSON() ([]byte, error) { + type innerDelimitedPayloadTokenFilter DelimitedPayloadTokenFilter + tmp := innerDelimitedPayloadTokenFilter{ + Delimiter: s.Delimiter, + Encoding: s.Encoding, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "delimited_payload" + + return json.Marshal(tmp) +} + // NewDelimitedPayloadTokenFilter returns a DelimitedPayloadTokenFilter. 
func NewDelimitedPayloadTokenFilter() *DelimitedPayloadTokenFilter { r := &DelimitedPayloadTokenFilter{} - r.Type = "delimited_payload" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/densevectorindexoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/densevectorindexoptions.go index 2d54d0b06..2014e0e7a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/densevectorindexoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/densevectorindexoptions.go @@ -16,19 +16,91 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DenseVectorIndexOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/DenseVectorIndexOptions.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/DenseVectorIndexOptions.ts#L22-L26 type DenseVectorIndexOptions struct { EfConstruction int `json:"ef_construction"` M int `json:"m"` Type string `json:"type"` } +func (s *DenseVectorIndexOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ef_construction": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.EfConstruction = value + case float64: + f := int(v) + s.EfConstruction = f + } + + case "m": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.M = value + case float64: + f := int(v) + s.M = f + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewDenseVectorIndexOptions returns a DenseVectorIndexOptions. 
func NewDenseVectorIndexOptions() *DenseVectorIndexOptions { r := &DenseVectorIndexOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/densevectorproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/densevectorproperty.go index 572958b0b..db1913ac3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/densevectorproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/densevectorproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // DenseVectorProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/complex.ts#L51-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/complex.ts#L51-L57 type DenseVectorProperty struct { Dims int `json:"dims"` Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` @@ -48,6 +48,7 @@ type DenseVectorProperty struct { } func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -62,8 +63,19 @@ func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { switch t { case "dims": - if err := dec.Decode(&s.Dims); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Dims = value + case float64: + f := int(v) + s.Dims = f } case "dynamic": @@ -72,6 +84,9 @@ func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -80,7 +95,9 @@ func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -359,20 +376,42 @@ func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case 
string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "index_options": @@ -381,11 +420,17 @@ func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -394,7 +439,9 @@ func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -673,16 +720,25 @@ func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "type": if err := dec.Decode(&s.Type); err != nil { @@ -694,6 +750,27 @@ func (s *DenseVectorProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s DenseVectorProperty) MarshalJSON() ([]byte, error) { + 
type innerDenseVectorProperty DenseVectorProperty + tmp := innerDenseVectorProperty{ + Dims: s.Dims, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + IndexOptions: s.IndexOptions, + Meta: s.Meta, + Properties: s.Properties, + Similarity: s.Similarity, + Type: s.Type, + } + + tmp.Type = "dense_vector" + + return json.Marshal(tmp) +} + // NewDenseVectorProperty returns a DenseVectorProperty. func NewDenseVectorProperty() *DenseVectorProperty { r := &DenseVectorProperty{ @@ -702,7 +779,5 @@ func NewDenseVectorProperty() *DenseVectorProperty { Properties: make(map[string]Property, 0), } - r.Type = "dense_vector" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/deprecation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/deprecation.go index 444f01d3a..7aa61e4d9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/deprecation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/deprecation.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deprecationlevel" ) // Deprecation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/migration/deprecations/types.ts#L29-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/migration/deprecations/types.ts#L29-L35 type Deprecation struct { Details string `json:"details"` // Level The level property describes the significance of the issue. 
@@ -35,6 +41,67 @@ type Deprecation struct { Url string `json:"url"` } +func (s *Deprecation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Details = o + + case "level": + if err := dec.Decode(&s.Level); err != nil { + return err + } + + case "message": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Message = o + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = o + + } + } + return nil +} + // NewDeprecation returns a Deprecation. func NewDeprecation() *Deprecation { r := &Deprecation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/deprecationindexing.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/deprecationindexing.go index d3f96beee..8207531d1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/deprecationindexing.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/deprecationindexing.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DeprecationIndexing type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L140-L142 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L143-L145 type DeprecationIndexing struct { Enabled string `json:"enabled"` } +func (s *DeprecationIndexing) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Enabled = o + + } + } + return nil +} + // NewDeprecationIndexing returns a DeprecationIndexing. func NewDeprecationIndexing() *DeprecationIndexing { r := &DeprecationIndexing{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/derivativeaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/derivativeaggregate.go index cc8653216..39ed43229 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/derivativeaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/derivativeaggregate.go @@ -16,21 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // DerivativeAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L226-L230 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L227-L231 type DerivativeAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - NormalizedValue *Float64 `json:"normalized_value,omitempty"` - NormalizedValueAsString *string `json:"normalized_value_as_string,omitempty"` + Meta Metadata `json:"meta,omitempty"` + NormalizedValue *Float64 `json:"normalized_value,omitempty"` + NormalizedValueAsString *string `json:"normalized_value_as_string,omitempty"` // Value The metric value. A missing value generally means that there was no data to // aggregate, // unless specified otherwise. @@ -38,6 +42,76 @@ type DerivativeAggregate struct { ValueAsString *string `json:"value_as_string,omitempty"` } +func (s *DerivativeAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "normalized_value": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.NormalizedValue = &f + case float64: + f := Float64(v) + s.NormalizedValue = &f + } + + case "normalized_value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NormalizedValueAsString = &o + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + case "value_as_string": + var tmp json.RawMessage + 
if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + // NewDerivativeAggregate returns a DerivativeAggregate. func NewDerivativeAggregate() *DerivativeAggregate { r := &DerivativeAggregate{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/derivativeaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/derivativeaggregation.go index 9bf9ec43d..ea0be6724 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/derivativeaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/derivativeaggregation.go @@ -16,33 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // DerivativeAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L165-L165 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L196-L196 type DerivativeAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
- BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` } func (s *DerivativeAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -62,9 +67,16 @@ func (s *DerivativeAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -77,9 +89,16 @@ func (s *DerivativeAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/detectionrule.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/detectionrule.go index 11054de8a..3274c59af 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/detectionrule.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/detectionrule.go @@ -16,7 +16,7 @@ 
// under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // DetectionRule type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Rule.ts#L25-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Rule.ts#L25-L39 type DetectionRule struct { // Actions The set of actions to be triggered when the rule applies. If more than one // action is specified the effects of all actions are combined. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/detector.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/detector.go index 637022091..f85d29ff2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/detector.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/detector.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/excludefrequent" ) // Detector type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Detector.ts#L25-L67 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Detector.ts#L25-L67 type Detector struct { // ByFieldName The field used to split the data. In particular, this property is used for // analyzing the splits with respect to their own history. It is used for @@ -66,6 +72,110 @@ type Detector struct { UseNull *bool `json:"use_null,omitempty"` } +func (s *Detector) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "by_field_name": + if err := dec.Decode(&s.ByFieldName); err != nil { + return err + } + + case "custom_rules": + if err := dec.Decode(&s.CustomRules); err != nil { + return err + } + + case "detector_description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DetectorDescription = &o + + case "detector_index": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DetectorIndex = &value + case float64: + f := int(v) + s.DetectorIndex = &f + } + + case "exclude_frequent": + if err := dec.Decode(&s.ExcludeFrequent); err != nil { + return err + } + + case "field_name": + if err := dec.Decode(&s.FieldName); err != nil { + return err + } + + case "function": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Function = &o + + case "over_field_name": + if err := dec.Decode(&s.OverFieldName); err != nil { 
+ return err + } + + case "partition_field_name": + if err := dec.Decode(&s.PartitionFieldName); err != nil { + return err + } + + case "use_null": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.UseNull = &value + case bool: + s.UseNull = &v + } + + } + } + return nil +} + // NewDetector returns a Detector. func NewDetector() *Detector { r := &Detector{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/detectorread.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/detectorread.go index 6b78783f6..9e91b5b81 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/detectorread.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/detectorread.go @@ -16,56 +16,173 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/excludefrequent" ) // DetectorRead type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Detector.ts#L69-L80 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Detector.ts#L69-L125 type DetectorRead struct { - // ByFieldName The field used to split the data. In particular, this property is used for - // analyzing the splits with respect to their own history. It is used for - // finding unusual values in the context of the split. + // ByFieldName The field used to split the data. 
+ // In particular, this property is used for analyzing the splits with respect to + // their own history. + // It is used for finding unusual values in the context of the split. ByFieldName *string `json:"by_field_name,omitempty"` - // CustomRules Custom rules enable you to customize the way detectors operate. For example, - // a rule may dictate conditions under which results should be skipped. Kibana - // refers to custom rules as job rules. + // CustomRules An array of custom rule objects, which enable you to customize the way + // detectors operate. + // For example, a rule may dictate to the detector conditions under which + // results should be skipped. + // Kibana refers to custom rules as job rules. CustomRules []DetectionRule `json:"custom_rules,omitempty"` // DetectorDescription A description of the detector. DetectorDescription *string `json:"detector_description,omitempty"` - // DetectorIndex A unique identifier for the detector. This identifier is based on the order - // of the detectors in the `analysis_config`, starting at zero. If you specify a - // value for this property, it is ignored. + // DetectorIndex A unique identifier for the detector. + // This identifier is based on the order of the detectors in the + // `analysis_config`, starting at zero. DetectorIndex *int `json:"detector_index,omitempty"` - // ExcludeFrequent If set, frequent entities are excluded from influencing the anomaly results. - // Entities can be considered frequent over time or frequent in a population. If - // you are working with both over and by fields, you can set `exclude_frequent` - // to `all` for both fields, or to `by` or `over` for those specific fields. + // ExcludeFrequent Contains one of the following values: `all`, `none`, `by`, or `over`. + // If set, frequent entities are excluded from influencing the anomaly results. + // Entities can be considered frequent over time or frequent in a population. 
+ // If you are working with both over and by fields, then you can set + // `exclude_frequent` to all for both fields, or to `by` or `over` for those + // specific fields. ExcludeFrequent *excludefrequent.ExcludeFrequent `json:"exclude_frequent,omitempty"` - // FieldName The field that the detector uses in the function. If you use an event rate - // function such as count or rare, do not specify this field. The `field_name` - // cannot contain double quotes or backslashes. + // FieldName The field that the detector uses in the function. + // If you use an event rate function such as `count` or `rare`, do not specify + // this field. FieldName *string `json:"field_name,omitempty"` - // Function The analysis function that is used. For example, `count`, `rare`, `mean`, - // `min`, `max`, or `sum`. + // Function The analysis function that is used. + // For example, `count`, `rare`, `mean`, `min`, `max`, and `sum`. Function string `json:"function"` - // OverFieldName The field used to split the data. In particular, this property is used for - // analyzing the splits with respect to the history of all splits. It is used - // for finding unusual values in the population of all splits. + // OverFieldName The field used to split the data. + // In particular, this property is used for analyzing the splits with respect to + // the history of all splits. + // It is used for finding unusual values in the population of all splits. OverFieldName *string `json:"over_field_name,omitempty"` - // PartitionFieldName The field used to segment the analysis. When you use this property, you have - // completely independent baselines for each value of this field. + // PartitionFieldName The field used to segment the analysis. + // When you use this property, you have completely independent baselines for + // each value of this field. 
PartitionFieldName *string `json:"partition_field_name,omitempty"` // UseNull Defines whether a new series is used as the null series when there is no // value for the by or partition fields. UseNull *bool `json:"use_null,omitempty"` } +func (s *DetectorRead) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "by_field_name": + if err := dec.Decode(&s.ByFieldName); err != nil { + return err + } + + case "custom_rules": + if err := dec.Decode(&s.CustomRules); err != nil { + return err + } + + case "detector_description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DetectorDescription = &o + + case "detector_index": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DetectorIndex = &value + case float64: + f := int(v) + s.DetectorIndex = &f + } + + case "exclude_frequent": + if err := dec.Decode(&s.ExcludeFrequent); err != nil { + return err + } + + case "field_name": + if err := dec.Decode(&s.FieldName); err != nil { + return err + } + + case "function": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Function = o + + case "over_field_name": + if err := dec.Decode(&s.OverFieldName); err != nil { + return err + } + + case "partition_field_name": + if err := dec.Decode(&s.PartitionFieldName); err != nil { + return err + } + + case "use_null": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.UseNull = &value + case 
bool: + s.UseNull = &v + } + + } + } + return nil +} + // NewDetectorRead returns a DetectorRead. func NewDetectorRead() *DetectorRead { r := &DetectorRead{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diagnosis.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diagnosis.go new file mode 100644 index 000000000..427f8ea7e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diagnosis.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// Diagnosis type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L49-L55 +type Diagnosis struct { + Action string `json:"action"` + AffectedResources DiagnosisAffectedResources `json:"affected_resources"` + Cause string `json:"cause"` + HelpUrl string `json:"help_url"` + Id string `json:"id"` +} + +func (s *Diagnosis) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "action": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Action = o + + case "affected_resources": + if err := dec.Decode(&s.AffectedResources); err != nil { + return err + } + + case "cause": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Cause = o + + case "help_url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.HelpUrl = o + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = o + + } + } + return nil +} + +// NewDiagnosis returns a Diagnosis. 
+func NewDiagnosis() *Diagnosis { + r := &Diagnosis{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diagnosisaffectedresources.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diagnosisaffectedresources.go new file mode 100644 index 000000000..f8a15d878 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diagnosisaffectedresources.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + +// DiagnosisAffectedResources type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L57-L63 +type DiagnosisAffectedResources struct { + FeatureStates []string `json:"feature_states,omitempty"` + Indices []string `json:"indices,omitempty"` + Nodes []IndicatorNode `json:"nodes,omitempty"` + SlmPolicies []string `json:"slm_policies,omitempty"` + SnapshotRepositories []string `json:"snapshot_repositories,omitempty"` +} + +func (s *DiagnosisAffectedResources) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_states": + if err := dec.Decode(&s.FeatureStates); err != nil { + return err + } + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return err + } + } + + case "nodes": + if err := dec.Decode(&s.Nodes); err != nil { + return err + } + + case "slm_policies": + if err := dec.Decode(&s.SlmPolicies); err != nil { + return err + } + + case "snapshot_repositories": + if err := dec.Decode(&s.SnapshotRepositories); err != nil { + return err + } + + } + } + return nil +} + +// NewDiagnosisAffectedResources returns a DiagnosisAffectedResources. 
+func NewDiagnosisAffectedResources() *DiagnosisAffectedResources { + r := &DiagnosisAffectedResources{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dictionarydecompoundertokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dictionarydecompoundertokenfilter.go index 8cd4d0098..12cc1ce01 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dictionarydecompoundertokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dictionarydecompoundertokenfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DictionaryDecompounderTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L53-L55 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L54-L56 type DictionaryDecompounderTokenFilter struct { HyphenationPatternsPath *string `json:"hyphenation_patterns_path,omitempty"` MaxSubwordSize *int `json:"max_subword_size,omitempty"` @@ -35,11 +43,150 @@ type DictionaryDecompounderTokenFilter struct { WordListPath *string `json:"word_list_path,omitempty"` } +func (s *DictionaryDecompounderTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "hyphenation_patterns_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.HyphenationPatternsPath = &o + + case "max_subword_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxSubwordSize = &value + case float64: + f := int(v) + s.MaxSubwordSize = &f + } + + case "min_subword_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinSubwordSize = &value + case float64: + f := int(v) + s.MinSubwordSize = &f + } + + case "min_word_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinWordSize = &value + case float64: + f := int(v) + s.MinWordSize = &f + } + + case "only_longest_match": + var tmp interface{} + 
dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.OnlyLongestMatch = &value + case bool: + s.OnlyLongestMatch = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + case "word_list": + if err := dec.Decode(&s.WordList); err != nil { + return err + } + + case "word_list_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WordListPath = &o + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DictionaryDecompounderTokenFilter) MarshalJSON() ([]byte, error) { + type innerDictionaryDecompounderTokenFilter DictionaryDecompounderTokenFilter + tmp := innerDictionaryDecompounderTokenFilter{ + HyphenationPatternsPath: s.HyphenationPatternsPath, + MaxSubwordSize: s.MaxSubwordSize, + MinSubwordSize: s.MinSubwordSize, + MinWordSize: s.MinWordSize, + OnlyLongestMatch: s.OnlyLongestMatch, + Type: s.Type, + Version: s.Version, + WordList: s.WordList, + WordListPath: s.WordListPath, + } + + tmp.Type = "dictionary_decompounder" + + return json.Marshal(tmp) +} + // NewDictionaryDecompounderTokenFilter returns a DictionaryDecompounderTokenFilter. 
func NewDictionaryDecompounderTokenFilter() *DictionaryDecompounderTokenFilter { r := &DictionaryDecompounderTokenFilter{} - r.Type = "dictionary_decompounder" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/directgenerator.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/directgenerator.go index ba1ae51b8..e1c05ee4c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/directgenerator.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/directgenerator.go @@ -16,29 +16,233 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestmode" ) // DirectGenerator type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L166-L178 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L265-L328 type DirectGenerator struct { - Field string `json:"field"` - MaxEdits *int `json:"max_edits,omitempty"` - MaxInspections *float32 `json:"max_inspections,omitempty"` - MaxTermFreq *float32 `json:"max_term_freq,omitempty"` - MinDocFreq *float32 `json:"min_doc_freq,omitempty"` - MinWordLength *int `json:"min_word_length,omitempty"` - PostFilter *string `json:"post_filter,omitempty"` - PreFilter *string `json:"pre_filter,omitempty"` - PrefixLength *int `json:"prefix_length,omitempty"` - Size *int `json:"size,omitempty"` - SuggestMode *suggestmode.SuggestMode `json:"suggest_mode,omitempty"` + // Field The field to fetch the candidate suggestions from. + // Needs to be set globally or per suggestion. + Field string `json:"field"` + // MaxEdits The maximum edit distance candidate suggestions can have in order to be + // considered as a suggestion. + // Can only be `1` or `2`. + MaxEdits *int `json:"max_edits,omitempty"` + // MaxInspections A factor that is used to multiply with the shard_size in order to inspect + // more candidate spelling corrections on the shard level. + // Can improve accuracy at the cost of performance. + MaxInspections *float32 `json:"max_inspections,omitempty"` + // MaxTermFreq The maximum threshold in number of documents in which a suggest text token + // can exist in order to be included. + // This can be used to exclude high frequency terms — which are usually spelled + // correctly — from being spellchecked. + // Can be a relative percentage number (for example `0.4`) or an absolute number + // to represent document frequencies. + // If a value higher than 1 is specified, then fractional can not be specified. 
+ MaxTermFreq *float32 `json:"max_term_freq,omitempty"` + // MinDocFreq The minimal threshold in number of documents a suggestion should appear in. + // This can improve quality by only suggesting high frequency terms. + // Can be specified as an absolute number or as a relative percentage of number + // of documents. + // If a value higher than 1 is specified, the number cannot be fractional. + MinDocFreq *float32 `json:"min_doc_freq,omitempty"` + // MinWordLength The minimum length a suggest text term must have in order to be included. + MinWordLength *int `json:"min_word_length,omitempty"` + // PostFilter A filter (analyzer) that is applied to each of the generated tokens before + // they are passed to the actual phrase scorer. + PostFilter *string `json:"post_filter,omitempty"` + // PreFilter A filter (analyzer) that is applied to each of the tokens passed to this + // candidate generator. + // This filter is applied to the original token before candidates are generated. + PreFilter *string `json:"pre_filter,omitempty"` + // PrefixLength The number of minimal prefix characters that must match in order be a + // candidate suggestions. + // Increasing this number improves spellcheck performance. + PrefixLength *int `json:"prefix_length,omitempty"` + // Size The maximum corrections to be returned per suggest text token. + Size *int `json:"size,omitempty"` + // SuggestMode Controls what suggestions are included on the suggestions generated on each + // shard. 
+ SuggestMode *suggestmode.SuggestMode `json:"suggest_mode,omitempty"` +} + +func (s *DirectGenerator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "max_edits": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxEdits = &value + case float64: + f := int(v) + s.MaxEdits = &f + } + + case "max_inspections": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.MaxInspections = &f + case float64: + f := float32(v) + s.MaxInspections = &f + } + + case "max_term_freq": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.MaxTermFreq = &f + case float64: + f := float32(v) + s.MaxTermFreq = &f + } + + case "min_doc_freq": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.MinDocFreq = &f + case float64: + f := float32(v) + s.MinDocFreq = &f + } + + case "min_word_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinWordLength = &value + case float64: + f := int(v) + s.MinWordLength = &f + } + + case "post_filter": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PostFilter = 
&o + + case "pre_filter": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PreFilter = &o + + case "prefix_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PrefixLength = &value + case float64: + f := int(v) + s.PrefixLength = &f + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "suggest_mode": + if err := dec.Decode(&s.SuggestMode); err != nil { + return err + } + + } + } + return nil } // NewDirectGenerator returns a DirectGenerator. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/discovery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/discovery.go index 26b979ebe..bd769148e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/discovery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/discovery.go @@ -16,19 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Discovery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L82-L88 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L201-L219 type Discovery struct { - ClusterApplierStats *ClusterAppliedStats `json:"cluster_applier_stats,omitempty"` - ClusterStateQueue *ClusterStateQueue `json:"cluster_state_queue,omitempty"` - ClusterStateUpdate map[string]ClusterStateUpdate `json:"cluster_state_update,omitempty"` - PublishedClusterStates *PublishedClusterStates `json:"published_cluster_states,omitempty"` - SerializedClusterStates *SerializedClusterState `json:"serialized_cluster_states,omitempty"` + ClusterApplierStats *ClusterAppliedStats `json:"cluster_applier_stats,omitempty"` + // ClusterStateQueue Contains statistics for the cluster state queue of the node. + ClusterStateQueue *ClusterStateQueue `json:"cluster_state_queue,omitempty"` + // ClusterStateUpdate Contains low-level statistics about how long various activities took during + // cluster state updates while the node was the elected master. + // Omitted if the node is not master-eligible. + // Every field whose name ends in `_time` within this object is also represented + // as a raw number of milliseconds in a field whose name ends in `_time_millis`. + // The human-readable fields with a `_time` suffix are only returned if + // requested with the `?human=true` query parameter. + ClusterStateUpdate map[string]ClusterStateUpdate `json:"cluster_state_update,omitempty"` + // PublishedClusterStates Contains statistics for the published cluster states of the node. + PublishedClusterStates *PublishedClusterStates `json:"published_cluster_states,omitempty"` + SerializedClusterStates *SerializedClusterState `json:"serialized_cluster_states,omitempty"` } // NewDiscovery returns a Discovery. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/discoverynode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/discoverynode.go index 1d9d2aea4..215096d16 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/discoverynode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/discoverynode.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // DiscoveryNode type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DiscoveryNode.ts#L24-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DiscoveryNode.ts#L24-L30 type DiscoveryNode struct { Attributes map[string]string `json:"attributes"` EphemeralId string `json:"ephemeral_id"` @@ -31,6 +38,54 @@ type DiscoveryNode struct { TransportAddress string `json:"transport_address"` } +func (s *DiscoveryNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return err + } + + case "ephemeral_id": + if err := dec.Decode(&s.EphemeralId); err != nil { + return err + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case 
"transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return err + } + + } + } + return nil +} + // NewDiscoveryNode returns a DiscoveryNode. func NewDiscoveryNode() *DiscoveryNode { r := &DiscoveryNode{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diskindicator.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diskindicator.go new file mode 100644 index 000000000..02959fd28 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diskindicator.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indicatorhealthstatus" +) + +// DiskIndicator type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L121-L125 +type DiskIndicator struct { + Details *DiskIndicatorDetails `json:"details,omitempty"` + Diagnosis []Diagnosis `json:"diagnosis,omitempty"` + Impacts []Impact `json:"impacts,omitempty"` + Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` + Symptom string `json:"symptom"` +} + +func (s *DiskIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return err + } + + case "diagnosis": + if err := dec.Decode(&s.Diagnosis); err != nil { + return err + } + + case "impacts": + if err := dec.Decode(&s.Impacts); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "symptom": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Symptom = o + + } + } + return nil +} + +// NewDiskIndicator returns a DiskIndicator. +func NewDiskIndicator() *DiskIndicator { + r := &DiskIndicator{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diskindicatordetails.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diskindicatordetails.go new file mode 100644 index 000000000..269e01fa2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diskindicatordetails.go @@ -0,0 +1,142 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// DiskIndicatorDetails type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L126-L132 +type DiskIndicatorDetails struct { + IndicesWithReadonlyBlock int64 `json:"indices_with_readonly_block"` + NodesOverFloodStageWatermark int64 `json:"nodes_over_flood_stage_watermark"` + NodesOverHighWatermark int64 `json:"nodes_over_high_watermark"` + NodesWithEnoughDiskSpace int64 `json:"nodes_with_enough_disk_space"` + NodesWithUnknownDiskStatus int64 `json:"nodes_with_unknown_disk_status"` +} + +func (s *DiskIndicatorDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "indices_with_readonly_block": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndicesWithReadonlyBlock = value + case float64: + f := int64(v) + s.IndicesWithReadonlyBlock = f + } 
+ + case "nodes_over_flood_stage_watermark": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NodesOverFloodStageWatermark = value + case float64: + f := int64(v) + s.NodesOverFloodStageWatermark = f + } + + case "nodes_over_high_watermark": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NodesOverHighWatermark = value + case float64: + f := int64(v) + s.NodesOverHighWatermark = f + } + + case "nodes_with_enough_disk_space": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NodesWithEnoughDiskSpace = value + case float64: + f := int64(v) + s.NodesWithEnoughDiskSpace = f + } + + case "nodes_with_unknown_disk_status": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NodesWithUnknownDiskStatus = value + case float64: + f := int64(v) + s.NodesWithUnknownDiskStatus = f + } + + } + } + return nil +} + +// NewDiskIndicatorDetails returns a DiskIndicatorDetails. +func NewDiskIndicatorDetails() *DiskIndicatorDetails { + r := &DiskIndicatorDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diskusage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diskusage.go index 2210595ff..6ab579f30 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diskusage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diskusage.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DiskUsage type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/allocation_explain/types.ts#L62-L69 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/allocation_explain/types.ts#L62-L69 type DiskUsage struct { FreeBytes int64 `json:"free_bytes"` FreeDiskPercent Float64 `json:"free_disk_percent"` @@ -32,6 +40,115 @@ type DiskUsage struct { UsedDiskPercent Float64 `json:"used_disk_percent"` } +func (s *DiskUsage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "free_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.FreeBytes = value + case float64: + f := int64(v) + s.FreeBytes = f + } + + case "free_disk_percent": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.FreeDiskPercent = f + case float64: + f := Float64(v) + s.FreeDiskPercent = f + } + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = o + + case "total_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 
10, 64) + if err != nil { + return err + } + s.TotalBytes = value + case float64: + f := int64(v) + s.TotalBytes = f + } + + case "used_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.UsedBytes = value + case float64: + f := int64(v) + s.UsedBytes = f + } + + case "used_disk_percent": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.UsedDiskPercent = f + case float64: + f := Float64(v) + s.UsedDiskPercent = f + } + + } + } + return nil +} + // NewDiskUsage returns a DiskUsage. func NewDiskUsage() *DiskUsage { r := &DiskUsage{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dismaxquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dismaxquery.go index 99fcaed46..799daee1e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dismaxquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dismaxquery.go @@ -16,20 +16,108 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DisMaxQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L46-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L78-L90 type DisMaxQuery struct { - Boost *float32 `json:"boost,omitempty"` - Queries []Query `json:"queries"` - QueryName_ *string `json:"_name,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Queries One or more query clauses. + // Returned documents must match one or more of these queries. + // If a document matches multiple queries, Elasticsearch uses the highest + // relevance score. + Queries []Query `json:"queries"` + QueryName_ *string `json:"_name,omitempty"` + // TieBreaker Floating point number between 0 and 1.0 used to increase the relevance scores + // of documents matching multiple query clauses. 
TieBreaker *Float64 `json:"tie_breaker,omitempty"` } +func (s *DisMaxQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "queries": + if err := dec.Decode(&s.Queries); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "tie_breaker": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.TieBreaker = &f + case float64: + f := Float64(v) + s.TieBreaker = &f + } + + } + } + return nil +} + // NewDisMaxQuery returns a DisMaxQuery. func NewDisMaxQuery() *DisMaxQuery { r := &DisMaxQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dissectprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dissectprocessor.go index ed278cfdc..09077f1fd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dissectprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dissectprocessor.go @@ -16,23 +16,161 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DissectProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L187-L192 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L560-L579 type DissectProcessor struct { - AppendSeparator *string `json:"append_separator,omitempty"` - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Pattern string `json:"pattern"` - Tag *string `json:"tag,omitempty"` + // AppendSeparator The character(s) that separate the appended fields. + AppendSeparator *string `json:"append_separator,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to dissect. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. 
+ OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Pattern The pattern to apply to the field. + Pattern string `json:"pattern"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` +} + +func (s *DissectProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "append_separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AppendSeparator = &o + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil } // NewDissectProcessor returns a DissectProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/distancefeaturequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/distancefeaturequery.go index 1917fb792..769866a83 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/distancefeaturequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/distancefeaturequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // GeoDistanceFeatureQuery // DateDistanceFeatureQuery // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L56-L60 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L72-L76 type DistanceFeatureQuery interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/distancefeaturequerybasedatemathduration.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/distancefeaturequerybasedatemathduration.go index e5d02812e..b61c1f519 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/distancefeaturequerybasedatemathduration.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/distancefeaturequerybasedatemathduration.go @@ -16,21 +16,114 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DistanceFeatureQueryBaseDateMathDuration type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L40-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L40-L60 type DistanceFeatureQueryBaseDateMathDuration struct { - Boost *float32 `json:"boost,omitempty"` - Field string `json:"field"` - Origin string `json:"origin"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Field Name of the field used to calculate distances. This field must meet the + // following criteria: + // be a `date`, `date_nanos` or `geo_point` field; + // have an `index` mapping parameter value of `true`, which is the default; + // have an `doc_values` mapping parameter value of `true`, which is the default. + Field string `json:"field"` + // Origin Date or point of origin used to calculate distances. + // If the `field` value is a `date` or `date_nanos` field, the `origin` value + // must be a date. + // Date Math, such as `now-1h`, is supported. 
+ // If the field value is a `geo_point` field, the `origin` value must be a + // geopoint. + Origin string `json:"origin"` + // Pivot Distance from the `origin` at which relevance scores receive half of the + // `boost` value. + // If the `field` value is a `date` or `date_nanos` field, the `pivot` value + // must be a time unit, such as `1h` or `10d`. If the `field` value is a + // `geo_point` field, the `pivot` value must be a distance unit, such as `1km` + // or `12m`. Pivot Duration `json:"pivot"` QueryName_ *string `json:"_name,omitempty"` } +func (s *DistanceFeatureQueryBaseDateMathDuration) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "origin": + if err := dec.Decode(&s.Origin); err != nil { + return err + } + + case "pivot": + if err := dec.Decode(&s.Pivot); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + // NewDistanceFeatureQueryBaseDateMathDuration returns a DistanceFeatureQueryBaseDateMathDuration. 
func NewDistanceFeatureQueryBaseDateMathDuration() *DistanceFeatureQueryBaseDateMathDuration { r := &DistanceFeatureQueryBaseDateMathDuration{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/distancefeaturequerybasegeolocationdistance.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/distancefeaturequerybasegeolocationdistance.go index e2a648073..9d53379e4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/distancefeaturequerybasegeolocationdistance.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/distancefeaturequerybasegeolocationdistance.go @@ -16,19 +16,112 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DistanceFeatureQueryBaseGeoLocationDistance type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L40-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L40-L60 type DistanceFeatureQueryBaseGeoLocationDistance struct { - Boost *float32 `json:"boost,omitempty"` - Field string `json:"field"` - Origin GeoLocation `json:"origin"` - Pivot string `json:"pivot"` - QueryName_ *string `json:"_name,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
+ Boost *float32 `json:"boost,omitempty"` + // Field Name of the field used to calculate distances. This field must meet the + // following criteria: + // be a `date`, `date_nanos` or `geo_point` field; + // have an `index` mapping parameter value of `true`, which is the default; + // have an `doc_values` mapping parameter value of `true`, which is the default. + Field string `json:"field"` + // Origin Date or point of origin used to calculate distances. + // If the `field` value is a `date` or `date_nanos` field, the `origin` value + // must be a date. + // Date Math, such as `now-1h`, is supported. + // If the field value is a `geo_point` field, the `origin` value must be a + // geopoint. + Origin GeoLocation `json:"origin"` + // Pivot Distance from the `origin` at which relevance scores receive half of the + // `boost` value. + // If the `field` value is a `date` or `date_nanos` field, the `pivot` value + // must be a time unit, such as `1h` or `10d`. If the `field` value is a + // `geo_point` field, the `pivot` value must be a distance unit, such as `1km` + // or `12m`. 
+ Pivot string `json:"pivot"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *DistanceFeatureQueryBaseGeoLocationDistance) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "origin": + if err := dec.Decode(&s.Origin); err != nil { + return err + } + + case "pivot": + if err := dec.Decode(&s.Pivot); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil } // NewDistanceFeatureQueryBaseGeoLocationDistance returns a DistanceFeatureQueryBaseGeoLocationDistance. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diversifiedsampleraggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diversifiedsampleraggregation.go index b3fb82600..79a82dd45 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diversifiedsampleraggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/diversifiedsampleraggregation.go @@ -16,27 +16,120 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sampleraggregationexecutionhint" ) // DiversifiedSamplerAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L155-L161 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L320-L341 type DiversifiedSamplerAggregation struct { - ExecutionHint *sampleraggregationexecutionhint.SamplerAggregationExecutionHint `json:"execution_hint,omitempty"` - Field *string `json:"field,omitempty"` - MaxDocsPerValue *int `json:"max_docs_per_value,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Script Script `json:"script,omitempty"` - ShardSize *int `json:"shard_size,omitempty"` + // ExecutionHint The type of value used for de-duplication. + ExecutionHint *sampleraggregationexecutionhint.SamplerAggregationExecutionHint `json:"execution_hint,omitempty"` + // Field The field used to provide values used for de-duplication. + Field *string `json:"field,omitempty"` + // MaxDocsPerValue Limits how many documents are permitted per choice of de-duplicating value. + MaxDocsPerValue *int `json:"max_docs_per_value,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + Script Script `json:"script,omitempty"` + // ShardSize Limits how many top-scoring documents are collected in the sample processed + // on each shard. 
+ ShardSize *int `json:"shard_size,omitempty"` +} + +func (s *DiversifiedSamplerAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "execution_hint": + if err := dec.Decode(&s.ExecutionHint); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "max_docs_per_value": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxDocsPerValue = &value + case float64: + f := int(v) + s.MaxDocsPerValue = &f + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "shard_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + } + } + return nil } // NewDiversifiedSamplerAggregation returns a DiversifiedSamplerAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/docstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/docstats.go index 64e968f31..ceba09797 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/docstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/docstats.go @@ -16,18 +16,85 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DocStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L64-L67 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L97-L109 type DocStats struct { - Count int64 `json:"count"` + // Count Total number of non-deleted documents across all primary shards assigned to + // selected nodes. + // This number is based on documents in Lucene segments and may include + // documents from nested fields. + Count int64 `json:"count"` + // Deleted Total number of deleted documents across all primary shards assigned to + // selected nodes. + // This number is based on documents in Lucene segments. + // Elasticsearch reclaims the disk space of deleted Lucene documents when a + // segment is merged. 
Deleted *int64 `json:"deleted,omitempty"` } +func (s *DocStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "deleted": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Deleted = &value + case float64: + f := int64(v) + s.Deleted = &f + } + + } + } + return nil +} + // NewDocStats returns a DocStats. func NewDocStats() *DocStats { r := &DocStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/document.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/document.go index 2e4e59405..31f14e710 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/document.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/document.go @@ -16,23 +16,65 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // Document type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/simulate/types.ts#L41-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/simulate/types.ts#L41-L55 type Document struct { - Id_ *string `json:"_id,omitempty"` - Index_ *string `json:"_index,omitempty"` + // Id_ Unique identifier for the document. + // This ID must be unique within the `_index`. + Id_ *string `json:"_id,omitempty"` + // Index_ Name of the index containing the document. + Index_ *string `json:"_index,omitempty"` + // Source_ JSON body for the document. Source_ json.RawMessage `json:"_source,omitempty"` } +func (s *Document) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return err + } + + } + } + return nil +} + // NewDocument returns a Document. func NewDocument() *Document { r := &Document{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/documentrating.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/documentrating.go index e00ec8bf7..85fb21819 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/documentrating.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/documentrating.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DocumentRating type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/rank_eval/types.ts#L116-L123 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/rank_eval/types.ts#L116-L123 type DocumentRating struct { // Id_ The document ID. Id_ string `json:"_id"` @@ -33,6 +41,52 @@ type DocumentRating struct { Rating int `json:"rating"` } +func (s *DocumentRating) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "rating": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Rating = value + case float64: + f := int(v) + s.Rating = f + } + + } + } + return nil +} + // NewDocumentRating returns a DocumentRating. 
func NewDocumentRating() *DocumentRating { r := &DocumentRating{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/documentsimulation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/documentsimulation.go index fb22dccd1..260995991 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/documentsimulation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/documentsimulation.go @@ -16,29 +16,116 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" - + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" ) // DocumentSimulation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/simulate/types.ts#L47-L60 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/simulate/types.ts#L57-L85 type DocumentSimulation struct { - DocumentSimulation map[string]string `json:"-"` - Id_ string `json:"_id"` - Index_ string `json:"_index"` - Ingest_ SimulateIngest `json:"_ingest"` - Routing_ *string `json:"_routing,omitempty"` - Source_ map[string]json.RawMessage `json:"_source"` - VersionType_ *versiontype.VersionType `json:"_version_type,omitempty"` - Version_ StringifiedVersionNumber `json:"_version,omitempty"` + DocumentSimulation map[string]string `json:"-"` + // Id_ Unique identifier for the document. This ID must be unique within the + // `_index`. 
+ Id_ string `json:"_id"` + // Index_ Name of the index containing the document. + Index_ string `json:"_index"` + Ingest_ SimulateIngest `json:"_ingest"` + // Routing_ Value used to send the document to a specific primary shard. + Routing_ *string `json:"_routing,omitempty"` + // Source_ JSON body for the document. + Source_ map[string]json.RawMessage `json:"_source"` + VersionType_ *versiontype.VersionType `json:"_version_type,omitempty"` + Version_ StringifiedVersionNumber `json:"_version,omitempty"` +} + +func (s *DocumentSimulation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "_ingest": + if err := dec.Decode(&s.Ingest_); err != nil { + return err + } + + case "_routing": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Routing_ = &o + + case "_source": + if s.Source_ == nil { + s.Source_ = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Source_); err != nil { + return err + } + + case "_version_type": + if err := dec.Decode(&s.VersionType_); err != nil { + return err + } + + case "_version": + if err := dec.Decode(&s.Version_); err != nil { + return err + } + + default: + + if key, ok := t.(string); ok { + if s.DocumentSimulation == nil { + s.DocumentSimulation = make(map[string]string, 0) + } + raw := new(string) + if err := dec.Decode(&raw); err != nil { + return err + } + s.DocumentSimulation[key] = *raw + } + + } + } + return nil } // MarhsalJSON overrides marshalling for types with additional properties @@ -60,6 +147,7 @@ func (s DocumentSimulation) MarshalJSON() ([]byte, 
error) { for key, value := range s.DocumentSimulation { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "DocumentSimulation") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dotexpanderprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dotexpanderprocessor.go index dd3b8d872..343ef9c3a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dotexpanderprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dotexpanderprocessor.go @@ -16,21 +16,133 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DotExpanderProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L194-L197 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L581-L592 type DotExpanderProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Path *string `json:"path,omitempty"` - Tag *string `json:"tag,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to expand into an object field. + // If set to `*`, all top-level fields will be expanded. 
+ Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Path The field that contains the field to expand. + // Only required if the field to expand is part another object field, because + // the `field` option can only understand leaf fields. + Path *string `json:"path,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` +} + +func (s *DotExpanderProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = &o + + 
case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil } // NewDotExpanderProcessor returns a DotExpanderProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/doublenumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/doublenumberproperty.go index ff180ae9b..50dd7dbff 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/doublenumberproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/doublenumberproperty.go @@ -16,25 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // DoubleNumberProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L141-L144 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L144-L147 type DoubleNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -63,6 +63,7 @@ type DoubleNumberProperty struct { } func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -77,23 +78,63 @@ func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + 
case bool: + s.DocValues = &v } case "dynamic": @@ -102,6 +143,9 @@ func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -110,7 +154,9 @@ func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -389,35 +435,80 @@ func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "null_value": - if err := dec.Decode(&s.NullValue); err != nil 
{ - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.NullValue = &f + case float64: + f := Float64(v) + s.NullValue = &f } case "on_script_error": @@ -426,6 +517,9 @@ func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -434,7 +528,9 @@ func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -713,9 +809,11 @@ func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -725,18 +823,43 @@ func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "time_series_dimension": - if err := dec.Decode(&s.TimeSeriesDimension); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseBool(v) + if err != nil { + return err + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v } case "time_series_metric": @@ -754,6 +877,36 @@ func (s *DoubleNumberProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s DoubleNumberProperty) MarshalJSON() ([]byte, error) { + type innerDoubleNumberProperty DoubleNumberProperty + tmp := innerDoubleNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Similarity: s.Similarity, + Store: s.Store, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "double" + + return json.Marshal(tmp) +} + // NewDoubleNumberProperty returns a DoubleNumberProperty. func NewDoubleNumberProperty() *DoubleNumberProperty { r := &DoubleNumberProperty{ @@ -762,7 +915,5 @@ func NewDoubleNumberProperty() *DoubleNumberProperty { Properties: make(map[string]Property, 0), } - r.Type = "double" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/doublerangeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/doublerangeproperty.go index 92f1ccddf..d5bb6e000 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/doublerangeproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/doublerangeproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // DoubleRangeProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/range.ts#L34-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/range.ts#L34-L36 type DoubleRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -51,6 +51,7 @@ type DoubleRangeProperty struct { } func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -65,23 +66,63 @@ func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -90,6 +131,9 @@ func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -98,7 +142,9 @@ func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -377,28 +423,56 @@ func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == 
nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -407,7 +481,9 @@ func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -686,20 +762,38 @@ func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -712,6 +806,30 @@ func (s *DoubleRangeProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s DoubleRangeProperty) MarshalJSON() ([]byte, error) { + type innerDoubleRangeProperty DoubleRangeProperty + tmp := innerDoubleRangeProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Similarity: s.Similarity, + Store: 
s.Store, + Type: s.Type, + } + + tmp.Type = "double_range" + + return json.Marshal(tmp) +} + // NewDoubleRangeProperty returns a DoubleRangeProperty. func NewDoubleRangeProperty() *DoubleRangeProperty { r := &DoubleRangeProperty{ @@ -720,7 +838,5 @@ func NewDoubleRangeProperty() *DoubleRangeProperty { Properties: make(map[string]Property, 0), } - r.Type = "double_range" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/doubletermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/doubletermsaggregate.go index 74914f293..0f65f5170 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/doubletermsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/doubletermsaggregate.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // DoubleTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L410-L415 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L411-L416 type DoubleTermsAggregate struct { - Buckets BucketsDoubleTermsBucket `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` + Buckets BucketsDoubleTermsBucket `json:"buckets"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Meta Metadata `json:"meta,omitempty"` + SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` } func (s *DoubleTermsAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -59,21 +60,33 @@ func (s *DoubleTermsAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]DoubleTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []DoubleTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } case "doc_count_error_upper_bound": - if err := dec.Decode(&s.DocCountErrorUpperBound); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f } case "meta": @@ -82,8 +95,18 @@ func (s *DoubleTermsAggregate) UnmarshalJSON(data []byte) error { } case "sum_other_doc_count": - if err := dec.Decode(&s.SumOtherDocCount); 
err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SumOtherDocCount = &value + case float64: + f := int64(v) + s.SumOtherDocCount = &f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/doubletermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/doubletermsbucket.go index 4d44fe9e3..7e7ae6019 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/doubletermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/doubletermsbucket.go @@ -16,25 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // DoubleTermsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L417-L420 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L418-L421 type DoubleTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -44,6 +42,7 @@ type DoubleTermsBucket struct { } func (s *DoubleTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,467 +56,576 @@ func (s *DoubleTermsBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := 
NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != 
nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); 
err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "doc_count_error": - if err := dec.Decode(&s.DocCountError); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCountError = &value + case float64: + f := int64(v) + s.DocCountError = &f } case "key": - if err := dec.Decode(&s.Key); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } 
+ f := Float64(value) + s.Key = f + case float64: + f := Float64(v) + s.Key = f } case "key_as_string": - if err := dec.Decode(&s.KeyAsString); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeyAsString = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := 
NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := 
NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return 
err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } } } @@ -543,6 +651,7 @@ func (s DoubleTermsBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/downsampleconfig.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/downsampleconfig.go index bea3d9307..2a6d504a0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/downsampleconfig.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/downsampleconfig.go @@ -16,17 +16,50 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // DownsampleConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/Downsample.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/Downsample.ts#L22-L27 type DownsampleConfig struct { + // FixedInterval The interval at which to aggregate the original time series index. FixedInterval string `json:"fixed_interval"` } +func (s *DownsampleConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fixed_interval": + if err := dec.Decode(&s.FixedInterval); err != nil { + return err + } + + } + } + return nil +} + // NewDownsampleConfig returns a DownsampleConfig. func NewDownsampleConfig() *DownsampleConfig { r := &DownsampleConfig{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/downsamplinground.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/downsamplinground.go new file mode 100644 index 000000000..d50708239 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/downsamplinground.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + +// DownsamplingRound type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/DownsamplingRound.ts#L23-L32 +type DownsamplingRound struct { + // After The duration since rollover when this downsampling round should execute + After Duration `json:"after"` + // Config The downsample configuration to execute. + Config DownsampleConfig `json:"config"` +} + +func (s *DownsamplingRound) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "after": + if err := dec.Decode(&s.After); err != nil { + return err + } + + case "config": + if err := dec.Decode(&s.Config); err != nil { + return err + } + + } + } + return nil +} + +// NewDownsamplingRound returns a DownsamplingRound. 
+func NewDownsamplingRound() *DownsamplingRound { + r := &DownsamplingRound{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dropprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dropprocessor.go index 769698308..483b4114f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dropprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dropprocessor.go @@ -16,19 +16,109 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // DropProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L199-L199 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L594-L594 type DropProcessor struct { - Description *string `json:"description,omitempty"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Tag *string `json:"tag,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // OnFailure Handle failures for the processor. 
+ OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` +} + +func (s *DropProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil } // NewDropProcessor returns a DropProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/duration.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/duration.go index 91be51421..d027dbc24 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/duration.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/duration.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -24,5 +24,5 @@ package types // // string // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Time.ts#L52-L58 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Time.ts#L52-L58 type Duration interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/durationvalueunitfloatmillis.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/durationvalueunitfloatmillis.go index 39f0a0165..205d1c272 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/durationvalueunitfloatmillis.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/durationvalueunitfloatmillis.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // DurationValueUnitFloatMillis type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Time.ts#L67-L67 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Time.ts#L67-L67 type DurationValueUnitFloatMillis Float64 diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/durationvalueunitmillis.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/durationvalueunitmillis.go index 011f96511..a860d5092 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/durationvalueunitmillis.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/durationvalueunitmillis.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // DurationValueUnitMillis type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Time.ts#L67-L67 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Time.ts#L67-L67 type DurationValueUnitMillis int64 diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/durationvalueunitnanos.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/durationvalueunitnanos.go index 12fb5a9ff..5ced13592 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/durationvalueunitnanos.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/durationvalueunitnanos.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // DurationValueUnitNanos type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Time.ts#L67-L67 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Time.ts#L67-L67 type DurationValueUnitNanos int64 diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/durationvalueunitseconds.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/durationvalueunitseconds.go index 21f476779..709e90860 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/durationvalueunitseconds.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/durationvalueunitseconds.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // DurationValueUnitSeconds type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Time.ts#L67-L67 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Time.ts#L67-L67 type DurationValueUnitSeconds int64 diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dutchanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dutchanalyzer.go index 3c6328b6f..9e2431913 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dutchanalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dutchanalyzer.go @@ -16,23 +16,82 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // DutchAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/analyzers.ts#L61-L64 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/analyzers.ts#L61-L64 type DutchAnalyzer struct { Stopwords []string `json:"stopwords,omitempty"` Type string `json:"type,omitempty"` } +func (s *DutchAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return err + } + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s DutchAnalyzer) MarshalJSON() ([]byte, error) { + type innerDutchAnalyzer DutchAnalyzer + tmp := innerDutchAnalyzer{ + Stopwords: s.Stopwords, + Type: s.Type, + } + + tmp.Type = "dutch" + + return json.Marshal(tmp) +} + // NewDutchAnalyzer returns a DutchAnalyzer. 
func NewDutchAnalyzer() *DutchAnalyzer { r := &DutchAnalyzer{} - r.Type = "dutch" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dynamicproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dynamicproperty.go index dc3ccfcbc..a4fb45b1b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dynamicproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dynamicproperty.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" - - "bytes" - "errors" - "io" - - "encoding/json" ) // DynamicProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L275-L306 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L285-L316 type DynamicProperty struct { Analyzer *string `json:"analyzer,omitempty"` Boost *Float64 `json:"boost,omitempty"` @@ -74,6 +74,7 @@ type DynamicProperty struct { } func (s *DynamicProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -88,28 +89,75 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { switch t { case "analyzer": - if err := dec.Decode(&s.Analyzer); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case 
"doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -118,16 +166,37 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { } case "eager_global_ordinals": - if err := dec.Decode(&s.EagerGlobalOrdinals); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.EagerGlobalOrdinals = &value + case bool: + s.EagerGlobalOrdinals = &v } case "enabled": - if err := dec.Decode(&s.Enabled); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = &value + case bool: + s.Enabled = &v } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -136,7 +205,9 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -415,30 +486,68 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + 
s.Format = &o case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "index_options": @@ -447,8 +556,17 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { } case "index_phrases": - if err := dec.Decode(&s.IndexPhrases); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IndexPhrases = &value + case bool: + s.IndexPhrases = &v } case "index_prefixes": @@ -457,18 +575,37 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { } case "locale": - if err := dec.Decode(&s.Locale); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Locale = &o case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "norms": - if err := dec.Decode(&s.Norms); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + 
value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Norms = &value + case bool: + s.Norms = &v } case "null_value": @@ -482,16 +619,41 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { } case "position_increment_gap": - if err := dec.Decode(&s.PositionIncrementGap); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PositionIncrementGap = &value + case float64: + f := int(v) + s.PositionIncrementGap = &f } case "precision_step": - if err := dec.Decode(&s.PrecisionStep); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PrecisionStep = &value + case float64: + f := int(v) + s.PrecisionStep = &f } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -500,7 +662,9 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -779,9 +943,11 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -791,23 +957,53 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { } case "search_analyzer": - if err := dec.Decode(&s.SearchAnalyzer); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = 
string(tmp[:]) + } + s.SearchAnalyzer = &o case "search_quote_analyzer": - if err := dec.Decode(&s.SearchQuoteAnalyzer); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQuoteAnalyzer = &o case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "term_vector": @@ -830,6 +1026,49 @@ func (s *DynamicProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s DynamicProperty) MarshalJSON() ([]byte, error) { + type innerDynamicProperty DynamicProperty + tmp := innerDynamicProperty{ + Analyzer: s.Analyzer, + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + EagerGlobalOrdinals: s.EagerGlobalOrdinals, + Enabled: s.Enabled, + Fields: s.Fields, + Format: s.Format, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + IndexOptions: s.IndexOptions, + IndexPhrases: s.IndexPhrases, + IndexPrefixes: s.IndexPrefixes, + Locale: s.Locale, + Meta: s.Meta, + Norms: s.Norms, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + PositionIncrementGap: s.PositionIncrementGap, + PrecisionStep: s.PrecisionStep, + Properties: s.Properties, + Script: s.Script, + SearchAnalyzer: s.SearchAnalyzer, + SearchQuoteAnalyzer: s.SearchQuoteAnalyzer, + Similarity: s.Similarity, + Store: s.Store, + 
TermVector: s.TermVector, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "{dynamic_property}" + + return json.Marshal(tmp) +} + // NewDynamicProperty returns a DynamicProperty. func NewDynamicProperty() *DynamicProperty { r := &DynamicProperty{ @@ -838,7 +1077,5 @@ func NewDynamicProperty() *DynamicProperty { Properties: make(map[string]Property, 0), } - r.Type = "{dynamic_property}" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dynamictemplate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dynamictemplate.go index 80b3fbeae..c41168318 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dynamictemplate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/dynamictemplate.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/matchtype" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/matchtype" ) // DynamicTemplate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/dynamic-template.ts#L22-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/dynamic-template.ts#L22-L30 type DynamicTemplate struct { Mapping Property `json:"mapping,omitempty"` Match *string `json:"match,omitempty"` @@ -44,6 +44,7 @@ type DynamicTemplate struct { } func (s *DynamicTemplate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -66,300 +67,316 @@ func (s *DynamicTemplate) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(source) localDec.Decode(&kind) source.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": o := NewBinaryProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "boolean": o := NewBooleanProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "{dynamic_property}": o := NewDynamicProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "join": o := NewJoinProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "keyword": o := NewKeywordProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "match_only_text": o := NewMatchOnlyTextProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "percolator": o := NewPercolatorProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } 
s.Mapping = *o case "rank_feature": o := NewRankFeatureProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "rank_features": o := NewRankFeaturesProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "search_as_you_type": o := NewSearchAsYouTypeProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "text": o := NewTextProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "version": o := NewVersionProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "wildcard": o := NewWildcardProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "date_nanos": o := NewDateNanosProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "date": o := NewDateProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "aggregate_metric_double": o := NewAggregateMetricDoubleProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "dense_vector": o := NewDenseVectorProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "flattened": o := NewFlattenedProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "nested": o := NewNestedProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { 
return err } s.Mapping = *o case "object": o := NewObjectProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "completion": o := NewCompletionProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "constant_keyword": o := NewConstantKeywordProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "alias": o := NewFieldAliasProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "histogram": o := NewHistogramProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "ip": o := NewIpProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "murmur3": o := NewMurmur3HashProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "token_count": o := NewTokenCountProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "geo_point": o := NewGeoPointProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "geo_shape": o := NewGeoShapeProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "point": o := NewPointProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "shape": o := NewShapeProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "byte": o 
:= NewByteNumberProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "double": o := NewDoubleNumberProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "float": o := NewFloatNumberProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "half_float": o := NewHalfFloatNumberProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "integer": o := NewIntegerNumberProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "long": o := NewLongNumberProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "scaled_float": o := NewScaledFloatNumberProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "short": o := NewShortNumberProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "unsigned_long": o := NewUnsignedLongNumberProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "date_range": o := NewDateRangeProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "double_range": o := NewDoubleRangeProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "float_range": o := NewFloatRangeProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case 
"integer_range": o := NewIntegerRangeProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "ip_range": o := NewIpRangeProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o case "long_range": o := NewLongRangeProperty() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Mapping = *o default: - if err := dec.Decode(&s.Mapping); err != nil { + if err := localDec.Decode(&s.Mapping); err != nil { return err } } case "match": - if err := dec.Decode(&s.Match); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Match = &o case "match_mapping_type": - if err := dec.Decode(&s.MatchMappingType); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MatchMappingType = &o case "match_pattern": if err := dec.Decode(&s.MatchPattern); err != nil { @@ -367,19 +384,40 @@ func (s *DynamicTemplate) UnmarshalJSON(data []byte) error { } case "path_match": - if err := dec.Decode(&s.PathMatch); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PathMatch = &o case "path_unmatch": - if err := dec.Decode(&s.PathUnmatch); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PathUnmatch = &o case "unmatch": - if err := dec.Decode(&s.Unmatch); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { 
return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Unmatch = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/edgengramtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/edgengramtokenfilter.go index 4128c266d..c33ee8dc1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/edgengramtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/edgengramtokenfilter.go @@ -16,31 +16,124 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/edgengramside" ) // EdgeNGramTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L78-L84 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L79-L85 type EdgeNGramTokenFilter struct { MaxGram *int `json:"max_gram,omitempty"` MinGram *int `json:"min_gram,omitempty"` - PreserveOriginal *bool `json:"preserve_original,omitempty"` + PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` Side *edgengramside.EdgeNGramSide `json:"side,omitempty"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *EdgeNGramTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_gram": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxGram = &value + case float64: + f := int(v) + s.MaxGram = &f + } + + case "min_gram": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinGram = &value + case float64: + f := int(v) + s.MinGram = &f + } + + case "preserve_original": + if err := dec.Decode(&s.PreserveOriginal); err != nil { + return err + } + + case "side": + if err := dec.Decode(&s.Side); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s EdgeNGramTokenFilter) MarshalJSON() ([]byte, error) { + type innerEdgeNGramTokenFilter 
EdgeNGramTokenFilter + tmp := innerEdgeNGramTokenFilter{ + MaxGram: s.MaxGram, + MinGram: s.MinGram, + PreserveOriginal: s.PreserveOriginal, + Side: s.Side, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "edge_ngram" + + return json.Marshal(tmp) +} + // NewEdgeNGramTokenFilter returns a EdgeNGramTokenFilter. func NewEdgeNGramTokenFilter() *EdgeNGramTokenFilter { r := &EdgeNGramTokenFilter{} - r.Type = "edge_ngram" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/edgengramtokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/edgengramtokenizer.go index 4c38ce7b8..3f23d7513 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/edgengramtokenizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/edgengramtokenizer.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenchar" ) // EdgeNGramTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/tokenizers.ts#L30-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/tokenizers.ts#L31-L37 type EdgeNGramTokenizer struct { CustomTokenChars *string `json:"custom_token_chars,omitempty"` MaxGram int `json:"max_gram"` @@ -36,11 +42,105 @@ type EdgeNGramTokenizer struct { Version *string `json:"version,omitempty"` } +func (s *EdgeNGramTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "custom_token_chars": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CustomTokenChars = &o + + case "max_gram": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxGram = value + case float64: + f := int(v) + s.MaxGram = f + } + + case "min_gram": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinGram = value + case float64: + f := int(v) + s.MinGram = f + } + + case "token_chars": + if err := dec.Decode(&s.TokenChars); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s EdgeNGramTokenizer) MarshalJSON() ([]byte, error) { + type innerEdgeNGramTokenizer EdgeNGramTokenizer + tmp := 
innerEdgeNGramTokenizer{ + CustomTokenChars: s.CustomTokenChars, + MaxGram: s.MaxGram, + MinGram: s.MinGram, + TokenChars: s.TokenChars, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "edge_ngram" + + return json.Marshal(tmp) +} + // NewEdgeNGramTokenizer returns a EdgeNGramTokenizer. func NewEdgeNGramTokenizer() *EdgeNGramTokenizer { r := &EdgeNGramTokenizer{} - r.Type = "edge_ngram" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/elasticsearchversioninfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/elasticsearchversioninfo.go index 212fa5d9d..1b14ca0e3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/elasticsearchversioninfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/elasticsearchversioninfo.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ElasticsearchVersionInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Base.ts#L54-L64 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Base.ts#L54-L64 type ElasticsearchVersionInfo struct { BuildDate DateTime `json:"build_date"` BuildFlavor string `json:"build_flavor"` @@ -35,6 +43,108 @@ type ElasticsearchVersionInfo struct { MinimumWireCompatibilityVersion string `json:"minimum_wire_compatibility_version"` } +func (s *ElasticsearchVersionInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "build_date": + if err := dec.Decode(&s.BuildDate); err != nil { + return err + } + + case "build_flavor": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildFlavor = o + + case "build_hash": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildHash = o + + case "build_snapshot": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.BuildSnapshot = value + case bool: + s.BuildSnapshot = v + } + + case "build_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildType = o + + case "number": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = 
string(tmp[:]) + } + s.Int = o + + case "lucene_version": + if err := dec.Decode(&s.LuceneVersion); err != nil { + return err + } + + case "minimum_index_compatibility_version": + if err := dec.Decode(&s.MinimumIndexCompatibilityVersion); err != nil { + return err + } + + case "minimum_wire_compatibility_version": + if err := dec.Decode(&s.MinimumWireCompatibilityVersion); err != nil { + return err + } + + } + } + return nil +} + // NewElasticsearchVersionInfo returns a ElasticsearchVersionInfo. func NewElasticsearchVersionInfo() *ElasticsearchVersionInfo { r := &ElasticsearchVersionInfo{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/elisiontokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/elisiontokenfilter.go index 987c0b079..6b03b7754 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/elisiontokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/elisiontokenfilter.go @@ -16,26 +16,100 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ElisionTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L186-L191 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L187-L192 type ElisionTokenFilter struct { - Articles []string `json:"articles,omitempty"` - ArticlesCase *bool `json:"articles_case,omitempty"` - ArticlesPath *string `json:"articles_path,omitempty"` - Type string `json:"type,omitempty"` - Version *string `json:"version,omitempty"` + Articles []string `json:"articles,omitempty"` + ArticlesCase Stringifiedboolean `json:"articles_case,omitempty"` + ArticlesPath *string `json:"articles_path,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *ElisionTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "articles": + if err := dec.Decode(&s.Articles); err != nil { + return err + } + + case "articles_case": + if err := dec.Decode(&s.ArticlesCase); err != nil { + return err + } + + case "articles_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ArticlesPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ElisionTokenFilter) MarshalJSON() ([]byte, error) { + type innerElisionTokenFilter ElisionTokenFilter + tmp := innerElisionTokenFilter{ + Articles: s.Articles, + ArticlesCase: s.ArticlesCase, + ArticlesPath: 
s.ArticlesPath, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "elision" + + return json.Marshal(tmp) } // NewElisionTokenFilter returns a ElisionTokenFilter. func NewElisionTokenFilter() *ElisionTokenFilter { r := &ElisionTokenFilter{} - r.Type = "elision" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/email.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/email.go index 6fe0c271a..9f3db1285 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/email.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/email.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/emailpriority" ) // Email type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L238-L250 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L238-L250 type Email struct { Attachments map[string]EmailAttachmentContainer `json:"attachments,omitempty"` Bcc []string `json:"bcc,omitempty"` @@ -41,6 +47,98 @@ type Email struct { To []string `json:"to"` } +func (s *Email) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attachments": + if s.Attachments == nil { + s.Attachments = make(map[string]EmailAttachmentContainer, 0) + } + if err := dec.Decode(&s.Attachments); err != nil { + return err + } + + case "bcc": + if err := dec.Decode(&s.Bcc); err != nil { + return err + } + + case "body": + if err := dec.Decode(&s.Body); err != nil { + return err + } + + case "cc": + if err := dec.Decode(&s.Cc); err != nil { + return err + } + + case "from": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.From = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "priority": + if err := dec.Decode(&s.Priority); err != nil { + return err + } + + case "reply_to": + if err := dec.Decode(&s.ReplyTo); err != nil { + return err + } + + case "sent_date": + if err := dec.Decode(&s.SentDate); err != nil { + return err + } + + case "subject": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Subject = o + + case "to": + if err := dec.Decode(&s.To); err != nil { + 
return err + } + + } + } + return nil +} + // NewEmail returns a Email. func NewEmail() *Email { r := &Email{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emailaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emailaction.go index 098b05a53..ff38f5447 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emailaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emailaction.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/emailpriority" ) // EmailAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L252-L252 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L252-L252 type EmailAction struct { Attachments map[string]EmailAttachmentContainer `json:"attachments,omitempty"` Bcc []string `json:"bcc,omitempty"` @@ -41,6 +47,98 @@ type EmailAction struct { To []string `json:"to"` } +func (s *EmailAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attachments": + if s.Attachments == nil { + s.Attachments = make(map[string]EmailAttachmentContainer, 0) + } + if err := dec.Decode(&s.Attachments); err != nil { + return err + } + + case "bcc": + if err := dec.Decode(&s.Bcc); err != nil { + return err 
+ } + + case "body": + if err := dec.Decode(&s.Body); err != nil { + return err + } + + case "cc": + if err := dec.Decode(&s.Cc); err != nil { + return err + } + + case "from": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.From = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "priority": + if err := dec.Decode(&s.Priority); err != nil { + return err + } + + case "reply_to": + if err := dec.Decode(&s.ReplyTo); err != nil { + return err + } + + case "sent_date": + if err := dec.Decode(&s.SentDate); err != nil { + return err + } + + case "subject": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Subject = o + + case "to": + if err := dec.Decode(&s.To); err != nil { + return err + } + + } + } + return nil +} + // NewEmailAction returns a EmailAction. func NewEmailAction() *EmailAction { r := &EmailAction{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emailattachmentcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emailattachmentcontainer.go index 8fd4b6c00..ccccf2b9f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emailattachmentcontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emailattachmentcontainer.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // EmailAttachmentContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L211-L216 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L211-L216 type EmailAttachmentContainer struct { Data *DataEmailAttachment `json:"data,omitempty"` Http *HttpEmailAttachment `json:"http,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emailbody.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emailbody.go index 60d4a20f0..8bebfebb3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emailbody.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emailbody.go @@ -16,18 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // EmailBody type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L192-L195 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L192-L195 type EmailBody struct { Html *string `json:"html,omitempty"` Text *string `json:"text,omitempty"` } +func (s *EmailBody) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "html": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Html = &o + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = &o + + } + } + return nil +} + // NewEmailBody returns a EmailBody. func NewEmailBody() *EmailBody { r := &EmailBody{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emailresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emailresult.go index 40800593f..fe2b35901 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emailresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emailresult.go @@ -16,19 +16,76 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // EmailResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L205-L209 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L205-L209 type EmailResult struct { Account *string `json:"account,omitempty"` Message Email `json:"message"` Reason *string `json:"reason,omitempty"` } +func (s *EmailResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "account": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Account = &o + + case "message": + if err := dec.Decode(&s.Message); err != nil { + return err + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + } + } + return nil +} + // NewEmailResult returns a EmailResult. func NewEmailResult() *EmailResult { r := &EmailResult{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emptyobject.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emptyobject.go index 99a41c8e4..c8db930ce 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emptyobject.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/emptyobject.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // EmptyObject type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L140-L141 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L154-L155 type EmptyObject struct { } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enrichpolicy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enrichpolicy.go index 8d7b94092..15ad8d512 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enrichpolicy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enrichpolicy.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // EnrichPolicy type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/enrich/_types/Policy.ts#L33-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/enrich/_types/Policy.ts#L33-L40 type EnrichPolicy struct { ElasticsearchVersion *string `json:"elasticsearch_version,omitempty"` EnrichFields []string `json:"enrich_fields"` @@ -32,6 +40,92 @@ type EnrichPolicy struct { Query *string `json:"query,omitempty"` } +func (s *EnrichPolicy) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "elasticsearch_version": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ElasticsearchVersion = &o + + case "enrich_fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.EnrichFields = append(s.EnrichFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.EnrichFields); err != nil { + return err + } + } + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return err + } + } + + case "match_field": + if err := dec.Decode(&s.MatchField); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "query": 
+ var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = &o + + } + } + return nil +} + // NewEnrichPolicy returns a EnrichPolicy. func NewEnrichPolicy() *EnrichPolicy { r := &EnrichPolicy{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enrichprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enrichprocessor.go index 6672fbdd4..02211ea3f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enrichprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enrichprocessor.go @@ -16,30 +16,209 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoshaperelation" ) // EnrichProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L201-L209 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L596-L635 type EnrichProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - MaxMatches *int `json:"max_matches,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Override *bool `json:"override,omitempty"` - PolicyName string `json:"policy_name"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field in the input document that matches the policies match_field used to + // retrieve the enrichment data. + // Supports template snippets. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // MaxMatches The maximum number of matched documents to include under the configured + // target field. + // The `target_field` will be turned into a json array if `max_matches` is + // higher than 1, otherwise `target_field` will become a json object. + // In order to avoid documents getting too large, the maximum allowed value is + // 128. + MaxMatches *int `json:"max_matches,omitempty"` + // OnFailure Handle failures for the processor. 
+ OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Override If processor will update fields with pre-existing non-null-valued field. + // When set to `false`, such fields will not be touched. + Override *bool `json:"override,omitempty"` + // PolicyName The name of the enrich policy to use. + PolicyName string `json:"policy_name"` + // ShapeRelation A spatial relation operator used to match the geoshape of incoming documents + // to documents in the enrich index. + // This option is only used for `geo_match` enrich policy types. ShapeRelation *geoshaperelation.GeoShapeRelation `json:"shape_relation,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetField string `json:"target_field"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField Field added to incoming documents to contain enrich data. This field contains + // both the `match_field` and `enrich_fields` specified in the enrich policy. + // Supports template snippets. 
+ TargetField string `json:"target_field"` +} + +func (s *EnrichProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "max_matches": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxMatches = &value + case float64: + f := int(v) + s.MaxMatches = &f + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "override": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Override = &value + case bool: + s.Override = &v + } + + case "policy_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := 
string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PolicyName = o + + case "shape_relation": + if err := dec.Decode(&s.ShapeRelation); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewEnrichProcessor returns a EnrichProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ensemble.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ensemble.go index 7678563d0..07b5a2f6a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ensemble.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ensemble.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Ensemble type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model/types.ts#L93-L99 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model/types.ts#L93-L99 type Ensemble struct { AggregateOutput *AggregateOutput `json:"aggregate_output,omitempty"` ClassificationLabels []string `json:"classification_labels,omitempty"` @@ -31,6 +39,58 @@ type Ensemble struct { TrainedModels []TrainedModel `json:"trained_models"` } +func (s *Ensemble) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregate_output": + if err := dec.Decode(&s.AggregateOutput); err != nil { + return err + } + + case "classification_labels": + if err := dec.Decode(&s.ClassificationLabels); err != nil { + return err + } + + case "feature_names": + if err := dec.Decode(&s.FeatureNames); err != nil { + return err + } + + case "target_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TargetType = &o + + case "trained_models": + if err := dec.Decode(&s.TrainedModels); err != nil { + return err + } + + } + } + return nil +} + // NewEnsemble returns a Ensemble. 
func NewEnsemble() *Ensemble { r := &Ensemble{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/accesstokengranttype/accesstokengranttype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/accesstokengranttype/accesstokengranttype.go index cc0fb9962..218c4eccc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/accesstokengranttype/accesstokengranttype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/accesstokengranttype/accesstokengranttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package accesstokengranttype package accesstokengranttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_token/types.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_token/types.ts#L23-L28 type AccessTokenGrantType struct { Name string } @@ -43,7 +43,7 @@ func (a AccessTokenGrantType) MarshalText() (text []byte, err error) { } func (a *AccessTokenGrantType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "password": *a = Password diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/acknowledgementoptions/acknowledgementoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/acknowledgementoptions/acknowledgementoptions.go index 770ace4c6..ebbc784c0 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/acknowledgementoptions/acknowledgementoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/acknowledgementoptions/acknowledgementoptions.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package acknowledgementoptions package acknowledgementoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Action.ts#L106-L110 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Action.ts#L109-L113 type AcknowledgementOptions struct { Name string } @@ -41,7 +41,7 @@ func (a AcknowledgementOptions) MarshalText() (text []byte, err error) { } func (a *AcknowledgementOptions) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "awaits_successful_execution": *a = Awaitssuccessfulexecution diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actionexecutionmode/actionexecutionmode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actionexecutionmode/actionexecutionmode.go index 32c20abf4..4555dc718 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actionexecutionmode/actionexecutionmode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actionexecutionmode/actionexecutionmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package actionexecutionmode package actionexecutionmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Action.ts#L70-L91 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Action.ts#L73-L94 type ActionExecutionMode struct { Name string } @@ -45,7 +45,7 @@ func (a ActionExecutionMode) MarshalText() (text []byte, err error) { } func (a *ActionExecutionMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "simulate": *a = Simulate diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actionstatusoptions/actionstatusoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actionstatusoptions/actionstatusoptions.go index 3ae908810..bc4fe9b32 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actionstatusoptions/actionstatusoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actionstatusoptions/actionstatusoptions.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package actionstatusoptions package actionstatusoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Action.ts#L99-L104 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Action.ts#L102-L107 type ActionStatusOptions struct { Name string } @@ -43,7 +43,7 @@ func (a ActionStatusOptions) MarshalText() (text []byte, err error) { } func (a *ActionStatusOptions) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "success": *a = Success diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actiontype/actiontype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actiontype/actiontype.go index fedfcffe0..dcfbcc2cb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actiontype/actiontype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actiontype/actiontype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package actiontype package actiontype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Action.ts#L61-L68 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Action.ts#L64-L71 type ActionType struct { Name string } @@ -47,7 +47,7 @@ func (a ActionType) MarshalText() (text []byte, err error) { } func (a *ActionType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "email": *a = Email diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/allocationexplaindecision/allocationexplaindecision.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/allocationexplaindecision/allocationexplaindecision.go index 7bccc7b42..a4ed606e5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/allocationexplaindecision/allocationexplaindecision.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/allocationexplaindecision/allocationexplaindecision.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package allocationexplaindecision package allocationexplaindecision import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/allocation_explain/types.ts#L32-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/allocation_explain/types.ts#L32-L37 type AllocationExplainDecision struct { Name string } @@ -43,7 +43,7 @@ func (a AllocationExplainDecision) MarshalText() (text []byte, err error) { } func (a *AllocationExplainDecision) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "NO": *a = NO diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/apikeygranttype/apikeygranttype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/apikeygranttype/apikeygranttype.go index 4a293e32e..cb9614019 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/apikeygranttype/apikeygranttype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/apikeygranttype/apikeygranttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package apikeygranttype package apikeygranttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/grant_api_key/types.ts#L34-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/grant_api_key/types.ts#L48-L51 type ApiKeyGrantType struct { Name string } @@ -39,7 +39,7 @@ func (a ApiKeyGrantType) MarshalText() (text []byte, err error) { } func (a *ApiKeyGrantType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "access_token": *a = Accesstoken diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/appliesto/appliesto.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/appliesto/appliesto.go index 53c829143..7fe7974c0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/appliesto/appliesto.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/appliesto/appliesto.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package appliesto package appliesto import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Rule.ts#L67-L72 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Rule.ts#L67-L72 type AppliesTo struct { Name string } @@ -43,7 +43,7 @@ func (a AppliesTo) MarshalText() (text []byte, err error) { } func (a *AppliesTo) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "actual": *a = Actual diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/boundaryscanner/boundaryscanner.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/boundaryscanner/boundaryscanner.go index a8064f988..612b3c8ee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/boundaryscanner/boundaryscanner.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/boundaryscanner/boundaryscanner.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package boundaryscanner package boundaryscanner import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/highlighting.ts#L27-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/highlighting.ts#L27-L46 type BoundaryScanner struct { Name string } @@ -41,7 +41,7 @@ func (b BoundaryScanner) MarshalText() (text []byte, err error) { } func (b *BoundaryScanner) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "chars": *b = Chars diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes/bytes.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes/bytes.go index 3a81eb2aa..1e8f3599b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes/bytes.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/bytes/bytes.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package bytes package bytes import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L149-L167 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L163-L181 type Bytes struct { Name string } @@ -47,7 +47,7 @@ func (b Bytes) MarshalText() (text []byte, err error) { } func (b *Bytes) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "b": *b = B diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/calendarinterval/calendarinterval.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/calendarinterval/calendarinterval.go index ed171585f..4f82952a2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/calendarinterval/calendarinterval.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/calendarinterval/calendarinterval.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package calendarinterval package calendarinterval import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L112-L129 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L249-L266 type CalendarInterval struct { Name string } @@ -51,7 +51,7 @@ func (c CalendarInterval) MarshalText() (text []byte, err error) { } func (c *CalendarInterval) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "second": *c = Second diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cardinalityexecutionmode/cardinalityexecutionmode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cardinalityexecutionmode/cardinalityexecutionmode.go index 0faa56571..0de6f9457 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cardinalityexecutionmode/cardinalityexecutionmode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cardinalityexecutionmode/cardinalityexecutionmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package cardinalityexecutionmode package cardinalityexecutionmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L54-L60 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L64-L85 type CardinalityExecutionMode struct { Name string } @@ -45,7 +45,7 @@ func (c CardinalityExecutionMode) MarshalText() (text []byte, err error) { } func (c *CardinalityExecutionMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "global_ordinals": *c = Globalordinals diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catanomalydetectorcolumn/catanomalydetectorcolumn.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catanomalydetectorcolumn/catanomalydetectorcolumn.go index 0c648d815..24c6236a4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catanomalydetectorcolumn/catanomalydetectorcolumn.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catanomalydetectorcolumn/catanomalydetectorcolumn.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package catanomalydetectorcolumn package catanomalydetectorcolumn import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/_types/CatBase.ts#L32-L401 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/_types/CatBase.ts#L32-L401 type CatAnomalyDetectorColumn struct { Name string } @@ -155,7 +155,7 @@ func (c CatAnomalyDetectorColumn) MarshalText() (text []byte, err error) { } func (c *CatAnomalyDetectorColumn) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "assignment_explanation": *c = Assignmentexplanation diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catdatafeedcolumn/catdatafeedcolumn.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catdatafeedcolumn/catdatafeedcolumn.go index 41faf5a67..bae6623f0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catdatafeedcolumn/catdatafeedcolumn.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catdatafeedcolumn/catdatafeedcolumn.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package catdatafeedcolumn package catdatafeedcolumn import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/_types/CatBase.ts#L405-L471 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/_types/CatBase.ts#L405-L471 type CatDatafeedColumn struct { Name string } @@ -59,7 +59,7 @@ func (c CatDatafeedColumn) MarshalText() (text []byte, err error) { } func (c *CatDatafeedColumn) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "ae": *c = Ae diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catdfacolumn/catdfacolumn.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catdfacolumn/catdfacolumn.go index a9115cc25..e6a2ce31b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catdfacolumn/catdfacolumn.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/catdfacolumn/catdfacolumn.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package catdfacolumn package catdfacolumn import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/_types/CatBase.ts#L472-L557 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/_types/CatBase.ts#L472-L557 type CatDfaColumn struct { Name string } @@ -67,7 +67,7 @@ func (c CatDfaColumn) MarshalText() (text []byte, err error) { } func (c *CatDfaColumn) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "assignment_explanation": *c = Assignmentexplanation diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/categorizationstatus/categorizationstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/categorizationstatus/categorizationstatus.go index 949d42d17..a12f694c5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/categorizationstatus/categorizationstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/categorizationstatus/categorizationstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package categorizationstatus package categorizationstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Model.ts#L80-L83 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Model.ts#L83-L86 type CategorizationStatus struct { Name string } @@ -39,7 +39,7 @@ func (c CategorizationStatus) MarshalText() (text []byte, err error) { } func (c *CategorizationStatus) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "ok": *c = Ok diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cattrainedmodelscolumn/cattrainedmodelscolumn.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cattrainedmodelscolumn/cattrainedmodelscolumn.go index d2a4346d0..51e53b633 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cattrainedmodelscolumn/cattrainedmodelscolumn.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cattrainedmodelscolumn/cattrainedmodelscolumn.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package cattrainedmodelscolumn package cattrainedmodelscolumn import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/_types/CatBase.ts#L561-L635 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/_types/CatBase.ts#L561-L635 type CatTrainedModelsColumn struct { Name string } @@ -63,7 +63,7 @@ func (c CatTrainedModelsColumn) MarshalText() (text []byte, err error) { } func (c *CatTrainedModelsColumn) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "create_time": *c = Createtime diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cattransformcolumn/cattransformcolumn.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cattransformcolumn/cattransformcolumn.go index 1dd3f5c49..55f08c046 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cattransformcolumn/cattransformcolumn.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cattransformcolumn/cattransformcolumn.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package cattransformcolumn package cattransformcolumn import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/_types/CatBase.ts#L640-L844 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/_types/CatBase.ts#L640-L844 type CatTransformColumn struct { Name string } @@ -101,7 +101,7 @@ func (c CatTransformColumn) MarshalText() (text []byte, err error) { } func (c *CatTransformColumn) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "changes_last_detection_time": *c = Changeslastdetectiontime diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/childscoremode/childscoremode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/childscoremode/childscoremode.go index 07aed00b9..643fbf846 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/childscoremode/childscoremode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/childscoremode/childscoremode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package childscoremode package childscoremode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/joining.ts#L25-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/joining.ts#L25-L39 type ChildScoreMode struct { Name string } @@ -45,7 +45,7 @@ func (c ChildScoreMode) MarshalText() (text []byte, err error) { } func (c *ChildScoreMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "none": *c = None diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/chunkingmode/chunkingmode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/chunkingmode/chunkingmode.go index 2bcfcfbde..d8b113658 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/chunkingmode/chunkingmode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/chunkingmode/chunkingmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package chunkingmode package chunkingmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Datafeed.ts#L171-L175 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Datafeed.ts#L233-L237 type ChunkingMode struct { Name string } @@ -41,7 +41,7 @@ func (c ChunkingMode) MarshalText() (text []byte, err error) { } func (c *ChunkingMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "auto": *c = Auto diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterinfotarget/clusterinfotarget.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterinfotarget/clusterinfotarget.go new file mode 100644 index 000000000..f49ed093b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterinfotarget/clusterinfotarget.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Package clusterinfotarget +package clusterinfotarget + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L378-L384 +type ClusterInfoTarget struct { + Name string +} + +var ( + All = ClusterInfoTarget{"_all"} + + Http = ClusterInfoTarget{"http"} + + Ingest = ClusterInfoTarget{"ingest"} + + Threadpool = ClusterInfoTarget{"thread_pool"} + + Script = ClusterInfoTarget{"script"} +) + +func (c ClusterInfoTarget) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *ClusterInfoTarget) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "_all": + *c = All + case "http": + *c = Http + case "ingest": + *c = Ingest + case "thread_pool": + *c = Threadpool + case "script": + *c = Script + default: + *c = ClusterInfoTarget{string(text)} + } + + return nil +} + +func (c ClusterInfoTarget) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterprivilege/clusterprivilege.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterprivilege/clusterprivilege.go index c57234e24..30cdef544 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterprivilege/clusterprivilege.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterprivilege/clusterprivilege.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package clusterprivilege package clusterprivilege import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/Privileges.ts#L41-L79 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/Privileges.ts#L41-L80 type ClusterPrivilege struct { Name string } @@ -109,7 +109,7 @@ func (c ClusterPrivilege) MarshalText() (text []byte, err error) { } func (c *ClusterPrivilege) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "all": *c = All diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clustersearchstatus/clustersearchstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clustersearchstatus/clustersearchstatus.go new file mode 100644 index 000000000..db59a1b25 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clustersearchstatus/clustersearchstatus.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Package clustersearchstatus +package clustersearchstatus + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L37-L43 +type ClusterSearchStatus struct { + Name string +} + +var ( + Running = ClusterSearchStatus{"running"} + + Successful = ClusterSearchStatus{"successful"} + + Partial = ClusterSearchStatus{"partial"} + + Skipped = ClusterSearchStatus{"skipped"} + + Failed = ClusterSearchStatus{"failed"} +) + +func (c ClusterSearchStatus) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *ClusterSearchStatus) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "running": + *c = Running + case "successful": + *c = Successful + case "partial": + *c = Partial + case "skipped": + *c = Skipped + case "failed": + *c = Failed + default: + *c = ClusterSearchStatus{string(text)} + } + + return nil +} + +func (c ClusterSearchStatus) String() string { + return c.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/combinedfieldsoperator/combinedfieldsoperator.go 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/combinedfieldsoperator/combinedfieldsoperator.go index 246ec6861..e68a3f313 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/combinedfieldsoperator/combinedfieldsoperator.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/combinedfieldsoperator/combinedfieldsoperator.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package combinedfieldsoperator package combinedfieldsoperator import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/abstractions.ts#L202-L205 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/abstractions.ts#L473-L476 type CombinedFieldsOperator struct { Name string } @@ -39,7 +39,7 @@ func (c CombinedFieldsOperator) MarshalText() (text []byte, err error) { } func (c *CombinedFieldsOperator) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "or": *c = Or diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/combinedfieldszeroterms/combinedfieldszeroterms.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/combinedfieldszeroterms/combinedfieldszeroterms.go index b5a717428..13bcd55df 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/combinedfieldszeroterms/combinedfieldszeroterms.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/combinedfieldszeroterms/combinedfieldszeroterms.go 
@@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package combinedfieldszeroterms package combinedfieldszeroterms import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/abstractions.ts#L207-L210 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/abstractions.ts#L478-L487 type CombinedFieldsZeroTerms struct { Name string } @@ -39,7 +39,7 @@ func (c CombinedFieldsZeroTerms) MarshalText() (text []byte, err error) { } func (c *CombinedFieldsZeroTerms) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "none": *c = None diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditionop/conditionop.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditionop/conditionop.go index 3885dd504..7ab11b5d4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditionop/conditionop.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditionop/conditionop.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package conditionop package conditionop import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Conditions.ts#L38-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Conditions.ts#L38-L45 type ConditionOp struct { Name string } @@ -47,7 +47,7 @@ func (c ConditionOp) MarshalText() (text []byte, err error) { } func (c *ConditionOp) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "not_eq": *c = Noteq diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditionoperator/conditionoperator.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditionoperator/conditionoperator.go index d485328c8..97ec3b25d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditionoperator/conditionoperator.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditionoperator/conditionoperator.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package conditionoperator package conditionoperator import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Rule.ts#L74-L79 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Rule.ts#L74-L79 type ConditionOperator struct { Name string } @@ -43,7 +43,7 @@ func (c ConditionOperator) MarshalText() (text []byte, err error) { } func (c *ConditionOperator) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "gt": *c = Gt diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditiontype/conditiontype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditiontype/conditiontype.go index 8dfa5eaaf..4b84bc204 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditiontype/conditiontype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditiontype/conditiontype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package conditiontype package conditiontype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Conditions.ts#L61-L67 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Conditions.ts#L61-L67 type ConditionType struct { Name string } @@ -45,7 +45,7 @@ func (c ConditionType) MarshalText() (text []byte, err error) { } func (c *ConditionType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "always": *c = Always diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conflicts/conflicts.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conflicts/conflicts.go index 21287e3d7..14b50e18f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conflicts/conflicts.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conflicts/conflicts.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package conflicts package conflicts import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L169-L172 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L183-L192 type Conflicts struct { Name string } @@ -39,7 +39,7 @@ func (c Conflicts) MarshalText() (text []byte, err error) { } func (c *Conflicts) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "abort": *c = Abort diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/connectionscheme/connectionscheme.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/connectionscheme/connectionscheme.go index 3b18de029..ca59f620f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/connectionscheme/connectionscheme.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/connectionscheme/connectionscheme.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package connectionscheme package connectionscheme import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Input.ts#L39-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Input.ts#L39-L42 type ConnectionScheme struct { Name string } @@ -39,7 +39,7 @@ func (c ConnectionScheme) MarshalText() (text []byte, err error) { } func (c *ConnectionScheme) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "http": *c = Http diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/converttype/converttype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/converttype/converttype.go index 7dace7e7d..9bcf54d92 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/converttype/converttype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/converttype/converttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package converttype package converttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L137-L145 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L424-L432 type ConvertType struct { Name string } @@ -49,7 +49,7 @@ func (c ConvertType) MarshalText() (text []byte, err error) { } func (c *ConvertType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "integer": *c = Integer diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dataattachmentformat/dataattachmentformat.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dataattachmentformat/dataattachmentformat.go index b11424061..c6316dc22 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dataattachmentformat/dataattachmentformat.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dataattachmentformat/dataattachmentformat.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package dataattachmentformat package dataattachmentformat import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L187-L190 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L187-L190 type DataAttachmentFormat struct { Name string } @@ -39,7 +39,7 @@ func (d DataAttachmentFormat) MarshalText() (text []byte, err error) { } func (d *DataAttachmentFormat) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "json": *d = Json diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/datafeedstate/datafeedstate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/datafeedstate/datafeedstate.go index dfbb916a9..500685e27 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/datafeedstate/datafeedstate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/datafeedstate/datafeedstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package datafeedstate package datafeedstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Datafeed.ts#L133-L138 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Datafeed.ts#L133-L138 type DatafeedState struct { Name string } @@ -43,7 +43,7 @@ func (d DatafeedState) MarshalText() (text []byte, err error) { } func (d *DatafeedState) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "started": *d = Started diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dataframestate/dataframestate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dataframestate/dataframestate.go index fe67eb8b5..7e5e55988 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dataframestate/dataframestate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dataframestate/dataframestate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package dataframestate package dataframestate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Dataframe.ts#L20-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Dataframe.ts#L20-L26 type DataframeState struct { Name string } @@ -45,7 +45,7 @@ func (d DataframeState) MarshalText() (text []byte, err error) { } func (d *DataframeState) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "started": *d = Started diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/day/day.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/day/day.go index 5695139fe..36d0eea08 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/day/day.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/day/day.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package day package day import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Schedule.ts#L37-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Schedule.ts#L37-L45 type Day struct { Name string } @@ -49,7 +49,7 @@ func (d Day) MarshalText() (text []byte, err error) { } func (d *Day) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "sunday": *d = Sunday diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/decision/decision.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/decision/decision.go index 9522be006..96b253d05 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/decision/decision.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/decision/decision.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package decision package decision import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/allocation_explain/types.ts#L86-L95 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/allocation_explain/types.ts#L86-L95 type Decision struct { Name string } @@ -51,7 +51,7 @@ func (d Decision) MarshalText() (text []byte, err error) { } func (d *Decision) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "yes": *d = Yes diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/delimitedpayloadencoding/delimitedpayloadencoding.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/delimitedpayloadencoding/delimitedpayloadencoding.go index 648072016..8167619e3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/delimitedpayloadencoding/delimitedpayloadencoding.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/delimitedpayloadencoding/delimitedpayloadencoding.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package delimitedpayloadencoding package delimitedpayloadencoding import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L61-L65 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L62-L66 type DelimitedPayloadEncoding struct { Name string } @@ -41,7 +41,7 @@ func (d DelimitedPayloadEncoding) MarshalText() (text []byte, err error) { } func (d *DelimitedPayloadEncoding) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "int": *d = Int diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentallocationstate/deploymentallocationstate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentallocationstate/deploymentallocationstate.go index a4351884f..f19abba1a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentallocationstate/deploymentallocationstate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentallocationstate/deploymentallocationstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package deploymentallocationstate package deploymentallocationstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L278-L291 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L288-L301 type DeploymentAllocationState struct { Name string } @@ -41,7 +41,7 @@ func (d DeploymentAllocationState) MarshalText() (text []byte, err error) { } func (d *DeploymentAllocationState) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "started": *d = Started diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentassignmentstate/deploymentassignmentstate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentassignmentstate/deploymentassignmentstate.go index cb692a051..4f3a684a2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentassignmentstate/deploymentassignmentstate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentassignmentstate/deploymentassignmentstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package deploymentassignmentstate package deploymentassignmentstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L293-L298 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L303-L308 type DeploymentAssignmentState struct { Name string } @@ -43,7 +43,7 @@ func (d DeploymentAssignmentState) MarshalText() (text []byte, err error) { } func (d *DeploymentAssignmentState) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "starting": *d = Starting diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentstate/deploymentstate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentstate/deploymentstate.go index 7e6a9e168..a71e5aa57 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentstate/deploymentstate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentstate/deploymentstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package deploymentstate package deploymentstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L263-L276 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L273-L286 type DeploymentState struct { Name string } @@ -41,7 +41,7 @@ func (d DeploymentState) MarshalText() (text []byte, err error) { } func (d *DeploymentState) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "started": *d = Started diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deprecationlevel/deprecationlevel.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deprecationlevel/deprecationlevel.go index bbf357e5a..11f68626b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deprecationlevel/deprecationlevel.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deprecationlevel/deprecationlevel.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package deprecationlevel package deprecationlevel import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/migration/deprecations/types.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/migration/deprecations/types.ts#L20-L27 type DeprecationLevel struct { Name string } @@ -43,7 +43,7 @@ func (d DeprecationLevel) MarshalText() (text []byte, err error) { } func (d *DeprecationLevel) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "none": *d = None diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfiindependencemeasure/dfiindependencemeasure.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfiindependencemeasure/dfiindependencemeasure.go index f2a6d5d8f..fe4fa82a7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfiindependencemeasure/dfiindependencemeasure.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfiindependencemeasure/dfiindependencemeasure.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package dfiindependencemeasure package dfiindependencemeasure import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Similarity.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Similarity.ts#L20-L24 type DFIIndependenceMeasure struct { Name string } @@ -41,7 +41,7 @@ func (d DFIIndependenceMeasure) MarshalText() (text []byte, err error) { } func (d *DFIIndependenceMeasure) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "standardized": *d = Standardized diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfraftereffect/dfraftereffect.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfraftereffect/dfraftereffect.go index 64a8167c3..8b997d883 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfraftereffect/dfraftereffect.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfraftereffect/dfraftereffect.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package dfraftereffect package dfraftereffect import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Similarity.ts#L26-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Similarity.ts#L26-L30 type DFRAfterEffect struct { Name string } @@ -41,7 +41,7 @@ func (d DFRAfterEffect) MarshalText() (text []byte, err error) { } func (d *DFRAfterEffect) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "no": *d = No diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfrbasicmodel/dfrbasicmodel.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfrbasicmodel/dfrbasicmodel.go index 840993d5c..629661a34 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfrbasicmodel/dfrbasicmodel.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfrbasicmodel/dfrbasicmodel.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package dfrbasicmodel package dfrbasicmodel import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Similarity.ts#L32-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Similarity.ts#L32-L40 type DFRBasicModel struct { Name string } @@ -49,7 +49,7 @@ func (d DFRBasicModel) MarshalText() (text []byte, err error) { } func (d *DFRBasicModel) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "be": *d = Be diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/distanceunit/distanceunit.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/distanceunit/distanceunit.go index 5228cb5d0..1788666aa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/distanceunit/distanceunit.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/distanceunit/distanceunit.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package distanceunit package distanceunit import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Geo.ts#L30-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Geo.ts#L30-L49 type DistanceUnit struct { Name string } @@ -53,7 +53,7 @@ func (d DistanceUnit) MarshalText() (text []byte, err error) { } func (d *DistanceUnit) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "in": *d = In diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping/dynamicmapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping/dynamicmapping.go index dc00bcf8a..5a044d4a4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping/dynamicmapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping/dynamicmapping.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package dynamicmapping package dynamicmapping import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/dynamic-template.ts#L37-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/dynamic-template.ts#L37-L46 type DynamicMapping struct { Name string } @@ -38,12 +38,16 @@ var ( False = DynamicMapping{"false"} ) +func (d *DynamicMapping) UnmarshalJSON(data []byte) error { + return d.UnmarshalText(data) +} + func (d DynamicMapping) MarshalText() (text []byte, err error) { return []byte(d.String()), nil } func (d *DynamicMapping) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "strict": *d = Strict diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/edgengramside/edgengramside.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/edgengramside/edgengramside.go index e51a32750..a4a7ba845 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/edgengramside/edgengramside.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/edgengramside/edgengramside.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package edgengramside package edgengramside import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L73-L76 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L74-L77 type EdgeNGramSide struct { Name string } @@ -39,7 +39,7 @@ func (e EdgeNGramSide) MarshalText() (text []byte, err error) { } func (e *EdgeNGramSide) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "front": *e = Front diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/emailpriority/emailpriority.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/emailpriority/emailpriority.go index b5b8a49d4..d6707667f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/emailpriority/emailpriority.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/emailpriority/emailpriority.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package emailpriority package emailpriority import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L197-L203 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L197-L203 type EmailPriority struct { Name string } @@ -45,7 +45,7 @@ func (e EmailPriority) MarshalText() (text []byte, err error) { } func (e *EmailPriority) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "lowest": *e = Lowest diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/enrichpolicyphase/enrichpolicyphase.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/enrichpolicyphase/enrichpolicyphase.go index 57d058f9b..ba861429c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/enrichpolicyphase/enrichpolicyphase.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/enrichpolicyphase/enrichpolicyphase.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package enrichpolicyphase package enrichpolicyphase import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/enrich/execute_policy/types.ts#L24-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/enrich/execute_policy/types.ts#L24-L29 type EnrichPolicyPhase struct { Name string } @@ -43,7 +43,7 @@ func (e EnrichPolicyPhase) MarshalText() (text []byte, err error) { } func (e *EnrichPolicyPhase) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "SCHEDULED": *e = SCHEDULED diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/excludefrequent/excludefrequent.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/excludefrequent/excludefrequent.go index 66b16564a..5c317e766 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/excludefrequent/excludefrequent.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/excludefrequent/excludefrequent.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package excludefrequent package excludefrequent import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Detector.ts#L82-L87 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Detector.ts#L127-L132 type ExcludeFrequent struct { Name string } @@ -43,7 +43,7 @@ func (e ExcludeFrequent) MarshalText() (text []byte, err error) { } func (e *ExcludeFrequent) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "all": *e = All diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/executionphase/executionphase.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/executionphase/executionphase.go index 6937bf1aa..17f31adb2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/executionphase/executionphase.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/executionphase/executionphase.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package executionphase package executionphase import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Execution.ts#L49-L58 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Execution.ts#L49-L58 type ExecutionPhase struct { Name string } @@ -51,7 +51,7 @@ func (e ExecutionPhase) MarshalText() (text []byte, err error) { } func (e *ExecutionPhase) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "awaits_execution": *e = Awaitsexecution diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/executionstatus/executionstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/executionstatus/executionstatus.go index 5c2bc79d5..94da2913d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/executionstatus/executionstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/executionstatus/executionstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package executionstatus package executionstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Execution.ts#L38-L47 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Execution.ts#L38-L47 type ExecutionStatus struct { Name string } @@ -51,7 +51,7 @@ func (e ExecutionStatus) MarshalText() (text []byte, err error) { } func (e *ExecutionStatus) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "awaits_execution": *e = Awaitsexecution diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard/expandwildcard.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard/expandwildcard.go index d28967e6c..7745561d3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard/expandwildcard.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard/expandwildcard.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package expandwildcard package expandwildcard import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L181-L195 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L201-L215 type ExpandWildcard struct { Name string } @@ -45,7 +45,7 @@ func (e ExpandWildcard) MarshalText() (text []byte, err error) { } func (e *ExpandWildcard) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "all": *e = All diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/feature/feature.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/feature/feature.go index a45ce2cf9..74deca79a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/feature/feature.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/feature/feature.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package feature package feature import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/get/IndicesGetRequest.ts#L89-L93 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/get/IndicesGetRequest.ts#L90-L94 type Feature struct { Name string } @@ -41,7 +41,7 @@ func (f Feature) MarshalText() (text []byte, err error) { } func (f *Feature) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "aliases": *f = Aliases diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldsortnumerictype/fieldsortnumerictype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldsortnumerictype/fieldsortnumerictype.go index 8b88353ff..b1cef6c19 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldsortnumerictype/fieldsortnumerictype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldsortnumerictype/fieldsortnumerictype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package fieldsortnumerictype package fieldsortnumerictype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/sort.ts#L37-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/sort.ts#L37-L42 type FieldSortNumericType struct { Name string } @@ -43,7 +43,7 @@ func (f FieldSortNumericType) MarshalText() (text []byte, err error) { } func (f *FieldSortNumericType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "long": *f = Long diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldtype/fieldtype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldtype/fieldtype.go index 6b47b7e13..6e10051ce 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldtype/fieldtype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldtype/fieldtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package fieldtype package fieldtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/Property.ts#L158-L201 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/Property.ts#L158-L202 type FieldType struct { Name string } @@ -111,6 +111,8 @@ var ( Densevector = FieldType{"dense_vector"} + Sparsevector = FieldType{"sparse_vector"} + Matchonlytext = FieldType{"match_only_text"} ) @@ -119,7 +121,7 @@ func (f FieldType) MarshalText() (text []byte, err error) { } func (f *FieldType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "none": *f = None @@ -203,6 +205,8 @@ func (f *FieldType) UnmarshalText(text []byte) error { *f = Aggregatemetricdouble case "dense_vector": *f = Densevector + case "sparse_vector": + *f = Sparsevector case "match_only_text": *f = Matchonlytext default: diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldvaluefactormodifier/fieldvaluefactormodifier.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldvaluefactormodifier/fieldvaluefactormodifier.go index d39c3821d..faf3e216d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldvaluefactormodifier/fieldvaluefactormodifier.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldvaluefactormodifier/fieldvaluefactormodifier.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package fieldvaluefactormodifier package fieldvaluefactormodifier import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L147-L158 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L298-L341 type FieldValueFactorModifier struct { Name string } @@ -55,7 +55,7 @@ func (f FieldValueFactorModifier) MarshalText() (text []byte, err error) { } func (f *FieldValueFactorModifier) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "none": *f = None diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/filtertype/filtertype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/filtertype/filtertype.go index a4f5f36ad..3cf6c58bd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/filtertype/filtertype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/filtertype/filtertype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package filtertype package filtertype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Filter.ts#L43-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Filter.ts#L43-L46 type FilterType struct { Name string } @@ -39,7 +39,7 @@ func (f FilterType) MarshalText() (text []byte, err error) { } func (f *FilterType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "include": *f = Include diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/followerindexstatus/followerindexstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/followerindexstatus/followerindexstatus.go index 1050f895f..28e4479fe 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/followerindexstatus/followerindexstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/followerindexstatus/followerindexstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package followerindexstatus package followerindexstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/follow_info/types.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/follow_info/types.ts#L30-L33 type FollowerIndexStatus struct { Name string } @@ -39,7 +39,7 @@ func (f FollowerIndexStatus) MarshalText() (text []byte, err error) { } func (f *FollowerIndexStatus) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "active": *f = Active diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/functionboostmode/functionboostmode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/functionboostmode/functionboostmode.go index abd5e450e..f6a75584e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/functionboostmode/functionboostmode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/functionboostmode/functionboostmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package functionboostmode package functionboostmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L138-L145 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L270-L296 type FunctionBoostMode struct { Name string } @@ -47,7 +47,7 @@ func (f FunctionBoostMode) MarshalText() (text []byte, err error) { } func (f *FunctionBoostMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "multiply": *f = Multiply diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/functionscoremode/functionscoremode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/functionscoremode/functionscoremode.go index d188289c7..255cc3686 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/functionscoremode/functionscoremode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/functionscoremode/functionscoremode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package functionscoremode package functionscoremode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L129-L136 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L243-L268 type FunctionScoreMode struct { Name string } @@ -47,7 +47,7 @@ func (f FunctionScoreMode) MarshalText() (text []byte, err error) { } func (f *FunctionScoreMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "multiply": *f = Multiply diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy/gappolicy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy/gappolicy.go index 30784bbb2..28110f87d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy/gappolicy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy/gappolicy.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package gappolicy package gappolicy import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L52-L67 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L61-L76 type GapPolicy struct { Name string } @@ -41,7 +41,7 @@ func (g GapPolicy) MarshalText() (text []byte, err error) { } func (g *GapPolicy) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "skip": *g = Skip diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geodistancetype/geodistancetype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geodistancetype/geodistancetype.go index 141c5ea99..f20790f90 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geodistancetype/geodistancetype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geodistancetype/geodistancetype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package geodistancetype package geodistancetype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Geo.ts#L51-L54 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Geo.ts#L51-L60 type GeoDistanceType struct { Name string } @@ -39,7 +39,7 @@ func (g GeoDistanceType) MarshalText() (text []byte, err error) { } func (g *GeoDistanceType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "arc": *g = Arc diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoexecution/geoexecution.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoexecution/geoexecution.go index 25aafe5c8..a357cb72e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoexecution/geoexecution.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoexecution/geoexecution.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package geoexecution package geoexecution import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/geo.ts#L43-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/geo.ts#L52-L55 type GeoExecution struct { Name string } @@ -39,7 +39,7 @@ func (g GeoExecution) MarshalText() (text []byte, err error) { } func (g *GeoExecution) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "memory": *g = Memory diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoorientation/geoorientation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoorientation/geoorientation.go index a0a7a727e..690281d3f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoorientation/geoorientation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoorientation/geoorientation.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package geoorientation package geoorientation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/geo.ts#L30-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/geo.ts#L30-L35 type GeoOrientation struct { Name string } @@ -39,7 +39,7 @@ func (g GeoOrientation) MarshalText() (text []byte, err error) { } func (g *GeoOrientation) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "right": *g = Right diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoshaperelation/geoshaperelation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoshaperelation/geoshaperelation.go index ff990dc09..9a0b1c1f1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoshaperelation/geoshaperelation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoshaperelation/geoshaperelation.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package geoshaperelation package geoshaperelation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Geo.ts#L67-L72 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Geo.ts#L73-L91 type GeoShapeRelation struct { Name string } @@ -43,7 +43,7 @@ func (g GeoShapeRelation) MarshalText() (text []byte, err error) { } func (g *GeoShapeRelation) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "intersects": *g = Intersects diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geostrategy/geostrategy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geostrategy/geostrategy.go index 8b726d3e6..ee1d8098b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geostrategy/geostrategy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geostrategy/geostrategy.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package geostrategy package geostrategy import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/geo.ts#L52-L55 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/geo.ts#L52-L55 type GeoStrategy struct { Name string } @@ -39,7 +39,7 @@ func (g GeoStrategy) MarshalText() (text []byte, err error) { } func (g *GeoStrategy) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "recursive": *g = Recursive diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geovalidationmethod/geovalidationmethod.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geovalidationmethod/geovalidationmethod.go index 172313228..e2fbe2853 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geovalidationmethod/geovalidationmethod.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geovalidationmethod/geovalidationmethod.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package geovalidationmethod package geovalidationmethod import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/geo.ts#L107-L111 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/geo.ts#L141-L151 type GeoValidationMethod struct { Name string } @@ -41,7 +41,7 @@ func (g GeoValidationMethod) MarshalText() (text []byte, err error) { } func (g *GeoValidationMethod) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "coerce": *g = Coerce diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/granttype/granttype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/granttype/granttype.go index 1fccea4f1..a4446499e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/granttype/granttype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/granttype/granttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package granttype package granttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/GrantType.ts#L20-L23 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/GrantType.ts#L20-L29 type GrantType struct { Name string } @@ -39,7 +39,7 @@ func (g GrantType) MarshalText() (text []byte, err error) { } func (g *GrantType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "password": *g = Password diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gridaggregationtype/gridaggregationtype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gridaggregationtype/gridaggregationtype.go index 3673e11a5..501d90d17 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gridaggregationtype/gridaggregationtype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gridaggregationtype/gridaggregationtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package gridaggregationtype package gridaggregationtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search_mvt/_types/GridType.ts#L27-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search_mvt/_types/GridType.ts#L30-L33 type GridAggregationType struct { Name string } @@ -39,7 +39,7 @@ func (g GridAggregationType) MarshalText() (text []byte, err error) { } func (g *GridAggregationType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "geotile": *g = Geotile diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gridtype/gridtype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gridtype/gridtype.go index a48717008..1325dac93 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gridtype/gridtype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gridtype/gridtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package gridtype package gridtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search_mvt/_types/GridType.ts#L20-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search_mvt/_types/GridType.ts#L20-L28 type GridType struct { Name string } @@ -41,7 +41,7 @@ func (g GridType) MarshalText() (text []byte, err error) { } func (g *GridType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "grid": *g = Grid diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/groupby/groupby.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/groupby/groupby.go index 16e5e5a4d..a6a458c3a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/groupby/groupby.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/groupby/groupby.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package groupby package groupby import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/tasks/_types/GroupBy.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/tasks/_types/GroupBy.ts#L20-L27 type GroupBy struct { Name string } @@ -41,7 +41,7 @@ func (g GroupBy) MarshalText() (text []byte, err error) { } func (g *GroupBy) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "nodes": *g = Nodes diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/healthstatus/healthstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/healthstatus/healthstatus.go index b9972b164..3666dfbca 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/healthstatus/healthstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/healthstatus/healthstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package healthstatus package healthstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L199-L219 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L219-L239 type HealthStatus struct { Name string } @@ -41,7 +41,7 @@ func (h HealthStatus) MarshalText() (text []byte, err error) { } func (h *HealthStatus) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "green": *h = Green diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterencoder/highlighterencoder.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterencoder/highlighterencoder.go index cbf3a19dd..d55f4531f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterencoder/highlighterencoder.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterencoder/highlighterencoder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package highlighterencoder package highlighterencoder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/highlighting.ts#L62-L65 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/highlighting.ts#L158-L161 type HighlighterEncoder struct { Name string } @@ -39,7 +39,7 @@ func (h HighlighterEncoder) MarshalText() (text []byte, err error) { } func (h *HighlighterEncoder) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "default": *h = Default diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterfragmenter/highlighterfragmenter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterfragmenter/highlighterfragmenter.go index 1f8a52790..cecfd770f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterfragmenter/highlighterfragmenter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterfragmenter/highlighterfragmenter.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package highlighterfragmenter package highlighterfragmenter import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/highlighting.ts#L67-L70 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/highlighting.ts#L163-L166 type HighlighterFragmenter struct { Name string } @@ -39,7 +39,7 @@ func (h HighlighterFragmenter) MarshalText() (text []byte, err error) { } func (h *HighlighterFragmenter) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "simple": *h = Simple diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterorder/highlighterorder.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterorder/highlighterorder.go index 729e4eb30..ad2ee7c70 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterorder/highlighterorder.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterorder/highlighterorder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package highlighterorder package highlighterorder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/highlighting.ts#L72-L74 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/highlighting.ts#L168-L170 type HighlighterOrder struct { Name string } @@ -37,7 +37,7 @@ func (h HighlighterOrder) MarshalText() (text []byte, err error) { } func (h *HighlighterOrder) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "score": *h = Score diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlightertagsschema/highlightertagsschema.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlightertagsschema/highlightertagsschema.go index 28ead9d53..ab39fc1dc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlightertagsschema/highlightertagsschema.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlightertagsschema/highlightertagsschema.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package highlightertagsschema package highlightertagsschema import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/highlighting.ts#L76-L78 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/highlighting.ts#L172-L174 type HighlighterTagsSchema struct { Name string } @@ -37,7 +37,7 @@ func (h HighlighterTagsSchema) MarshalText() (text []byte, err error) { } func (h *HighlighterTagsSchema) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "styled": *h = Styled diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlightertype/highlightertype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlightertype/highlightertype.go index 79c79d9e8..298cf243a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlightertype/highlightertype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlightertype/highlightertype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package highlightertype package highlightertype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/highlighting.ts#L80-L86 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/highlighting.ts#L176-L191 type HighlighterType struct { Name string } @@ -41,7 +41,7 @@ func (h HighlighterType) MarshalText() (text []byte, err error) { } func (h *HighlighterType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "plain": *h = Plain diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/holtwinterstype/holtwinterstype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/holtwinterstype/holtwinterstype.go index 9e6ea54ca..8b2dd56ce 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/holtwinterstype/holtwinterstype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/holtwinterstype/holtwinterstype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package holtwinterstype package holtwinterstype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L243-L248 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L283-L288 type HoltWintersType struct { Name string } @@ -39,7 +39,7 @@ func (h HoltWintersType) MarshalText() (text []byte, err error) { } func (h *HoltWintersType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "add": *h = Add diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/httpinputmethod/httpinputmethod.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/httpinputmethod/httpinputmethod.go index 6ab1c6375..128197e38 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/httpinputmethod/httpinputmethod.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/httpinputmethod/httpinputmethod.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package httpinputmethod package httpinputmethod import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Input.ts#L59-L65 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Input.ts#L59-L65 type HttpInputMethod struct { Name string } @@ -45,7 +45,7 @@ func (h HttpInputMethod) MarshalText() (text []byte, err error) { } func (h *HttpInputMethod) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "head": *h = Head diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ibdistribution/ibdistribution.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ibdistribution/ibdistribution.go index 0f4facf16..a4cbcd48d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ibdistribution/ibdistribution.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ibdistribution/ibdistribution.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package ibdistribution package ibdistribution import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Similarity.ts#L42-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Similarity.ts#L42-L45 type IBDistribution struct { Name string } @@ -39,7 +39,7 @@ func (i IBDistribution) MarshalText() (text []byte, err error) { } func (i *IBDistribution) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "ll": *i = Ll diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/iblambda/iblambda.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/iblambda/iblambda.go index a4e35cced..5c91a98fe 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/iblambda/iblambda.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/iblambda/iblambda.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package iblambda package iblambda import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Similarity.ts#L47-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Similarity.ts#L47-L50 type IBLambda struct { Name string } @@ -39,7 +39,7 @@ func (i IBLambda) MarshalText() (text []byte, err error) { } func (i *IBLambda) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "df": *i = Df diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationalternate/icucollationalternate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationalternate/icucollationalternate.go index 39a79f806..7d587d639 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationalternate/icucollationalternate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationalternate/icucollationalternate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package icucollationalternate package icucollationalternate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/icu-plugin.ts#L89-L92 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/icu-plugin.ts#L89-L92 type IcuCollationAlternate struct { Name string } @@ -39,7 +39,7 @@ func (i IcuCollationAlternate) MarshalText() (text []byte, err error) { } func (i *IcuCollationAlternate) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "shifted": *i = Shifted diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationcasefirst/icucollationcasefirst.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationcasefirst/icucollationcasefirst.go index 6974dbd1c..728c83157 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationcasefirst/icucollationcasefirst.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationcasefirst/icucollationcasefirst.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package icucollationcasefirst package icucollationcasefirst import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/icu-plugin.ts#L94-L97 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/icu-plugin.ts#L94-L97 type IcuCollationCaseFirst struct { Name string } @@ -39,7 +39,7 @@ func (i IcuCollationCaseFirst) MarshalText() (text []byte, err error) { } func (i *IcuCollationCaseFirst) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "lower": *i = Lower diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationdecomposition/icucollationdecomposition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationdecomposition/icucollationdecomposition.go index 80f0a052d..ff808d019 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationdecomposition/icucollationdecomposition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationdecomposition/icucollationdecomposition.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package icucollationdecomposition package icucollationdecomposition import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/icu-plugin.ts#L99-L102 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/icu-plugin.ts#L99-L102 type IcuCollationDecomposition struct { Name string } @@ -39,7 +39,7 @@ func (i IcuCollationDecomposition) MarshalText() (text []byte, err error) { } func (i *IcuCollationDecomposition) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "no": *i = No diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationstrength/icucollationstrength.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationstrength/icucollationstrength.go index 8bf1afa02..0fb6543e7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationstrength/icucollationstrength.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationstrength/icucollationstrength.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package icucollationstrength package icucollationstrength import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/icu-plugin.ts#L104-L110 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/icu-plugin.ts#L104-L110 type IcuCollationStrength struct { Name string } @@ -45,7 +45,7 @@ func (i IcuCollationStrength) MarshalText() (text []byte, err error) { } func (i *IcuCollationStrength) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "primary": *i = Primary diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationmode/icunormalizationmode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationmode/icunormalizationmode.go index b67af64c6..4cf6942ef 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationmode/icunormalizationmode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationmode/icunormalizationmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package icunormalizationmode package icunormalizationmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/icu-plugin.ts#L78-L81 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/icu-plugin.ts#L78-L81 type IcuNormalizationMode struct { Name string } @@ -39,7 +39,7 @@ func (i IcuNormalizationMode) MarshalText() (text []byte, err error) { } func (i *IcuNormalizationMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "decompose": *i = Decompose diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationtype/icunormalizationtype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationtype/icunormalizationtype.go index 908e105cd..de246ff16 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationtype/icunormalizationtype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationtype/icunormalizationtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package icunormalizationtype package icunormalizationtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/icu-plugin.ts#L83-L87 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/icu-plugin.ts#L83-L87 type IcuNormalizationType struct { Name string } @@ -41,7 +41,7 @@ func (i IcuNormalizationType) MarshalText() (text []byte, err error) { } func (i *IcuNormalizationType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "nfc": *i = Nfc diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icutransformdirection/icutransformdirection.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icutransformdirection/icutransformdirection.go index a286b7c42..ea3be1d9d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icutransformdirection/icutransformdirection.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icutransformdirection/icutransformdirection.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package icutransformdirection package icutransformdirection import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/icu-plugin.ts#L73-L76 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/icu-plugin.ts#L73-L76 type IcuTransformDirection struct { Name string } @@ -39,7 +39,7 @@ func (i IcuTransformDirection) MarshalText() (text []byte, err error) { } func (i *IcuTransformDirection) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "forward": *i = Forward diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/impactarea/impactarea.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/impactarea/impactarea.go new file mode 100644 index 000000000..8f33c3406 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/impactarea/impactarea.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Package impactarea +package impactarea + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L72-L77 +type ImpactArea struct { + Name string +} + +var ( + Search = ImpactArea{"search"} + + Ingest = ImpactArea{"ingest"} + + Backup = ImpactArea{"backup"} + + Deploymentmanagement = ImpactArea{"deployment_management"} +) + +func (i ImpactArea) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *ImpactArea) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "search": + *i = Search + case "ingest": + *i = Ingest + case "backup": + *i = Backup + case "deployment_management": + *i = Deploymentmanagement + default: + *i = ImpactArea{string(text)} + } + + return nil +} + +func (i ImpactArea) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/include/include.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/include/include.go index 2073ac6a2..75fdb4e68 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/include/include.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/include/include.go @@ -16,14 +16,14 @@ // under the 
License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package include package include import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Include.ts#L20-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Include.ts#L20-L47 type Include struct { Name string } @@ -36,6 +36,8 @@ var ( Hyperparameters = Include{"hyperparameters"} Totalfeatureimportance = Include{"total_feature_importance"} + + Definitionstatus = Include{"definition_status"} ) func (i Include) MarshalText() (text []byte, err error) { @@ -43,7 +45,7 @@ func (i Include) MarshalText() (text []byte, err error) { } func (i *Include) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "definition": *i = Definition @@ -53,6 +55,8 @@ func (i *Include) UnmarshalText(text []byte) error { *i = Hyperparameters case "total_feature_importance": *i = Totalfeatureimportance + case "definition_status": + *i = Definitionstatus default: *i = Include{string(text)} } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexcheckonstartup/indexcheckonstartup.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexcheckonstartup/indexcheckonstartup.go index 36a475fb0..08021cf04 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexcheckonstartup/indexcheckonstartup.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexcheckonstartup/indexcheckonstartup.go @@ -16,14 +16,14 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package indexcheckonstartup package indexcheckonstartup import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L253-L260 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L253-L260 type IndexCheckOnStartup struct { Name string } @@ -36,12 +36,16 @@ var ( Checksum = IndexCheckOnStartup{"checksum"} ) +func (i *IndexCheckOnStartup) UnmarshalJSON(data []byte) error { + return i.UnmarshalText(data) +} + func (i IndexCheckOnStartup) MarshalText() (text []byte, err error) { return []byte(i.String()), nil } func (i *IndexCheckOnStartup) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "true": *i = True diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexingjobstate/indexingjobstate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexingjobstate/indexingjobstate.go index af29e14f2..c8e50c1ab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexingjobstate/indexingjobstate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexingjobstate/indexingjobstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package indexingjobstate package indexingjobstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/get_jobs/types.ts#L66-L72 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/get_jobs/types.ts#L66-L72 type IndexingJobState struct { Name string } @@ -45,7 +45,7 @@ func (i IndexingJobState) MarshalText() (text []byte, err error) { } func (i *IndexingJobState) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "started": *i = Started diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexmetadatastate/indexmetadatastate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexmetadatastate/indexmetadatastate.go index 0ec1dc82f..78df1f4eb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexmetadatastate/indexmetadatastate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexmetadatastate/indexmetadatastate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package indexmetadatastate package indexmetadatastate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/stats/types.ts#L213-L219 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/stats/types.ts#L225-L232 type IndexMetadataState struct { Name string } @@ -39,7 +39,7 @@ func (i IndexMetadataState) MarshalText() (text []byte, err error) { } func (i *IndexMetadataState) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "open": *i = Open diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions/indexoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions/indexoptions.go index 9c832074f..c4be54c1f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions/indexoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions/indexoptions.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package indexoptions package indexoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L235-L240 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L242-L247 type IndexOptions struct { Name string } @@ -43,7 +43,7 @@ func (i IndexOptions) MarshalText() (text []byte, err error) { } func (i *IndexOptions) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "docs": *i = Docs diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexprivilege/indexprivilege.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexprivilege/indexprivilege.go index b6c772b90..e52cdbc49 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexprivilege/indexprivilege.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexprivilege/indexprivilege.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package indexprivilege package indexprivilege import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/Privileges.ts#L165-L185 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/Privileges.ts#L166-L187 type IndexPrivilege struct { Name string } @@ -73,7 +73,7 @@ func (i IndexPrivilege) MarshalText() (text []byte, err error) { } func (i *IndexPrivilege) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "none": *i = None diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexroutingallocationoptions/indexroutingallocationoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexroutingallocationoptions/indexroutingallocationoptions.go index 2e20c6c65..64eb3a249 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexroutingallocationoptions/indexroutingallocationoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexroutingallocationoptions/indexroutingallocationoptions.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package indexroutingallocationoptions package indexroutingallocationoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexRouting.ts#L38-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexRouting.ts#L38-L43 type IndexRoutingAllocationOptions struct { Name string } @@ -43,7 +43,7 @@ func (i IndexRoutingAllocationOptions) MarshalText() (text []byte, err error) { } func (i *IndexRoutingAllocationOptions) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "all": *i = All diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexroutingrebalanceoptions/indexroutingrebalanceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexroutingrebalanceoptions/indexroutingrebalanceoptions.go index f7d2b3efe..9d789321b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexroutingrebalanceoptions/indexroutingrebalanceoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexroutingrebalanceoptions/indexroutingrebalanceoptions.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package indexroutingrebalanceoptions package indexroutingrebalanceoptions import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexRouting.ts#L45-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexRouting.ts#L45-L50 type IndexRoutingRebalanceOptions struct { Name string } @@ -43,7 +43,7 @@ func (i IndexRoutingRebalanceOptions) MarshalText() (text []byte, err error) { } func (i *IndexRoutingRebalanceOptions) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "all": *i = All diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indicatorhealthstatus/indicatorhealthstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indicatorhealthstatus/indicatorhealthstatus.go new file mode 100644 index 000000000..2ec95625e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indicatorhealthstatus/indicatorhealthstatus.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Package indicatorhealthstatus +package indicatorhealthstatus + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L25-L30 +type IndicatorHealthStatus struct { + Name string +} + +var ( + Green = IndicatorHealthStatus{"green"} + + Yellow = IndicatorHealthStatus{"yellow"} + + Red = IndicatorHealthStatus{"red"} + + Unknown = IndicatorHealthStatus{"unknown"} +) + +func (i IndicatorHealthStatus) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +func (i *IndicatorHealthStatus) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "green": + *i = Green + case "yellow": + *i = Yellow + case "red": + *i = Red + case "unknown": + *i = Unknown + default: + *i = IndicatorHealthStatus{string(text)} + } + + return nil +} + +func (i IndicatorHealthStatus) String() string { + return i.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/inputtype/inputtype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/inputtype/inputtype.go index bd424a098..ceab383ee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/inputtype/inputtype.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/inputtype/inputtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package inputtype package inputtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Input.ts#L100-L104 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Input.ts#L100-L104 type InputType struct { Name string } @@ -41,7 +41,7 @@ func (i InputType) MarshalText() (text []byte, err error) { } func (i *InputType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "http": *i = Http diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jobblockedreason/jobblockedreason.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jobblockedreason/jobblockedreason.go index 46c0afad0..b3cfecfd3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jobblockedreason/jobblockedreason.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jobblockedreason/jobblockedreason.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package jobblockedreason package jobblockedreason import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Job.ts#L174-L178 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Job.ts#L397-L401 type JobBlockedReason struct { Name string } @@ -41,7 +41,7 @@ func (j JobBlockedReason) MarshalText() (text []byte, err error) { } func (j *JobBlockedReason) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "delete": *j = Delete diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jobstate/jobstate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jobstate/jobstate.go index 5ab751f46..b6ada942f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jobstate/jobstate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jobstate/jobstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package jobstate package jobstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Job.ts#L36-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Job.ts#L36-L52 type JobState struct { Name string } @@ -45,7 +45,7 @@ func (j JobState) MarshalText() (text []byte, err error) { } func (j *JobState) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "closing": *j = Closing diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jsonprocessorconflictstrategy/jsonprocessorconflictstrategy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jsonprocessorconflictstrategy/jsonprocessorconflictstrategy.go index c434fa616..97f499769 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jsonprocessorconflictstrategy/jsonprocessorconflictstrategy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jsonprocessorconflictstrategy/jsonprocessorconflictstrategy.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package jsonprocessorconflictstrategy package jsonprocessorconflictstrategy import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L279-L284 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L838-L843 type JsonProcessorConflictStrategy struct { Name string } @@ -39,7 +39,7 @@ func (j JsonProcessorConflictStrategy) MarshalText() (text []byte, err error) { } func (j *JsonProcessorConflictStrategy) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "replace": *j = Replace diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/keeptypesmode/keeptypesmode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/keeptypesmode/keeptypesmode.go index 77e1a7cbc..0b8916452 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/keeptypesmode/keeptypesmode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/keeptypesmode/keeptypesmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package keeptypesmode package keeptypesmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L212-L215 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L213-L216 type KeepTypesMode struct { Name string } @@ -39,7 +39,7 @@ func (k KeepTypesMode) MarshalText() (text []byte, err error) { } func (k *KeepTypesMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "include": *k = Include diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/kuromojitokenizationmode/kuromojitokenizationmode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/kuromojitokenizationmode/kuromojitokenizationmode.go index 9c4703d6e..c76376086 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/kuromojitokenizationmode/kuromojitokenizationmode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/kuromojitokenizationmode/kuromojitokenizationmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package kuromojitokenizationmode package kuromojitokenizationmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/kuromoji-plugin.ts#L52-L56 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/kuromoji-plugin.ts#L52-L56 type KuromojiTokenizationMode struct { Name string } @@ -41,7 +41,7 @@ func (k KuromojiTokenizationMode) MarshalText() (text []byte, err error) { } func (k *KuromojiTokenizationMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "normal": *k = Normal diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/language/language.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/language/language.go index 7dfb04b55..3248079e0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/language/language.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/language/language.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package language package language import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/languages.ts#L20-L55 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/languages.ts#L20-L55 type Language struct { Name string } @@ -103,7 +103,7 @@ func (l Language) MarshalText() (text []byte, err error) { } func (l *Language) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "Arabic": *l = Arabic diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/level/level.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/level/level.go index 8ddbb7774..45a4b4c85 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/level/level.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/level/level.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package level package level import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L229-L233 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L249-L253 type Level struct { Name string } @@ -41,7 +41,7 @@ func (l Level) MarshalText() (text []byte, err error) { } func (l *Level) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "cluster": *l = Cluster diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/licensestatus/licensestatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/licensestatus/licensestatus.go index 378658c50..caff70c54 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/licensestatus/licensestatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/licensestatus/licensestatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package licensestatus package licensestatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/license/_types/License.ts#L35-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/license/_types/License.ts#L35-L40 type LicenseStatus struct { Name string } @@ -43,7 +43,7 @@ func (l LicenseStatus) MarshalText() (text []byte, err error) { } func (l *LicenseStatus) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "active": *l = Active diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/licensetype/licensetype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/licensetype/licensetype.go index 2359c5998..e27d3f6f9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/licensetype/licensetype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/licensetype/licensetype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package licensetype package licensetype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/license/_types/License.ts#L23-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/license/_types/License.ts#L23-L33 type LicenseType struct { Name string } @@ -53,7 +53,7 @@ func (l LicenseType) MarshalText() (text []byte, err error) { } func (l *LicenseType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "missing": *l = Missing diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/lifecycleoperationmode/lifecycleoperationmode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/lifecycleoperationmode/lifecycleoperationmode.go index 09add1ce4..249a3cab8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/lifecycleoperationmode/lifecycleoperationmode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/lifecycleoperationmode/lifecycleoperationmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package lifecycleoperationmode package lifecycleoperationmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Lifecycle.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Lifecycle.ts#L20-L24 type LifecycleOperationMode struct { Name string } @@ -41,7 +41,7 @@ func (l LifecycleOperationMode) MarshalText() (text []byte, err error) { } func (l *LifecycleOperationMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "RUNNING": *l = RUNNING diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/matchtype/matchtype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/matchtype/matchtype.go index 0be5dc027..7354f0486 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/matchtype/matchtype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/matchtype/matchtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package matchtype package matchtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/dynamic-template.ts#L32-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/dynamic-template.ts#L32-L35 type MatchType struct { Name string } @@ -39,7 +39,7 @@ func (m MatchType) MarshalText() (text []byte, err error) { } func (m *MatchType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "simple": *m = Simple diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/memorystatus/memorystatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/memorystatus/memorystatus.go index b7e12ff42..6c1bac177 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/memorystatus/memorystatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/memorystatus/memorystatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package memorystatus package memorystatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Model.ts#L85-L89 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Model.ts#L88-L92 type MemoryStatus struct { Name string } @@ -41,7 +41,7 @@ func (m MemoryStatus) MarshalText() (text []byte, err error) { } func (m *MemoryStatus) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "ok": *m = Ok diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/metric/metric.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/metric/metric.go index cc5ab81e6..4151c87c8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/metric/metric.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/metric/metric.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package metric package metric import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/_types/Metric.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/_types/Metric.ts#L22-L28 type Metric struct { Name string } @@ -45,7 +45,7 @@ func (m Metric) MarshalText() (text []byte, err error) { } func (m *Metric) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "min": *m = Min diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/migrationstatus/migrationstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/migrationstatus/migrationstatus.go index e7ef792be..b94504807 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/migrationstatus/migrationstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/migrationstatus/migrationstatus.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package migrationstatus package migrationstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L30-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L30-L35 type MigrationStatus struct { Name string } @@ -43,7 +43,7 @@ func (m MigrationStatus) MarshalText() (text []byte, err error) { } func (m *MigrationStatus) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "NO_MIGRATION_NEEDED": *m = NOMIGRATIONNEEDED diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/minimuminterval/minimuminterval.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/minimuminterval/minimuminterval.go index b3ca7a2fd..eb2230802 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/minimuminterval/minimuminterval.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/minimuminterval/minimuminterval.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package minimuminterval package minimuminterval import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L64-L71 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L102-L109 type MinimumInterval struct { Name string } @@ -47,7 +47,7 @@ func (m MinimumInterval) MarshalText() (text []byte, err error) { } func (m *MinimumInterval) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "second": *m = Second diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/missingorder/missingorder.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/missingorder/missingorder.go index 35d1babaa..2591f747b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/missingorder/missingorder.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/missingorder/missingorder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package missingorder package missingorder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/AggregationContainer.ts#L212-L216 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/AggregationContainer.ts#L518-L522 type MissingOrder struct { Name string } @@ -41,7 +41,7 @@ func (m MissingOrder) MarshalText() (text []byte, err error) { } func (m *MissingOrder) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "first": *m = First diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/month/month.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/month/month.go index be76b7b1a..acba1899c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/month/month.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/month/month.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package month package month import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Schedule.ts#L70-L83 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Schedule.ts#L70-L83 type Month struct { Name string } @@ -59,7 +59,7 @@ func (m Month) MarshalText() (text []byte, err error) { } func (m *Month) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "january": *m = January diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode/multivaluemode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode/multivaluemode.go index 675d02da8..f7e386199 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode/multivaluemode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode/multivaluemode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package multivaluemode package multivaluemode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L160-L165 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L343-L360 type MultiValueMode struct { Name string } @@ -43,7 +43,7 @@ func (m MultiValueMode) MarshalText() (text []byte, err error) { } func (m *MultiValueMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "min": *m = Min diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole/noderole.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole/noderole.go index dca9fbedd..037f11df4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole/noderole.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole/noderole.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package noderole package noderole import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Node.ts#L76-L94 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Node.ts#L77-L95 type NodeRole struct { Name string } @@ -63,7 +63,7 @@ func (n NodeRole) MarshalText() (text []byte, err error) { } func (n *NodeRole) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "master": *n = Master diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noridecompoundmode/noridecompoundmode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noridecompoundmode/noridecompoundmode.go index db7e9b01d..1414ff6f4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noridecompoundmode/noridecompoundmode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noridecompoundmode/noridecompoundmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package noridecompoundmode package noridecompoundmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/tokenizers.ts#L74-L78 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/tokenizers.ts#L75-L79 type NoriDecompoundMode struct { Name string } @@ -41,7 +41,7 @@ func (n NoriDecompoundMode) MarshalText() (text []byte, err error) { } func (n *NoriDecompoundMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "discard": *n = Discard diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalization/normalization.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalization/normalization.go index 0c05de2c5..f22452f22 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalization/normalization.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalization/normalization.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package normalization package normalization import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Similarity.ts#L52-L58 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Similarity.ts#L52-L58 type Normalization struct { Name string } @@ -45,7 +45,7 @@ func (n Normalization) MarshalText() (text []byte, err error) { } func (n *Normalization) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "no": *n = No diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalizemethod/normalizemethod.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalizemethod/normalizemethod.go index 3e338433b..44c609080 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalizemethod/normalizemethod.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalizemethod/normalizemethod.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package normalizemethod package normalizemethod import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L266-L274 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L328-L354 type NormalizeMethod struct { Name string } @@ -47,7 +47,7 @@ func (n NormalizeMethod) MarshalText() (text []byte, err error) { } func (n *NormalizeMethod) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "rescale_0_1": *n = Rescale01 diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/numericfielddataformat/numericfielddataformat.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/numericfielddataformat/numericfielddataformat.go index 15b95efa7..61a5b08ca 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/numericfielddataformat/numericfielddataformat.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/numericfielddataformat/numericfielddataformat.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package numericfielddataformat package numericfielddataformat import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/NumericFielddataFormat.ts#L20-L23 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/NumericFielddataFormat.ts#L20-L23 type NumericFielddataFormat struct { Name string } @@ -39,7 +39,7 @@ func (n NumericFielddataFormat) MarshalText() (text []byte, err error) { } func (n *NumericFielddataFormat) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "array": *n = Array diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror/onscripterror.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror/onscripterror.go index 4f8f9b7c7..94d24f322 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror/onscripterror.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror/onscripterror.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package onscripterror package onscripterror import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L126-L129 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L129-L132 type OnScriptError struct { Name string } @@ -39,7 +39,7 @@ func (o OnScriptError) MarshalText() (text []byte, err error) { } func (o *OnScriptError) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "fail": *o = Fail diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operationtype/operationtype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operationtype/operationtype.go new file mode 100644 index 000000000..5c2156bb1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operationtype/operationtype.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Package operationtype +package operationtype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/bulk/types.ts#L83-L88 +type OperationType struct { + Name string +} + +var ( + Index = OperationType{"index"} + + Create = OperationType{"create"} + + Update = OperationType{"update"} + + Delete = OperationType{"delete"} +) + +func (o OperationType) MarshalText() (text []byte, err error) { + return []byte(o.String()), nil +} + +func (o *OperationType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "index": + *o = Index + case "create": + *o = Create + case "update": + *o = Update + case "delete": + *o = Delete + default: + *o = OperationType{string(text)} + } + + return nil +} + +func (o OperationType) String() string { + return o.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator/operator.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator/operator.go index 7cee550a0..cd7c1ea98 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator/operator.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator/operator.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package operator package operator import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/Operator.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/Operator.ts#L22-L27 type Operator struct { Name string } @@ -39,7 +39,7 @@ func (o Operator) MarshalText() (text []byte, err error) { } func (o *Operator) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "and": *o = And diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/optype/optype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/optype/optype.go index d8d902636..232553e90 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/optype/optype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/optype/optype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package optype package optype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L235-L238 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L255-L264 type OpType struct { Name string } @@ -39,7 +39,7 @@ func (o OpType) MarshalText() (text []byte, err error) { } func (o *OpType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "index": *o = Index diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/pagerdutycontexttype/pagerdutycontexttype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/pagerdutycontexttype/pagerdutycontexttype.go index b01fef54c..539ddfc20 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/pagerdutycontexttype/pagerdutycontexttype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/pagerdutycontexttype/pagerdutycontexttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package pagerdutycontexttype package pagerdutycontexttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L67-L70 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L67-L70 type PagerDutyContextType struct { Name string } @@ -39,7 +39,7 @@ func (p PagerDutyContextType) MarshalText() (text []byte, err error) { } func (p *PagerDutyContextType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "link": *p = Link diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/pagerdutyeventtype/pagerdutyeventtype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/pagerdutyeventtype/pagerdutyeventtype.go index fd61c0f87..1e2719602 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/pagerdutyeventtype/pagerdutyeventtype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/pagerdutyeventtype/pagerdutyeventtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package pagerdutyeventtype package pagerdutyeventtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L72-L76 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L72-L76 type PagerDutyEventType struct { Name string } @@ -41,7 +41,7 @@ func (p PagerDutyEventType) MarshalText() (text []byte, err error) { } func (p *PagerDutyEventType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "trigger": *p = Trigger diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticencoder/phoneticencoder.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticencoder/phoneticencoder.go index 7840491b2..1ca1fab6f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticencoder/phoneticencoder.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticencoder/phoneticencoder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package phoneticencoder package phoneticencoder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/phonetic-plugin.ts#L23-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/phonetic-plugin.ts#L23-L36 type PhoneticEncoder struct { Name string } @@ -59,7 +59,7 @@ func (p PhoneticEncoder) MarshalText() (text []byte, err error) { } func (p *PhoneticEncoder) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "metaphone": *p = Metaphone diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticlanguage/phoneticlanguage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticlanguage/phoneticlanguage.go index c34b48cf0..c0c142595 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticlanguage/phoneticlanguage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticlanguage/phoneticlanguage.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package phoneticlanguage package phoneticlanguage import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/phonetic-plugin.ts#L38-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/phonetic-plugin.ts#L38-L51 type PhoneticLanguage struct { Name string } @@ -59,7 +59,7 @@ func (p PhoneticLanguage) MarshalText() (text []byte, err error) { } func (p *PhoneticLanguage) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "any": *p = Any diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticnametype/phoneticnametype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticnametype/phoneticnametype.go index 36fd90238..5a87ce06b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticnametype/phoneticnametype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticnametype/phoneticnametype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package phoneticnametype package phoneticnametype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/phonetic-plugin.ts#L53-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/phonetic-plugin.ts#L53-L57 type PhoneticNameType struct { Name string } @@ -41,7 +41,7 @@ func (p PhoneticNameType) MarshalText() (text []byte, err error) { } func (p *PhoneticNameType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "generic": *p = Generic diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticruletype/phoneticruletype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticruletype/phoneticruletype.go index 6238724dc..e6722f4a9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticruletype/phoneticruletype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticruletype/phoneticruletype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package phoneticruletype package phoneticruletype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/phonetic-plugin.ts#L59-L62 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/phonetic-plugin.ts#L59-L62 type PhoneticRuleType struct { Name string } @@ -39,7 +39,7 @@ func (p PhoneticRuleType) MarshalText() (text []byte, err error) { } func (p *PhoneticRuleType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "approx": *p = Approx diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/policytype/policytype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/policytype/policytype.go index 65a77c75f..03a6a5c64 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/policytype/policytype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/policytype/policytype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package policytype package policytype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/enrich/_types/Policy.ts#L27-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/enrich/_types/Policy.ts#L27-L31 type PolicyType struct { Name string } @@ -41,7 +41,7 @@ func (p PolicyType) MarshalText() (text []byte, err error) { } func (p *PolicyType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "geo_match": *p = Geomatch diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/quantifier/quantifier.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/quantifier/quantifier.go index c2dae6387..3c0b9ff50 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/quantifier/quantifier.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/quantifier/quantifier.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package quantifier package quantifier import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Conditions.ts#L71-L74 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Conditions.ts#L71-L74 type Quantifier struct { Name string } @@ -39,7 +39,7 @@ func (q Quantifier) MarshalText() (text []byte, err error) { } func (q *Quantifier) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "some": *q = Some diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/queryrulecriteriatype/queryrulecriteriatype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/queryrulecriteriatype/queryrulecriteriatype.go new file mode 100644 index 000000000..1773c3887 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/queryrulecriteriatype/queryrulecriteriatype.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Package queryrulecriteriatype +package queryrulecriteriatype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/query_ruleset/_types/QueryRuleset.ts#L54-L65 +type QueryRuleCriteriaType struct { + Name string +} + +var ( + Global = QueryRuleCriteriaType{"global"} + + Exact = QueryRuleCriteriaType{"exact"} + + Exactfuzzy = QueryRuleCriteriaType{"exact_fuzzy"} + + Prefix = QueryRuleCriteriaType{"prefix"} + + Suffix = QueryRuleCriteriaType{"suffix"} + + Contains = QueryRuleCriteriaType{"contains"} + + Lt = QueryRuleCriteriaType{"lt"} + + Lte = QueryRuleCriteriaType{"lte"} + + Gt = QueryRuleCriteriaType{"gt"} + + Gte = QueryRuleCriteriaType{"gte"} +) + +func (q QueryRuleCriteriaType) MarshalText() (text []byte, err error) { + return []byte(q.String()), nil +} + +func (q *QueryRuleCriteriaType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "global": + *q = Global + case "exact": + *q = Exact + case "exact_fuzzy": + *q = Exactfuzzy + case "prefix": + *q = Prefix + case "suffix": + *q = Suffix + case "contains": + *q = Contains + case "lt": + *q = Lt + case "lte": + *q = Lte + case "gt": + *q = Gt + case "gte": + *q = Gte + default: + *q = QueryRuleCriteriaType{string(text)} + } + + return nil +} + +func (q 
QueryRuleCriteriaType) String() string { + return q.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/queryruletype/queryruletype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/queryruletype/queryruletype.go new file mode 100644 index 000000000..d0ed63b7a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/queryruletype/queryruletype.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Package queryruletype +package queryruletype + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/query_ruleset/_types/QueryRuleset.ts#L44-L46 +type QueryRuleType struct { + Name string +} + +var ( + Pinned = QueryRuleType{"pinned"} +) + +func (q QueryRuleType) MarshalText() (text []byte, err error) { + return []byte(q.String()), nil +} + +func (q *QueryRuleType) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "pinned": + *q = Pinned + default: + *q = QueryRuleType{string(text)} + } + + return nil +} + +func (q QueryRuleType) String() string { + return q.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation/rangerelation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation/rangerelation.go index 38b4016a7..c656810d7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation/rangerelation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation/rangerelation.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package rangerelation package rangerelation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/term.ts#L96-L100 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/term.ts#L170-L183 type RangeRelation struct { Name string } @@ -41,7 +41,7 @@ func (r RangeRelation) MarshalText() (text []byte, err error) { } func (r *RangeRelation) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "within": *r = Within diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ratemode/ratemode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ratemode/ratemode.go index 28673607c..bc21a703f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ratemode/ratemode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ratemode/ratemode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package ratemode package ratemode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L132-L135 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L243-L252 type RateMode struct { Name string } @@ -39,7 +39,7 @@ func (r RateMode) MarshalText() (text []byte, err error) { } func (r *RateMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "sum": *r = Sum diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh/refresh.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh/refresh.go index 8e225b757..f4ddf119d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh/refresh.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh/refresh.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package refresh package refresh import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L240-L247 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L266-L273 type Refresh struct { Name string } @@ -36,12 +36,16 @@ var ( Waitfor = Refresh{"wait_for"} ) +func (r *Refresh) UnmarshalJSON(data []byte) error { + return r.UnmarshalText(data) +} + func (r Refresh) MarshalText() (text []byte, err error) { return []byte(r.String()), nil } func (r *Refresh) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "true": *r = True diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/responsecontenttype/responsecontenttype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/responsecontenttype/responsecontenttype.go index 77fcb912b..f3e54f5ef 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/responsecontenttype/responsecontenttype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/responsecontenttype/responsecontenttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package responsecontenttype package responsecontenttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Input.ts#L106-L110 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Input.ts#L106-L110 type ResponseContentType struct { Name string } @@ -41,7 +41,7 @@ func (r ResponseContentType) MarshalText() (text []byte, err error) { } func (r *ResponseContentType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "json": *r = Json diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/result/result.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/result/result.go index c775fbe5f..aaf51c179 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/result/result.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/result/result.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package result package result import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Result.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Result.ts#L20-L27 type Result struct { Name string } @@ -45,7 +45,7 @@ func (r Result) MarshalText() (text []byte, err error) { } func (r *Result) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "created": *r = Created diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/resultposition/resultposition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/resultposition/resultposition.go index ae87305d0..1f70c2c3b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/resultposition/resultposition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/resultposition/resultposition.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package resultposition package resultposition import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/eql/search/types.ts#L20-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/eql/search/types.ts#L20-L32 type ResultPosition struct { Name string } @@ -39,7 +39,7 @@ func (r ResultPosition) MarshalText() (text []byte, err error) { } func (r *ResultPosition) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "tail": *r = Tail diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/routingstate/routingstate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/routingstate/routingstate.go index b9004700a..4472b5bde 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/routingstate/routingstate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/routingstate/routingstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package routingstate package routingstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L335-L356 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L350-L371 type RoutingState struct { Name string } @@ -45,7 +45,7 @@ func (r RoutingState) MarshalText() (text []byte, err error) { } func (r *RoutingState) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "failed": *r = Failed diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ruleaction/ruleaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ruleaction/ruleaction.go index 75ad41ce8..3483b4baa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ruleaction/ruleaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ruleaction/ruleaction.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package ruleaction package ruleaction import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Rule.ts#L41-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Rule.ts#L41-L50 type RuleAction struct { Name string } @@ -39,7 +39,7 @@ func (r RuleAction) MarshalText() (text []byte, err error) { } func (r *RuleAction) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "skip_result": *r = Skipresult diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/runtimefieldtype/runtimefieldtype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/runtimefieldtype/runtimefieldtype.go index a3a1e04b3..84841f393 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/runtimefieldtype/runtimefieldtype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/runtimefieldtype/runtimefieldtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package runtimefieldtype package runtimefieldtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/RuntimeFields.ts#L46-L55 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/RuntimeFields.ts#L56-L65 type RuntimeFieldType struct { Name string } @@ -51,7 +51,7 @@ func (r RuntimeFieldType) MarshalText() (text []byte, err error) { } func (r *RuntimeFieldType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "boolean": *r = Boolean diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sampleraggregationexecutionhint/sampleraggregationexecutionhint.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sampleraggregationexecutionhint/sampleraggregationexecutionhint.go index 191b08946..0282c6f19 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sampleraggregationexecutionhint/sampleraggregationexecutionhint.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sampleraggregationexecutionhint/sampleraggregationexecutionhint.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package sampleraggregationexecutionhint package sampleraggregationexecutionhint import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L163-L167 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L343-L356 type SamplerAggregationExecutionHint struct { Name string } @@ -41,7 +41,7 @@ func (s SamplerAggregationExecutionHint) MarshalText() (text []byte, err error) } func (s *SamplerAggregationExecutionHint) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "map": *s = Map diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scoremode/scoremode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scoremode/scoremode.go index 83f8c3920..1b10376fc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scoremode/scoremode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scoremode/scoremode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package scoremode package scoremode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/rescoring.ts#L36-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/rescoring.ts#L52-L74 type ScoreMode struct { Name string } @@ -45,7 +45,7 @@ func (s ScoreMode) MarshalText() (text []byte, err error) { } func (s *ScoreMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "avg": *s = Avg diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptlanguage/scriptlanguage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptlanguage/scriptlanguage.go index 7288b05db..956f5e1c6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptlanguage/scriptlanguage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptlanguage/scriptlanguage.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package scriptlanguage package scriptlanguage import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Scripting.ts#L24-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Scripting.ts#L24-L45 type ScriptLanguage struct { Name string } @@ -43,7 +43,7 @@ func (s ScriptLanguage) MarshalText() (text []byte, err error) { } func (s *ScriptLanguage) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "painless": *s = Painless diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptsorttype/scriptsorttype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptsorttype/scriptsorttype.go index b3efef0bc..d50680a33 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptsorttype/scriptsorttype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptsorttype/scriptsorttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package scriptsorttype package scriptsorttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/sort.ts#L76-L80 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/sort.ts#L76-L80 type ScriptSortType struct { Name string } @@ -41,7 +41,7 @@ func (s ScriptSortType) MarshalText() (text []byte, err error) { } func (s *ScriptSortType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "string": *s = String diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype/searchtype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype/searchtype.go index 2be262c12..21e0c90d0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype/searchtype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype/searchtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package searchtype package searchtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L249-L254 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L275-L280 type SearchType struct { Name string } @@ -39,7 +39,7 @@ func (s SearchType) MarshalText() (text []byte, err error) { } func (s *SearchType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "query_then_fetch": *s = Querythenfetch diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortmissing/segmentsortmissing.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortmissing/segmentsortmissing.go index b69122029..7fe349273 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortmissing/segmentsortmissing.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortmissing/segmentsortmissing.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package segmentsortmissing package segmentsortmissing import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSegmentSort.ts#L43-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSegmentSort.ts#L43-L48 type SegmentSortMissing struct { Name string } @@ -39,7 +39,7 @@ func (s SegmentSortMissing) MarshalText() (text []byte, err error) { } func (s *SegmentSortMissing) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "_last": *s = Last diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortmode/segmentsortmode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortmode/segmentsortmode.go index a98e577d0..e6475ac17 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortmode/segmentsortmode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortmode/segmentsortmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package segmentsortmode package segmentsortmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSegmentSort.ts#L36-L41 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSegmentSort.ts#L36-L41 type SegmentSortMode struct { Name string } @@ -39,7 +39,7 @@ func (s SegmentSortMode) MarshalText() (text []byte, err error) { } func (s *SegmentSortMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "min": *s = Min diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortorder/segmentsortorder.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortorder/segmentsortorder.go index 997759762..925740ffe 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortorder/segmentsortorder.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortorder/segmentsortorder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package segmentsortorder package segmentsortorder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSegmentSort.ts#L29-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSegmentSort.ts#L29-L34 type SegmentSortOrder struct { Name string } @@ -39,7 +39,7 @@ func (s SegmentSortOrder) MarshalText() (text []byte, err error) { } func (s *SegmentSortOrder) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "asc": *s = Asc diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shapetype/shapetype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shapetype/shapetype.go index d35cd5938..88d1e0763 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shapetype/shapetype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shapetype/shapetype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package shapetype package shapetype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L343-L346 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L1029-L1032 type ShapeType struct { Name string } @@ -39,7 +39,7 @@ func (s ShapeType) MarshalText() (text []byte, err error) { } func (s *ShapeType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "geo_shape": *s = Geoshape diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardroutingstate/shardroutingstate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardroutingstate/shardroutingstate.go index 613ec437e..f29772e70 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardroutingstate/shardroutingstate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardroutingstate/shardroutingstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package shardroutingstate package shardroutingstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/stats/types.ts#L160-L165 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/stats/types.ts#L169-L174 type ShardRoutingState struct { Name string } @@ -43,7 +43,7 @@ func (s ShardRoutingState) MarshalText() (text []byte, err error) { } func (s *ShardRoutingState) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "UNASSIGNED": *s = UNASSIGNED diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardsstatsstage/shardsstatsstage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardsstatsstage/shardsstatsstage.go index ce762a16f..edc1291ae 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardsstatsstage/shardsstatsstage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardsstatsstage/shardsstatsstage.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package shardsstatsstage package shardsstatsstage import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/_types/SnapshotShardsStatsStage.ts#L20-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/_types/SnapshotShardsStatsStage.ts#L20-L31 type ShardsStatsStage struct { Name string } @@ -45,7 +45,7 @@ func (s ShardsStatsStage) MarshalText() (text []byte, err error) { } func (s *ShardsStatsStage) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "DONE": *s = DONE diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardstoreallocation/shardstoreallocation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardstoreallocation/shardstoreallocation.go index 9346e2ff1..13c1c5386 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardstoreallocation/shardstoreallocation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardstoreallocation/shardstoreallocation.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package shardstoreallocation package shardstoreallocation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/shard_stores/types.ts#L45-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/shard_stores/types.ts#L45-L49 type ShardStoreAllocation struct { Name string } @@ -41,7 +41,7 @@ func (s ShardStoreAllocation) MarshalText() (text []byte, err error) { } func (s *ShardStoreAllocation) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "primary": *s = Primary diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardstorestatus/shardstorestatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardstorestatus/shardstorestatus.go new file mode 100644 index 000000000..34491ff8f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardstorestatus/shardstorestatus.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Package shardstorestatus +package shardstorestatus + +import "strings" + +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/shard_stores/types.ts#L60-L69 +type ShardStoreStatus struct { + Name string +} + +var ( + Green = ShardStoreStatus{"green"} + + Yellow = ShardStoreStatus{"yellow"} + + Red = ShardStoreStatus{"red"} + + All = ShardStoreStatus{"all"} +) + +func (s ShardStoreStatus) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *ShardStoreStatus) UnmarshalText(text []byte) error { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { + + case "green": + *s = Green + case "yellow": + *s = Yellow + case "red": + *s = Red + case "all": + *s = All + default: + *s = ShardStoreStatus{string(text)} + } + + return nil +} + +func (s ShardStoreStatus) String() string { + return s.Name +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shutdownstatus/shutdownstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shutdownstatus/shutdownstatus.go index 1d6db5f84..ff7fc4806 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shutdownstatus/shutdownstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shutdownstatus/shutdownstatus.go 
@@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package shutdownstatus package shutdownstatus import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L45-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L45-L50 type ShutdownStatus struct { Name string } @@ -43,7 +43,7 @@ func (s ShutdownStatus) MarshalText() (text []byte, err error) { } func (s *ShutdownStatus) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "not_started": *s = Notstarted diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shutdowntype/shutdowntype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shutdowntype/shutdowntype.go index 6560589ff..6fbefba19 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shutdowntype/shutdowntype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shutdowntype/shutdowntype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package shutdowntype package shutdowntype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L40-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L40-L43 type ShutdownType struct { Name string } @@ -39,7 +39,7 @@ func (s ShutdownType) MarshalText() (text []byte, err error) { } func (s *ShutdownType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "remove": *s = Remove diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/simplequerystringflag/simplequerystringflag.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/simplequerystringflag/simplequerystringflag.go deleted file mode 100644 index 65c05e465..000000000 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/simplequerystringflag/simplequerystringflag.go +++ /dev/null @@ -1,101 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 - -// Package simplequerystringflag -package simplequerystringflag - -import "strings" - -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L278-L292 -type SimpleQueryStringFlag struct { - Name string -} - -var ( - NONE = SimpleQueryStringFlag{"NONE"} - - AND = SimpleQueryStringFlag{"AND"} - - OR = SimpleQueryStringFlag{"OR"} - - NOT = SimpleQueryStringFlag{"NOT"} - - PREFIX = SimpleQueryStringFlag{"PREFIX"} - - PHRASE = SimpleQueryStringFlag{"PHRASE"} - - PRECEDENCE = SimpleQueryStringFlag{"PRECEDENCE"} - - ESCAPE = SimpleQueryStringFlag{"ESCAPE"} - - WHITESPACE = SimpleQueryStringFlag{"WHITESPACE"} - - FUZZY = SimpleQueryStringFlag{"FUZZY"} - - NEAR = SimpleQueryStringFlag{"NEAR"} - - SLOP = SimpleQueryStringFlag{"SLOP"} - - ALL = SimpleQueryStringFlag{"ALL"} -) - -func (s SimpleQueryStringFlag) MarshalText() (text []byte, err error) { - return []byte(s.String()), nil -} - -func (s *SimpleQueryStringFlag) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { - - case "NONE": - *s = NONE - case "AND": - *s = AND - case "OR": - *s = OR - case "NOT": - *s = NOT - case "PREFIX": - *s = PREFIX - case "PHRASE": - *s = PHRASE - case "PRECEDENCE": - *s = PRECEDENCE - case "ESCAPE": - *s = ESCAPE - case "WHITESPACE": - *s = WHITESPACE - case "FUZZY": - *s = 
FUZZY - case "NEAR": - *s = NEAR - case "SLOP": - *s = SLOP - case "ALL": - *s = ALL - default: - *s = SimpleQueryStringFlag{string(text)} - } - - return nil -} - -func (s SimpleQueryStringFlag) String() string { - return s.Name -} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snapshotsort/snapshotsort.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snapshotsort/snapshotsort.go index b123ab44e..dc67eb842 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snapshotsort/snapshotsort.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snapshotsort/snapshotsort.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package snapshotsort package snapshotsort import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/_types/SnapshotInfo.ts#L67-L78 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/_types/SnapshotInfo.ts#L73-L93 type SnapshotSort struct { Name string } @@ -49,7 +49,7 @@ func (s SnapshotSort) MarshalText() (text []byte, err error) { } func (s *SnapshotSort) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "start_time": *s = Starttime diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snapshotupgradestate/snapshotupgradestate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snapshotupgradestate/snapshotupgradestate.go index 7b8d41334..35ee18296 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snapshotupgradestate/snapshotupgradestate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snapshotupgradestate/snapshotupgradestate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package snapshotupgradestate package snapshotupgradestate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Model.ts#L91-L96 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Model.ts#L94-L99 type SnapshotUpgradeState struct { Name string } @@ -43,7 +43,7 @@ func (s SnapshotUpgradeState) MarshalText() (text []byte, err error) { } func (s *SnapshotUpgradeState) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "loading_old_state": *s = Loadingoldstate diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snowballlanguage/snowballlanguage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snowballlanguage/snowballlanguage.go index f3571d876..4e9608729 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snowballlanguage/snowballlanguage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snowballlanguage/snowballlanguage.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package snowballlanguage package snowballlanguage import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/languages.ts#L57-L80 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/languages.ts#L57-L80 type SnowballLanguage struct { Name string } @@ -79,7 +79,7 @@ func (s SnowballLanguage) MarshalText() (text []byte, err error) { } func (s *SnowballLanguage) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "Armenian": *s = Armenian diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortmode/sortmode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortmode/sortmode.go index 8a530f42f..b2670c50b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortmode/sortmode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortmode/sortmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package sortmode package sortmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/sort.ts#L103-L112 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/sort.ts#L103-L112 type SortMode struct { Name string } @@ -45,7 +45,7 @@ func (s SortMode) MarshalText() (text []byte, err error) { } func (s *SortMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "min": *s = Min diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder/sortorder.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder/sortorder.go index ea1d943c8..581943e9a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder/sortorder.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder/sortorder.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package sortorder package sortorder import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/sort.ts#L114-L117 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/sort.ts#L114-L123 type SortOrder struct { Name string } @@ -39,7 +39,7 @@ func (s SortOrder) MarshalText() (text []byte, err error) { } func (s *SortOrder) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "asc": *s = Asc diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sourcefieldmode/sourcefieldmode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sourcefieldmode/sourcefieldmode.go index 4e0d48718..a6a174a46 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sourcefieldmode/sourcefieldmode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sourcefieldmode/sourcefieldmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package sourcefieldmode package sourcefieldmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/meta-fields.ts#L67-L75 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/meta-fields.ts#L67-L75 type SourceFieldMode struct { Name string } @@ -41,7 +41,7 @@ func (s SourceFieldMode) MarshalText() (text []byte, err error) { } func (s *SourceFieldMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "disabled": *s = Disabled diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/statslevel/statslevel.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/statslevel/statslevel.go index beee20112..11ff02944 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/statslevel/statslevel.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/statslevel/statslevel.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package statslevel package statslevel import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/searchable_snapshots/_types/stats.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/searchable_snapshots/_types/stats.ts#L20-L24 type StatsLevel struct { Name string } @@ -41,7 +41,7 @@ func (s StatsLevel) MarshalText() (text []byte, err error) { } func (s *StatsLevel) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "cluster": *s = Cluster diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/storagetype/storagetype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/storagetype/storagetype.go index 4ed42ec69..65550f14c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/storagetype/storagetype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/storagetype/storagetype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package storagetype package storagetype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L508-L538 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L502-L532 type StorageType struct { Name string } @@ -43,7 +43,7 @@ func (s StorageType) MarshalText() (text []byte, err error) { } func (s *StorageType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "fs": *s = Fs diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/stringdistance/stringdistance.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/stringdistance/stringdistance.go index da166a67f..304c3be9c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/stringdistance/stringdistance.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/stringdistance/stringdistance.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package stringdistance package stringdistance import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L239-L245 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L469-L490 type StringDistance struct { Name string } @@ -45,7 +45,7 @@ func (s StringDistance) MarshalText() (text []byte, err error) { } func (s *StringDistance) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "internal": *s = Internal diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestmode/suggestmode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestmode/suggestmode.go index 64139f53b..1a1fff17f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestmode/suggestmode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestmode/suggestmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package suggestmode package suggestmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L256-L260 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L282-L295 type SuggestMode struct { Name string } @@ -41,7 +41,7 @@ func (s SuggestMode) MarshalText() (text []byte, err error) { } func (s *SuggestMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "missing": *s = Missing diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestsort/suggestsort.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestsort/suggestsort.go index 004657938..066184aad 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestsort/suggestsort.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestsort/suggestsort.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package suggestsort package suggestsort import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L247-L250 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L492-L501 type SuggestSort struct { Name string } @@ -39,7 +39,7 @@ func (s SuggestSort) MarshalText() (text []byte, err error) { } func (s *SuggestSort) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "score": *s = Score diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/synonymformat/synonymformat.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/synonymformat/synonymformat.go index 001268e37..6c774af9d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/synonymformat/synonymformat.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/synonymformat/synonymformat.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package synonymformat package synonymformat import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L104-L107 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L105-L108 type SynonymFormat struct { Name string } @@ -39,7 +39,7 @@ func (s SynonymFormat) MarshalText() (text []byte, err error) { } func (s *SynonymFormat) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "solr": *s = Solr diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/templateformat/templateformat.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/templateformat/templateformat.go index 5e36d0414..444c5bf95 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/templateformat/templateformat.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/templateformat/templateformat.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package templateformat package templateformat import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_role/types.ts#L41-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_role/types.ts#L44-L47 type TemplateFormat struct { Name string } @@ -39,7 +39,7 @@ func (t TemplateFormat) MarshalText() (text []byte, err error) { } func (t *TemplateFormat) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "string": *t = String diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationcollectmode/termsaggregationcollectmode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationcollectmode/termsaggregationcollectmode.go index 51a643b4c..ef1bf09a0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationcollectmode/termsaggregationcollectmode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationcollectmode/termsaggregationcollectmode.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package termsaggregationcollectmode package termsaggregationcollectmode import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L407-L410 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L980-L989 type TermsAggregationCollectMode struct { Name string } @@ -39,7 +39,7 @@ func (t TermsAggregationCollectMode) MarshalText() (text []byte, err error) { } func (t *TermsAggregationCollectMode) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "depth_first": *t = Depthfirst diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationexecutionhint/termsaggregationexecutionhint.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationexecutionhint/termsaggregationexecutionhint.go index 4c211b991..2bafea269 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationexecutionhint/termsaggregationexecutionhint.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationexecutionhint/termsaggregationexecutionhint.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package termsaggregationexecutionhint package termsaggregationexecutionhint import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L412-L417 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L991-L996 type TermsAggregationExecutionHint struct { Name string } @@ -43,7 +43,7 @@ func (t TermsAggregationExecutionHint) MarshalText() (text []byte, err error) { } func (t *TermsAggregationExecutionHint) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "map": *t = Map diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption/termvectoroption.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption/termvectoroption.go index 9ed86c914..8e1444088 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption/termvectoroption.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption/termvectoroption.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package termvectoroption package termvectoroption import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/TermVectorOption.ts#L20-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/TermVectorOption.ts#L20-L28 type TermVectorOption struct { Name string } @@ -49,7 +49,7 @@ func (t TermVectorOption) MarshalText() (text []byte, err error) { } func (t *TermVectorOption) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "no": *t = No diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/textquerytype/textquerytype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/textquerytype/textquerytype.go index ac41f70e2..7dad68229 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/textquerytype/textquerytype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/textquerytype/textquerytype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package textquerytype package textquerytype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L219-L226 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L541-L567 type TextQueryType struct { Name string } @@ -47,7 +47,7 @@ func (t TextQueryType) MarshalText() (text []byte, err error) { } func (t *TextQueryType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "best_fields": *t = Bestfields diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/threadtype/threadtype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/threadtype/threadtype.go index 898f2ff5d..4b555a4fb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/threadtype/threadtype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/threadtype/threadtype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package threadtype package threadtype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L262-L268 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L297-L303 type ThreadType struct { Name string } @@ -45,7 +45,7 @@ func (t ThreadType) MarshalText() (text []byte, err error) { } func (t *ThreadType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "cpu": *t = Cpu diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype/timeseriesmetrictype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype/timeseriesmetrictype.go index be1d7cb82..979e68ed5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype/timeseriesmetrictype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype/timeseriesmetrictype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package timeseriesmetrictype package timeseriesmetrictype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/TimeSeriesMetricType.ts#L20-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/TimeSeriesMetricType.ts#L20-L26 type TimeSeriesMetricType struct { Name string } @@ -36,6 +36,8 @@ var ( Summary = TimeSeriesMetricType{"summary"} Histogram = TimeSeriesMetricType{"histogram"} + + Position = TimeSeriesMetricType{"position"} ) func (t TimeSeriesMetricType) MarshalText() (text []byte, err error) { @@ -43,7 +45,7 @@ func (t TimeSeriesMetricType) MarshalText() (text []byte, err error) { } func (t *TimeSeriesMetricType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "gauge": *t = Gauge @@ -53,6 +55,8 @@ func (t *TimeSeriesMetricType) UnmarshalText(text []byte) error { *t = Summary case "histogram": *t = Histogram + case "position": + *t = Position default: *t = TimeSeriesMetricType{string(text)} } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit/timeunit.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit/timeunit.go index 820cdb216..5bcbc7253 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit/timeunit.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeunit/timeunit.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package timeunit package timeunit import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Time.ts#L69-L84 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Time.ts#L69-L84 type TimeUnit struct { Name string } @@ -49,7 +49,7 @@ func (t TimeUnit) MarshalText() (text []byte, err error) { } func (t *TimeUnit) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "nanos": *t = Nanos diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenchar/tokenchar.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenchar/tokenchar.go index a9a13ce59..c862a8ac5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenchar/tokenchar.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenchar/tokenchar.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package tokenchar package tokenchar import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/tokenizers.ts#L46-L53 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/tokenizers.ts#L47-L54 type TokenChar struct { Name string } @@ -47,7 +47,7 @@ func (t TokenChar) MarshalText() (text []byte, err error) { } func (t *TokenChar) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "letter": *t = Letter diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenizationtruncate/tokenizationtruncate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenizationtruncate/tokenizationtruncate.go index e4fffad1d..d0e12794d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenizationtruncate/tokenizationtruncate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenizationtruncate/tokenizationtruncate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package tokenizationtruncate package tokenizationtruncate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L315-L319 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L350-L354 type TokenizationTruncate struct { Name string } @@ -41,7 +41,7 @@ func (t TokenizationTruncate) MarshalText() (text []byte, err error) { } func (t *TokenizationTruncate) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "first": *t = First diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/totalhitsrelation/totalhitsrelation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/totalhitsrelation/totalhitsrelation.go index eebf40189..0ac16270c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/totalhitsrelation/totalhitsrelation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/totalhitsrelation/totalhitsrelation.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package totalhitsrelation package totalhitsrelation import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/hits.ts#L99-L104 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/hits.ts#L99-L104 type TotalHitsRelation struct { Name string } @@ -39,7 +39,7 @@ func (t TotalHitsRelation) MarshalText() (text []byte, err error) { } func (t *TotalHitsRelation) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "eq": *t = Eq diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/trainedmodeltype/trainedmodeltype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/trainedmodeltype/trainedmodeltype.go index 78b1eb8c8..6a81e02fa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/trainedmodeltype/trainedmodeltype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/trainedmodeltype/trainedmodeltype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package trainedmodeltype package trainedmodeltype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L247-L261 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L257-L271 type TrainedModelType struct { Name string } @@ -41,7 +41,7 @@ func (t TrainedModelType) MarshalText() (text []byte, err error) { } func (t *TrainedModelType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "tree_ensemble": *t = Treeensemble diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/trainingpriority/trainingpriority.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/trainingpriority/trainingpriority.go index 9a4434174..45638e830 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/trainingpriority/trainingpriority.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/trainingpriority/trainingpriority.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package trainingpriority package trainingpriority import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L300-L303 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L310-L313 type TrainingPriority struct { Name string } @@ -39,7 +39,7 @@ func (t TrainingPriority) MarshalText() (text []byte, err error) { } func (t *TrainingPriority) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "normal": *t = Normal diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/translogdurability/translogdurability.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/translogdurability/translogdurability.go index c43909ff5..8604adee7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/translogdurability/translogdurability.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/translogdurability/translogdurability.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package translogdurability package translogdurability import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L356-L371 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L356-L371 type TranslogDurability struct { Name string } @@ -39,7 +39,7 @@ func (t TranslogDurability) MarshalText() (text []byte, err error) { } func (t *TranslogDurability) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "request": *t = Request diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ttesttype/ttesttype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ttesttype/ttesttype.go index ca874ea80..51fab6b16 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ttesttype/ttesttype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ttesttype/ttesttype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package ttesttype package ttesttype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L165-L169 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L322-L335 type TTestType struct { Name string } @@ -41,7 +41,7 @@ func (t TTestType) MarshalText() (text []byte, err error) { } func (t *TTestType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "paired": *t = Paired diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/type_/type_.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/type_/type_.go index ef376bb69..fae0d67f4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/type_/type_.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/type_/type_.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package type_ package type_ import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/shutdown/_types/types.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/shutdown/_types/types.ts#L20-L24 type Type struct { Name string } @@ -41,7 +41,7 @@ func (t Type) MarshalText() (text []byte, err error) { } func (t *Type) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "restart": *t = Restart diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/unassignedinformationreason/unassignedinformationreason.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/unassignedinformationreason/unassignedinformationreason.go index 8d206f9e7..d52cd33c9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/unassignedinformationreason/unassignedinformationreason.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/unassignedinformationreason/unassignedinformationreason.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package unassignedinformationreason package unassignedinformationreason import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/allocation_explain/types.ts#L127-L146 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/allocation_explain/types.ts#L127-L146 type UnassignedInformationReason struct { Name string } @@ -65,7 +65,7 @@ func (u UnassignedInformationReason) MarshalText() (text []byte, err error) { } func (u *UnassignedInformationReason) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "INDEX_CREATED": *u = INDEXCREATED diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/useragentproperty/useragentproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/useragentproperty/useragentproperty.go index 6377c0ecd..09e2915fd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/useragentproperty/useragentproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/useragentproperty/useragentproperty.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package useragentproperty package useragentproperty import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L77-L88 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L260-L271 type UserAgentProperty struct { Name string } @@ -55,7 +55,7 @@ func (u UserAgentProperty) MarshalText() (text []byte, err error) { } func (u *UserAgentProperty) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "NAME": *u = NAME diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/valuetype/valuetype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/valuetype/valuetype.go index 51667e95d..a0495a231 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/valuetype/valuetype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/valuetype/valuetype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package valuetype package valuetype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L198-L209 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L419-L430 type ValueType struct { Name string } @@ -55,7 +55,7 @@ func (v ValueType) MarshalText() (text []byte, err error) { } func (v *ValueType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "string": *v = String diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype/versiontype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype/versiontype.go index d9fd592d4..387378649 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype/versiontype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype/versiontype.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package versiontype package versiontype import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L98-L103 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L100-L116 type VersionType struct { Name string } @@ -43,7 +43,7 @@ func (v VersionType) MarshalText() (text []byte, err error) { } func (v *VersionType) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "internal": *v = Internal diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/waitforevents/waitforevents.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/waitforevents/waitforevents.go index bd97b04e0..a1e2cd77d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/waitforevents/waitforevents.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/waitforevents/waitforevents.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package waitforevents package waitforevents import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L276-L283 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L311-L318 type WaitForEvents struct { Name string } @@ -47,7 +47,7 @@ func (w WaitForEvents) MarshalText() (text []byte, err error) { } func (w *WaitForEvents) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "immediate": *w = Immediate diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/watcherstate/watcherstate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/watcherstate/watcherstate.go index 5f63242d8..50306403e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/watcherstate/watcherstate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/watcherstate/watcherstate.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package watcherstate package watcherstate import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/stats/types.ts#L26-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/stats/types.ts#L26-L31 type WatcherState struct { Name string } @@ -43,7 +43,7 @@ func (w WatcherState) MarshalText() (text []byte, err error) { } func (w *WatcherState) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "stopped": *w = Stopped diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/zerotermsquery/zerotermsquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/zerotermsquery/zerotermsquery.go index 4d2a86348..7d8d5fe52 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/zerotermsquery/zerotermsquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/zerotermsquery/zerotermsquery.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Package zerotermsquery package zerotermsquery import "strings" -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L228-L231 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L569-L578 type ZeroTermsQuery struct { Name string } @@ -39,7 +39,7 @@ func (z ZeroTermsQuery) MarshalText() (text []byte, err error) { } func (z *ZeroTermsQuery) UnmarshalText(text []byte) error { - switch strings.ToLower(string(text)) { + switch strings.ReplaceAll(strings.ToLower(string(text)), "\"", "") { case "all": *z = All diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/epochtimeunitmillis.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/epochtimeunitmillis.go index 0433e14af..719c22caf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/epochtimeunitmillis.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/epochtimeunitmillis.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // EpochTimeUnitMillis type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Time.ts#L40-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Time.ts#L40-L40 type EpochTimeUnitMillis int64 diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/epochtimeunitseconds.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/epochtimeunitseconds.go index f7a5ecfc0..fcdec9bea 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/epochtimeunitseconds.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/epochtimeunitseconds.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // EpochTimeUnitSeconds type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Time.ts#L40-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Time.ts#L40-L40 type EpochTimeUnitSeconds int64 diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eql.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eql.go index 5dc3fcf48..354746e31 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eql.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eql.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Eql type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L342-L345 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L351-L354 type Eql struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -30,6 +38,67 @@ type Eql struct { Queries map[string]XpackQuery `json:"queries"` } +func (s *Eql) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "features": + if err := dec.Decode(&s.Features); err != nil { + return err + } + + case "queries": + if s.Queries == nil { + s.Queries = make(map[string]XpackQuery, 0) + } + if err := dec.Decode(&s.Queries); err != nil { + return err + } + + } + } + return nil +} + // NewEql returns a Eql. 
func NewEql() *Eql { r := &Eql{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeatures.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeatures.go index e48edbd7e..b871fb4a7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeatures.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeatures.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // EqlFeatures type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L99-L107 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L99-L107 type EqlFeatures struct { Event uint `json:"event"` Join uint `json:"join"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeaturesjoin.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeaturesjoin.go index 481110e63..dd1f470a0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeaturesjoin.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeaturesjoin.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // EqlFeaturesJoin type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L109-L115 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L109-L115 type EqlFeaturesJoin struct { JoinQueriesFiveOrMore uint `json:"join_queries_five_or_more"` JoinQueriesFour uint `json:"join_queries_four"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeatureskeys.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeatureskeys.go index ea37269f5..d62d93146 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeatureskeys.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeatureskeys.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // EqlFeaturesKeys type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L117-L123 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L117-L123 type EqlFeaturesKeys struct { JoinKeysFiveOrMore uint `json:"join_keys_five_or_more"` JoinKeysFour uint `json:"join_keys_four"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeaturespipes.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeaturespipes.go index a462f72ef..9678672bf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeaturespipes.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeaturespipes.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // EqlFeaturesPipes type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L125-L128 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L125-L128 type EqlFeaturesPipes struct { PipeHead uint `json:"pipe_head"` PipeTail uint `json:"pipe_tail"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeaturessequences.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeaturessequences.go index 9533aedba..190552b7e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeaturessequences.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlfeaturessequences.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // EqlFeaturesSequences type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L130-L137 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L130-L137 type EqlFeaturesSequences struct { SequenceMaxspan uint `json:"sequence_maxspan"` SequenceQueriesFiveOrMore uint `json:"sequence_queries_five_or_more"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlhits.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlhits.go index 9c5629b5e..961fca359 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlhits.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eqlhits.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // EqlHits type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/eql/_types/EqlHits.ts#L25-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/eql/_types/EqlHits.ts#L25-L39 type EqlHits struct { // Events Contains events matching the query. Each object represents a matching event. 
Events []HitsEvent `json:"events,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/errorcause.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/errorcause.go index 6e6308211..1d394a24e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/errorcause.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/errorcause.go @@ -16,18 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" ) // ErrorCause type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Errors.ts#L25-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Errors.ts#L25-L48 type ErrorCause struct { CausedBy *ErrorCause `json:"caused_by,omitempty"` Metadata map[string]json.RawMessage `json:"-"` @@ -42,6 +46,96 @@ type ErrorCause struct { Type string `json:"type"` } +func (s *ErrorCause) UnmarshalJSON(data []byte) error { + + if bytes.HasPrefix(data, []byte(`"`)) { + reason := string(data) + s.Reason = &reason + return nil + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "caused_by": + if err := dec.Decode(&s.CausedBy); err != nil { + return err + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + case "root_cause": + if err := 
dec.Decode(&s.RootCause); err != nil { + return err + } + + case "stack_trace": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StackTrace = &o + + case "suppressed": + if err := dec.Decode(&s.Suppressed); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + default: + + if key, ok := t.(string); ok { + if s.Metadata == nil { + s.Metadata = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return err + } + s.Metadata[key] = *raw + } + + } + } + return nil +} + // MarhsalJSON overrides marshalling for types with additional properties func (s ErrorCause) MarshalJSON() ([]byte, error) { type opt ErrorCause @@ -61,6 +155,7 @@ func (s ErrorCause) MarshalJSON() ([]byte, error) { for key, value := range s.Metadata { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Metadata") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eventdatastream.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eventdatastream.go new file mode 100644 index 000000000..64c57cc16 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/eventdatastream.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + +// EventDataStream type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/search_application/_types/BehavioralAnalytics.ts#L29-L31 +type EventDataStream struct { + Name string `json:"name"` +} + +func (s *EventDataStream) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + +// NewEventDataStream returns a EventDataStream. +func NewEventDataStream() *EventDataStream { + r := &EventDataStream{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ewmamodelsettings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ewmamodelsettings.go index 96dbf34f4..0177c756d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ewmamodelsettings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ewmamodelsettings.go @@ -16,17 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // EwmaModelSettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L227-L229 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L267-L269 type EwmaModelSettings struct { Alpha *float32 `json:"alpha,omitempty"` } +func (s *EwmaModelSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alpha": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Alpha = &f + case float64: + f := float32(v) + s.Alpha = &f + } + + } + } + return nil +} + // NewEwmaModelSettings returns a EwmaModelSettings. func NewEwmaModelSettings() *EwmaModelSettings { r := &EwmaModelSettings{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ewmamovingaverageaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ewmamovingaverageaggregation.go index b9e0afa5e..d1b9253c5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ewmamovingaverageaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ewmamovingaverageaggregation.go @@ -16,38 +16,43 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // EwmaMovingAverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L212-L215 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L252-L255 type EwmaMovingAverageAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Minimize *bool `json:"minimize,omitempty"` - Model string `json:"model,omitempty"` - Name *string `json:"name,omitempty"` - Predict *int `json:"predict,omitempty"` - Settings EwmaModelSettings `json:"settings"` - Window *int `json:"window,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. 
+ GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Minimize *bool `json:"minimize,omitempty"` + Model string `json:"model,omitempty"` + Name *string `json:"name,omitempty"` + Predict *int `json:"predict,omitempty"` + Settings EwmaModelSettings `json:"settings"` + Window *int `json:"window,omitempty"` } func (s *EwmaMovingAverageAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -67,9 +72,16 @@ func (s *EwmaMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -82,8 +94,17 @@ func (s *EwmaMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "minimize": - if err := dec.Decode(&s.Minimize); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Minimize = &value + case bool: + s.Minimize = &v } case "model": @@ -92,13 +113,31 @@ func (s *EwmaMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o case "predict": - if err := dec.Decode(&s.Predict); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Predict = &value + case float64: + f := int(v) + s.Predict = &f } case "settings": @@ -107,8 +146,19 @@ func (s 
*EwmaMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "window": - if err := dec.Decode(&s.Window); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Window = &value + case float64: + f := int(v) + s.Window = &f } } @@ -116,11 +166,30 @@ func (s *EwmaMovingAverageAggregation) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s EwmaMovingAverageAggregation) MarshalJSON() ([]byte, error) { + type innerEwmaMovingAverageAggregation EwmaMovingAverageAggregation + tmp := innerEwmaMovingAverageAggregation{ + BucketsPath: s.BucketsPath, + Format: s.Format, + GapPolicy: s.GapPolicy, + Meta: s.Meta, + Minimize: s.Minimize, + Model: s.Model, + Name: s.Name, + Predict: s.Predict, + Settings: s.Settings, + Window: s.Window, + } + + tmp.Model = "ewma" + + return json.Marshal(tmp) +} + // NewEwmaMovingAverageAggregation returns a EwmaMovingAverageAggregation. func NewEwmaMovingAverageAggregation() *EwmaMovingAverageAggregation { r := &EwmaMovingAverageAggregation{} - r.Model = "ewma" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executeenrichpolicystatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executeenrichpolicystatus.go index b838d7dbb..89c416f23 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executeenrichpolicystatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executeenrichpolicystatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // ExecuteEnrichPolicyStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/enrich/execute_policy/types.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/enrich/execute_policy/types.ts#L20-L22 type ExecuteEnrichPolicyStatus struct { Phase enrichpolicyphase.EnrichPolicyPhase `json:"phase"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executingpolicy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executingpolicy.go index 7eb646555..c0a3253d2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executingpolicy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executingpolicy.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ExecutingPolicy type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/enrich/stats/types.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/enrich/stats/types.ts#L24-L27 type ExecutingPolicy struct { Name string `json:"name"` Task TaskInfo `json:"task"` } +func (s *ExecutingPolicy) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "task": + if err := dec.Decode(&s.Task); err != nil { + return err + } + + } + } + return nil +} + // NewExecutingPolicy returns a ExecutingPolicy. func NewExecutingPolicy() *ExecutingPolicy { r := &ExecutingPolicy{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionresult.go index 7ee566424..d155dbde1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionresult.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ExecutionResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Execution.ts#L60-L66 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Execution.ts#L60-L66 type ExecutionResult struct { Actions []ExecutionResultAction `json:"actions"` Condition ExecutionResultCondition `json:"condition"` @@ -31,6 +38,51 @@ type ExecutionResult struct { Input ExecutionResultInput `json:"input"` } +func (s *ExecutionResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return err + } + + case "condition": + if err := dec.Decode(&s.Condition); err != nil { + return err + } + + case "execution_duration": + if err := dec.Decode(&s.ExecutionDuration); err != nil { + return err + } + + case "execution_time": + if err := dec.Decode(&s.ExecutionTime); err != nil { + return err + } + + case "input": + if err := dec.Decode(&s.Input); err != nil { + return err + } + + } + } + return nil +} + // NewExecutionResult returns a ExecutionResult. func NewExecutionResult() *ExecutionResult { r := &ExecutionResult{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionresultaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionresultaction.go index 6687c6b81..a44ef7d15 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionresultaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionresultaction.go @@ -16,18 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actionstatusoptions" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actiontype" ) // ExecutionResultAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Execution.ts#L74-L86 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Execution.ts#L74-L86 type ExecutionResultAction struct { Email *EmailResult `json:"email,omitempty"` Error *ErrorCause `json:"error,omitempty"` @@ -42,6 +48,88 @@ type ExecutionResultAction struct { Webhook *WebhookResult `json:"webhook,omitempty"` } +func (s *ExecutionResultAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "email": + if err := dec.Decode(&s.Email); err != nil { + return err + } + + case "error": + if err := dec.Decode(&s.Error); err != nil { + return err + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "logging": + if err := dec.Decode(&s.Logging); err != nil { + return err + } + + case "pagerduty": + if err := dec.Decode(&s.Pagerduty); err != nil { + return err + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + case "slack": 
+ if err := dec.Decode(&s.Slack); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "webhook": + if err := dec.Decode(&s.Webhook); err != nil { + return err + } + + } + } + return nil +} + // NewExecutionResultAction returns a ExecutionResultAction. func NewExecutionResultAction() *ExecutionResultAction { r := &ExecutionResultAction{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionresultcondition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionresultcondition.go index c0847c698..fbc245d85 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionresultcondition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionresultcondition.go @@ -16,24 +16,74 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actionstatusoptions" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditiontype" ) // ExecutionResultCondition type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Execution.ts#L68-L72 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Execution.ts#L68-L72 type ExecutionResultCondition struct { Met bool `json:"met"` Status actionstatusoptions.ActionStatusOptions `json:"status"` Type conditiontype.ConditionType `json:"type"` } +func (s *ExecutionResultCondition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "met": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Met = value + case bool: + s.Met = v + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil +} + // NewExecutionResultCondition returns a ExecutionResultCondition. func NewExecutionResultCondition() *ExecutionResultCondition { r := &ExecutionResultCondition{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionresultinput.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionresultinput.go index 1844f2487..259abfb97 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionresultinput.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionresultinput.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -29,7 +29,7 @@ import ( // ExecutionResultInput type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Execution.ts#L88-L92 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Execution.ts#L88-L92 type ExecutionResultInput struct { Payload map[string]json.RawMessage `json:"payload"` Status actionstatusoptions.ActionStatusOptions `json:"status"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionstate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionstate.go index a9e9fa048..f27231928 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionstate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionstate.go @@ -16,19 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ExecutionState type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Action.ts#L117-L121 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Action.ts#L120-L124 type ExecutionState struct { Reason *string `json:"reason,omitempty"` Successful bool `json:"successful"` Timestamp DateTime `json:"timestamp"` } +func (s *ExecutionState) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + case "successful": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Successful = value + case bool: + s.Successful = v + } + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + } + } + return nil +} + // NewExecutionState returns a ExecutionState. func NewExecutionState() *ExecutionState { r := &ExecutionState{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionthreadpool.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionthreadpool.go index 8a5577266..412ecbc35 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionthreadpool.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/executionthreadpool.go @@ -16,18 +16,76 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ExecutionThreadPool type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Execution.ts#L94-L97 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Execution.ts#L94-L97 type ExecutionThreadPool struct { MaxSize int64 `json:"max_size"` QueueSize int64 `json:"queue_size"` } +func (s *ExecutionThreadPool) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_size": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MaxSize = value + case float64: + f := int64(v) + s.MaxSize = f + } + + case "queue_size": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.QueueSize = value + case float64: + f := int64(v) + s.QueueSize = f + } + + } + } + return nil +} + // NewExecutionThreadPool returns a ExecutionThreadPool. 
func NewExecutionThreadPool() *ExecutionThreadPool { r := &ExecutionThreadPool{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/existsquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/existsquery.go index a6bfe17d6..9e14b4eb8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/existsquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/existsquery.go @@ -16,17 +16,84 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ExistsQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/term.ts#L36-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/term.ts#L36-L41 type ExistsQuery struct { - Boost *float32 `json:"boost,omitempty"` - Field string `json:"field"` - QueryName_ *string `json:"_name,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Field Name of the field you wish to search. 
+ Field string `json:"field"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *ExistsQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil } // NewExistsQuery returns a ExistsQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/expandwildcards.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/expandwildcards.go index ea713754d..a85d9f9d0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/expandwildcards.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/expandwildcards.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,5 +26,5 @@ import ( // ExpandWildcards type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L197-L197 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L217-L217 type ExpandWildcards []expandwildcard.ExpandWildcard diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/explainanalyzetoken.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/explainanalyzetoken.go index c1cc8ea57..215b22977 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/explainanalyzetoken.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/explainanalyzetoken.go @@ -16,18 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" ) // ExplainAnalyzeToken type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/analyze/types.ts#L52-L64 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/analyze/types.ts#L52-L64 type ExplainAnalyzeToken struct { Bytes string `json:"bytes"` EndOffset int64 `json:"end_offset"` @@ -41,6 +45,164 @@ type ExplainAnalyzeToken struct { Type string `json:"type"` } +func (s *ExplainAnalyzeToken) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Bytes = o + + case "end_offset": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.EndOffset = value + case float64: + f := int64(v) + s.EndOffset = f + } + + case "keyword": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Keyword = &value + case bool: + s.Keyword = &v + } + + case "position": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Position = value + case float64: + f := int64(v) + s.Position = f + } + + case "positionLength": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PositionLength = value + case float64: + f := int64(v) + s.PositionLength = f + } + + case "start_offset": + var tmp 
interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.StartOffset = value + case float64: + f := int64(v) + s.StartOffset = f + } + + case "termFrequency": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TermFrequency = value + case float64: + f := int64(v) + s.TermFrequency = f + } + + case "token": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Token = o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + default: + + if key, ok := t.(string); ok { + if s.ExplainAnalyzeToken == nil { + s.ExplainAnalyzeToken = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return err + } + s.ExplainAnalyzeToken[key] = *raw + } + + } + } + return nil +} + // MarhsalJSON overrides marshalling for types with additional properties func (s ExplainAnalyzeToken) MarshalJSON() ([]byte, error) { type opt ExplainAnalyzeToken @@ -60,6 +222,7 @@ func (s ExplainAnalyzeToken) MarshalJSON() ([]byte, error) { for key, value := range s.ExplainAnalyzeToken { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "ExplainAnalyzeToken") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/explanation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/explanation.go index eb941fad2..149260e14 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/explanation.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/explanation.go @@ -16,19 +16,80 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Explanation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/explain/types.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/explain/types.ts#L22-L26 type Explanation struct { Description string `json:"description"` Details []ExplanationDetail `json:"details"` Value float32 `json:"value"` } +func (s *Explanation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return err + } + + case "value": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Value = f + case float64: + f := float32(v) + s.Value = f + } + + } + } + return nil +} + // NewExplanation returns a Explanation. 
func NewExplanation() *Explanation { r := &Explanation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/explanationdetail.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/explanationdetail.go index 13f76c66f..b282d3a0c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/explanationdetail.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/explanationdetail.go @@ -16,19 +16,80 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ExplanationDetail type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/explain/types.ts#L28-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/explain/types.ts#L28-L32 type ExplanationDetail struct { Description string `json:"description"` Details []ExplanationDetail `json:"details,omitempty"` Value float32 `json:"value"` } +func (s *ExplanationDetail) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return err + } + + case "value": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + 
value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Value = f + case float64: + f := float32(v) + s.Value = f + } + + } + } + return nil +} + // NewExplanationDetail returns a ExplanationDetail. func NewExplanationDetail() *ExplanationDetail { r := &ExplanationDetail{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/explorecontrols.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/explorecontrols.go index d9a41d023..39b578dd9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/explorecontrols.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/explorecontrols.go @@ -16,18 +16,104 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ExploreControls type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/graph/_types/ExploreControls.ts#L24-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/graph/_types/ExploreControls.ts#L24-L49 type ExploreControls struct { + // SampleDiversity To avoid the top-matching documents sample being dominated by a single source + // of results, it is sometimes necessary to request diversity in the sample. + // You can do this by selecting a single-value field and setting a maximum + // number of documents per value for that field. 
SampleDiversity *SampleDiversity `json:"sample_diversity,omitempty"` - SampleSize *int `json:"sample_size,omitempty"` - Timeout Duration `json:"timeout,omitempty"` - UseSignificance bool `json:"use_significance"` + // SampleSize Each hop considers a sample of the best-matching documents on each shard. + // Using samples improves the speed of execution and keeps exploration focused + // on meaningfully-connected terms. + // Very small values (less than 50) might not provide sufficient + // weight-of-evidence to identify significant connections between terms. + // Very large sample sizes can dilute the quality of the results and increase + // execution times. + SampleSize *int `json:"sample_size,omitempty"` + // Timeout The length of time in milliseconds after which exploration will be halted and + // the results gathered so far are returned. + // This timeout is honored on a best-effort basis. + // Execution might overrun this timeout if, for example, a long pause is + // encountered while FieldData is loaded for a field. + Timeout Duration `json:"timeout,omitempty"` + // UseSignificance Filters associated terms so only those that are significantly associated with + // your query are included. 
+ UseSignificance bool `json:"use_significance"` +} + +func (s *ExploreControls) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "sample_diversity": + if err := dec.Decode(&s.SampleDiversity); err != nil { + return err + } + + case "sample_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SampleSize = &value + case float64: + f := int(v) + s.SampleSize = &f + } + + case "timeout": + if err := dec.Decode(&s.Timeout); err != nil { + return err + } + + case "use_significance": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.UseSignificance = value + case bool: + s.UseSignificance = v + } + + } + } + return nil } // NewExploreControls returns a ExploreControls. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedboundsdouble.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedboundsdouble.go index 249d1ec80..635f63ba2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedboundsdouble.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedboundsdouble.go @@ -16,18 +16,80 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ExtendedBoundsdouble type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L230-L233 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L489-L498 type ExtendedBoundsdouble struct { + // Max Maximum value for the bound. Max Float64 `json:"max"` + // Min Minimum value for the bound. Min Float64 `json:"min"` } +func (s *ExtendedBoundsdouble) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Max = f + case float64: + f := Float64(v) + s.Max = f + } + + case "min": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Min = f + case float64: + f := Float64(v) + s.Min = f + } + + } + } + return nil +} + // NewExtendedBoundsdouble returns a ExtendedBoundsdouble. func NewExtendedBoundsdouble() *ExtendedBoundsdouble { r := &ExtendedBoundsdouble{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedboundsfielddatemath.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedboundsfielddatemath.go index 30f11c791..71dc5c571 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedboundsfielddatemath.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedboundsfielddatemath.go @@ -16,18 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ExtendedBoundsFieldDateMath type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L230-L233 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L489-L498 type ExtendedBoundsFieldDateMath struct { + // Max Maximum value for the bound. Max FieldDateMath `json:"max"` + // Min Minimum value for the bound. Min FieldDateMath `json:"min"` } +func (s *ExtendedBoundsFieldDateMath) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max": + if err := dec.Decode(&s.Max); err != nil { + return err + } + + case "min": + if err := dec.Decode(&s.Min); err != nil { + return err + } + + } + } + return nil +} + // NewExtendedBoundsFieldDateMath returns a ExtendedBoundsFieldDateMath. func NewExtendedBoundsFieldDateMath() *ExtendedBoundsFieldDateMath { r := &ExtendedBoundsFieldDateMath{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedmemorystats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedmemorystats.go index 30f8e70c3..bce9a0f39 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedmemorystats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedmemorystats.go @@ -16,26 +16,236 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ExtendedMemoryStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L261-L264 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L622-L631 type ExtendedMemoryStats struct { - AdjustedTotalInBytes *int64 `json:"adjusted_total_in_bytes,omitempty"` - FreeInBytes *int64 `json:"free_in_bytes,omitempty"` - FreePercent *int `json:"free_percent,omitempty"` - Resident *string `json:"resident,omitempty"` - ResidentInBytes *int64 `json:"resident_in_bytes,omitempty"` - Share *string `json:"share,omitempty"` - ShareInBytes *int64 `json:"share_in_bytes,omitempty"` - TotalInBytes *int64 `json:"total_in_bytes,omitempty"` - TotalVirtual *string `json:"total_virtual,omitempty"` - TotalVirtualInBytes *int64 `json:"total_virtual_in_bytes,omitempty"` - UsedInBytes *int64 `json:"used_in_bytes,omitempty"` - UsedPercent *int `json:"used_percent,omitempty"` + // AdjustedTotalInBytes If the amount of physical memory has been overridden using the + // `es`.`total_memory_bytes` system property then this reports the overridden + // value in bytes. + // Otherwise it reports the same value as `total_in_bytes`. + AdjustedTotalInBytes *int64 `json:"adjusted_total_in_bytes,omitempty"` + // FreeInBytes Amount of free physical memory in bytes. + FreeInBytes *int64 `json:"free_in_bytes,omitempty"` + // FreePercent Percentage of free memory. 
+ FreePercent *int `json:"free_percent,omitempty"` + Resident *string `json:"resident,omitempty"` + ResidentInBytes *int64 `json:"resident_in_bytes,omitempty"` + Share *string `json:"share,omitempty"` + ShareInBytes *int64 `json:"share_in_bytes,omitempty"` + // TotalInBytes Total amount of physical memory in bytes. + TotalInBytes *int64 `json:"total_in_bytes,omitempty"` + TotalVirtual *string `json:"total_virtual,omitempty"` + TotalVirtualInBytes *int64 `json:"total_virtual_in_bytes,omitempty"` + // UsedInBytes Amount of used physical memory in bytes. + UsedInBytes *int64 `json:"used_in_bytes,omitempty"` + // UsedPercent Percentage of used memory. + UsedPercent *int `json:"used_percent,omitempty"` +} + +func (s *ExtendedMemoryStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "adjusted_total_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.AdjustedTotalInBytes = &value + case float64: + f := int64(v) + s.AdjustedTotalInBytes = &f + } + + case "free_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.FreeInBytes = &value + case float64: + f := int64(v) + s.FreeInBytes = &f + } + + case "free_percent": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FreePercent = &value + case float64: + f := int(v) + s.FreePercent = &f + } + + case "resident": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Resident = &o 
+ + case "resident_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ResidentInBytes = &value + case float64: + f := int64(v) + s.ResidentInBytes = &f + } + + case "share": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Share = &o + + case "share_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ShareInBytes = &value + case float64: + f := int64(v) + s.ShareInBytes = &f + } + + case "total_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalInBytes = &value + case float64: + f := int64(v) + s.TotalInBytes = &f + } + + case "total_virtual": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TotalVirtual = &o + + case "total_virtual_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalVirtualInBytes = &value + case float64: + f := int64(v) + s.TotalVirtualInBytes = &f + } + + case "used_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.UsedInBytes = &value + case float64: + f := int64(v) + s.UsedInBytes = &f + } + + case "used_percent": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + 
return err + } + s.UsedPercent = &value + case float64: + f := int(v) + s.UsedPercent = &f + } + + } + } + return nil } // NewExtendedMemoryStats returns a ExtendedMemoryStats. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedstatsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedstatsaggregate.go index f3102b92b..c77b79ffa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedstatsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedstatsaggregate.go @@ -16,24 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // ExtendedStatsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L277-L295 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L278-L296 type ExtendedStatsAggregate struct { Avg Float64 `json:"avg,omitempty"` AvgAsString *string `json:"avg_as_string,omitempty"` Count int64 `json:"count"` Max Float64 `json:"max,omitempty"` MaxAsString *string `json:"max_as_string,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` Min Float64 `json:"min,omitempty"` MinAsString *string `json:"min_as_string,omitempty"` StdDeviation Float64 `json:"std_deviation,omitempty"` @@ -54,6 +58,230 @@ type ExtendedStatsAggregate struct { VarianceSamplingAsString *string `json:"variance_sampling_as_string,omitempty"` } +func (s *ExtendedStatsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg": + if err := dec.Decode(&s.Avg); err != nil { + return err + } + + case "avg_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AvgAsString = &o + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "max": + if err := dec.Decode(&s.Max); err != nil { + return err + } + + case "max_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != 
nil { + o = string(tmp[:]) + } + s.MaxAsString = &o + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "min": + if err := dec.Decode(&s.Min); err != nil { + return err + } + + case "min_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MinAsString = &o + + case "std_deviation": + if err := dec.Decode(&s.StdDeviation); err != nil { + return err + } + + case "std_deviation_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StdDeviationAsString = &o + + case "std_deviation_bounds": + if err := dec.Decode(&s.StdDeviationBounds); err != nil { + return err + } + + case "std_deviation_bounds_as_string": + if err := dec.Decode(&s.StdDeviationBoundsAsString); err != nil { + return err + } + + case "std_deviation_population": + if err := dec.Decode(&s.StdDeviationPopulation); err != nil { + return err + } + + case "std_deviation_sampling": + if err := dec.Decode(&s.StdDeviationSampling); err != nil { + return err + } + + case "sum": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Sum = f + case float64: + f := Float64(v) + s.Sum = f + } + + case "sum_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SumAsString = &o + + case "sum_of_squares": + if err := dec.Decode(&s.SumOfSquares); err != nil { + return err + } + + case "sum_of_squares_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SumOfSquaresAsString = &o + + case "variance": + if err := dec.Decode(&s.Variance); err != nil { + return err + } + + case "variance_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VarianceAsString = &o + + case "variance_population": + if err := dec.Decode(&s.VariancePopulation); err != nil { + return err + } + + case "variance_population_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VariancePopulationAsString = &o + + case "variance_sampling": + if err := dec.Decode(&s.VarianceSampling); err != nil { + return err + } + + case "variance_sampling_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VarianceSamplingAsString = &o + + } + } + return nil +} + // NewExtendedStatsAggregate returns a ExtendedStatsAggregate. func NewExtendedStatsAggregate() *ExtendedStatsAggregate { r := &ExtendedStatsAggregate{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedstatsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedstatsaggregation.go index 641fe75f7..747d7d74a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedstatsaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedstatsaggregation.go @@ -16,19 +16,94 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ExtendedStatsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L68-L70 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L101-L106 type ExtendedStatsAggregation struct { - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` - Missing Missing `json:"missing,omitempty"` - Script Script `json:"script,omitempty"` - Sigma *Float64 `json:"sigma,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script Script `json:"script,omitempty"` + // Sigma The number of standard deviations above/below the mean to display. 
+ Sigma *Float64 `json:"sigma,omitempty"` +} + +func (s *ExtendedStatsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "sigma": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Sigma = &f + case float64: + f := Float64(v) + s.Sigma = &f + } + + } + } + return nil } // NewExtendedStatsAggregation returns a ExtendedStatsAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedstatsbucketaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedstatsbucketaggregate.go index 363d18f75..6a573dff8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedstatsbucketaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedstatsbucketaggregate.go @@ -16,24 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // ExtendedStatsBucketAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L297-L298 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L298-L299 type ExtendedStatsBucketAggregate struct { Avg Float64 `json:"avg,omitempty"` AvgAsString *string `json:"avg_as_string,omitempty"` Count int64 `json:"count"` Max Float64 `json:"max,omitempty"` MaxAsString *string `json:"max_as_string,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` Min Float64 `json:"min,omitempty"` MinAsString *string `json:"min_as_string,omitempty"` StdDeviation Float64 `json:"std_deviation,omitempty"` @@ -54,6 +58,230 @@ type ExtendedStatsBucketAggregate struct { VarianceSamplingAsString *string `json:"variance_sampling_as_string,omitempty"` } +func (s *ExtendedStatsBucketAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg": + if err := dec.Decode(&s.Avg); err != nil { + return err + } + + case "avg_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AvgAsString = &o + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "max": + if err := dec.Decode(&s.Max); err != nil { + return err + } + + case "max_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxAsString = &o + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "min": + if err := dec.Decode(&s.Min); err != nil { + return err + } + + case "min_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MinAsString = &o + + case "std_deviation": + if err := dec.Decode(&s.StdDeviation); err != nil { + return err + } + + case "std_deviation_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StdDeviationAsString = &o + + case "std_deviation_bounds": + if err := dec.Decode(&s.StdDeviationBounds); err != nil { + return err + } + + case "std_deviation_bounds_as_string": + if err := dec.Decode(&s.StdDeviationBoundsAsString); err != nil { + return err + } + + case "std_deviation_population": + if err := dec.Decode(&s.StdDeviationPopulation); err != nil { + return err + } + + case "std_deviation_sampling": + if err := dec.Decode(&s.StdDeviationSampling); err != nil { + return err + } + + case "sum": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Sum = f + case float64: + f := Float64(v) + s.Sum = f + } + + case "sum_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SumAsString = &o + + case "sum_of_squares": + if err := dec.Decode(&s.SumOfSquares); err != nil { + return err + } + + case "sum_of_squares_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := 
string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SumOfSquaresAsString = &o + + case "variance": + if err := dec.Decode(&s.Variance); err != nil { + return err + } + + case "variance_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VarianceAsString = &o + + case "variance_population": + if err := dec.Decode(&s.VariancePopulation); err != nil { + return err + } + + case "variance_population_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VariancePopulationAsString = &o + + case "variance_sampling": + if err := dec.Decode(&s.VarianceSampling); err != nil { + return err + } + + case "variance_sampling_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VarianceSamplingAsString = &o + + } + } + return nil +} + // NewExtendedStatsBucketAggregate returns a ExtendedStatsBucketAggregate. func NewExtendedStatsBucketAggregate() *ExtendedStatsBucketAggregate { r := &ExtendedStatsBucketAggregate{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedstatsbucketaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedstatsbucketaggregation.go index a3d207e98..2ef80fa91 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedstatsbucketaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/extendedstatsbucketaggregation.go @@ -16,34 +16,40 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // ExtendedStatsBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L167-L169 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L198-L203 type ExtendedStatsBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Sigma *Float64 `json:"sigma,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Sigma The number of standard deviations above/below the mean to display. 
+ Sigma *Float64 `json:"sigma,omitempty"` } func (s *ExtendedStatsBucketAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -63,9 +69,16 @@ func (s *ExtendedStatsBucketAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -78,13 +91,31 @@ func (s *ExtendedStatsBucketAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o case "sigma": - if err := dec.Decode(&s.Sigma); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Sigma = &f + case float64: + f := Float64(v) + s.Sigma = &f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/failprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/failprocessor.go index cb6419a5b..f2765b1a0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/failprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/failprocessor.go @@ -16,20 +16,124 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FailProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L211-L213 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L637-L643 type FailProcessor struct { - Description *string `json:"description,omitempty"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - Message string `json:"message"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Tag *string `json:"tag,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // Message The error message thrown by the processor. + // Supports template snippets. + Message string `json:"message"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. 
+ Tag *string `json:"tag,omitempty"` +} + +func (s *FailProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "message": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Message = o + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil } // NewFailProcessor returns a FailProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/feature.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/feature.go index 5f5bf6597..357ca9eab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/feature.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/feature.go @@ -16,18 +16,70 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Feature type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/features/_types/Feature.ts#L20-L23 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/features/_types/Feature.ts#L20-L23 type Feature struct { Description string `json:"description"` Name string `json:"name"` } +func (s *Feature) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + } + } + return nil +} + // NewFeature returns a Feature. func NewFeature() *Feature { r := &Feature{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/features.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/features.go index 7849935b5..54a9a6c12 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/features.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/features.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,5 +26,5 @@ import ( // Features type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/get/IndicesGetRequest.ts#L94-L94 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/get/IndicesGetRequest.ts#L95-L95 type Features []feature.Feature diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/featuretoggle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/featuretoggle.go index 235da3f46..1f3223422 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/featuretoggle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/featuretoggle.go @@ -16,17 +16,59 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FeatureToggle type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L40-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L40-L42 type FeatureToggle struct { Enabled bool `json:"enabled"` } +func (s *FeatureToggle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + // NewFeatureToggle returns a FeatureToggle. func NewFeatureToggle() *FeatureToggle { r := &FeatureToggle{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fetchprofile.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fetchprofile.go index c80b973d6..cb83ed2d8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fetchprofile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fetchprofile.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FetchProfile type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/profile.ts#L139-L146 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/profile.ts#L139-L146 type FetchProfile struct { Breakdown FetchProfileBreakdown `json:"breakdown"` Children []FetchProfile `json:"children,omitempty"` @@ -32,6 +40,70 @@ type FetchProfile struct { Type string `json:"type"` } +func (s *FetchProfile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "breakdown": + if err := dec.Decode(&s.Breakdown); err != nil { + return err + } + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return err + } + + case "debug": + if err := dec.Decode(&s.Debug); err != nil { + return err + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "time_in_nanos": + if err := dec.Decode(&s.TimeInNanos); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewFetchProfile returns a FetchProfile. 
func NewFetchProfile() *FetchProfile { r := &FetchProfile{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fetchprofilebreakdown.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fetchprofilebreakdown.go index a18811fa3..cb32e2b6d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fetchprofilebreakdown.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fetchprofilebreakdown.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FetchProfileBreakdown type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/profile.ts#L148-L157 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/profile.ts#L148-L157 type FetchProfileBreakdown struct { LoadSource *int `json:"load_source,omitempty"` LoadSourceCount *int `json:"load_source_count,omitempty"` @@ -34,6 +42,154 @@ type FetchProfileBreakdown struct { ProcessCount *int `json:"process_count,omitempty"` } +func (s *FetchProfileBreakdown) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "load_source": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.LoadSource = &value + case float64: + f := int(v) + s.LoadSource = &f + } + + case 
"load_source_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.LoadSourceCount = &value + case float64: + f := int(v) + s.LoadSourceCount = &f + } + + case "load_stored_fields": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.LoadStoredFields = &value + case float64: + f := int(v) + s.LoadStoredFields = &f + } + + case "load_stored_fields_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.LoadStoredFieldsCount = &value + case float64: + f := int(v) + s.LoadStoredFieldsCount = &f + } + + case "next_reader": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NextReader = &value + case float64: + f := int(v) + s.NextReader = &f + } + + case "next_reader_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NextReaderCount = &value + case float64: + f := int(v) + s.NextReaderCount = &f + } + + case "process": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Process = &value + case float64: + f := int(v) + s.Process = &f + } + + case "process_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ProcessCount = &value + case float64: + f := int(v) + s.ProcessCount = &f + } + + } + } + return nil +} + // NewFetchProfileBreakdown returns a FetchProfileBreakdown. 
func NewFetchProfileBreakdown() *FetchProfileBreakdown { r := &FetchProfileBreakdown{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fetchprofiledebug.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fetchprofiledebug.go index 6ea1cd456..296a569ef 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fetchprofiledebug.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fetchprofiledebug.go @@ -16,18 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FetchProfileDebug type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/profile.ts#L159-L162 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/profile.ts#L159-L162 type FetchProfileDebug struct { FastPath *int `json:"fast_path,omitempty"` StoredFields []string `json:"stored_fields,omitempty"` } +func (s *FetchProfileDebug) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fast_path": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FastPath = &value + case float64: + f := int(v) + s.FastPath = &f + } + + case "stored_fields": + if err := dec.Decode(&s.StoredFields); err != nil { + return err + } + + } + } + return nil +} + // 
NewFetchProfileDebug returns a FetchProfileDebug. func NewFetchProfileDebug() *FetchProfileDebug { r := &FetchProfileDebug{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldaliasproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldaliasproperty.go index 4c1a350ab..357bd881a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldaliasproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldaliasproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // FieldAliasProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/specialized.ts#L49-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/specialized.ts#L49-L52 type FieldAliasProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` @@ -45,6 +45,7 @@ type FieldAliasProperty struct { } func (s *FieldAliasProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -64,6 +65,9 @@ func (s *FieldAliasProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -72,7 +76,9 @@ func (s *FieldAliasProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -351,18 +357,34 @@ func (s *FieldAliasProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } @@ -373,6 +395,9 @@ func (s *FieldAliasProperty) UnmarshalJSON(data []byte) error 
{ } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -381,7 +406,9 @@ func (s *FieldAliasProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -660,9 +687,11 @@ func (s *FieldAliasProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -676,6 +705,24 @@ func (s *FieldAliasProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s FieldAliasProperty) MarshalJSON() ([]byte, error) { + type innerFieldAliasProperty FieldAliasProperty + tmp := innerFieldAliasProperty{ + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Path: s.Path, + Properties: s.Properties, + Type: s.Type, + } + + tmp.Type = "alias" + + return json.Marshal(tmp) +} + // NewFieldAliasProperty returns a FieldAliasProperty. func NewFieldAliasProperty() *FieldAliasProperty { r := &FieldAliasProperty{ @@ -684,7 +731,5 @@ func NewFieldAliasProperty() *FieldAliasProperty { Properties: make(map[string]Property, 0), } - r.Type = "alias" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldandformat.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldandformat.go index 5c3865c7c..600a056d2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldandformat.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldandformat.go @@ -16,13 +16,21 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FieldAndFormat type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/abstractions.ts#L212-L226 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/abstractions.ts#L489-L503 type FieldAndFormat struct { // Field Wildcard pattern. The request returns values for field names matching this // pattern. @@ -32,6 +40,62 @@ type FieldAndFormat struct { IncludeUnmapped *bool `json:"include_unmapped,omitempty"` } +func (s *FieldAndFormat) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Field) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "include_unmapped": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IncludeUnmapped = &value + case bool: + s.IncludeUnmapped = &v + } + + } + } + return nil +} + // NewFieldAndFormat returns a FieldAndFormat. 
func NewFieldAndFormat() *FieldAndFormat { r := &FieldAndFormat{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldcapability.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldcapability.go index 0ea4d5b38..6d791bed1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldcapability.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldcapability.go @@ -16,19 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // FieldCapability type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/field_caps/types.ts#L23-L81 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/field_caps/types.ts#L23-L81 type FieldCapability struct { // Aggregatable Whether this field can be aggregated on all indices. Aggregatable bool `json:"aggregatable"` @@ -39,7 +43,7 @@ type FieldCapability struct { // values. A value length of 1 indicates that all indices had the same value for // this key, while a length of 2 or more indicates that not all indices had the // same value for this key. - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` // MetadataField Whether this field is registered as a metadata field. 
MetadataField *bool `json:"metadata_field,omitempty"` // MetricConflictsIndices The list of indices where this field is present if these indices @@ -64,6 +68,162 @@ type FieldCapability struct { Type string `json:"type"` } +func (s *FieldCapability) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregatable": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Aggregatable = value + case bool: + s.Aggregatable = v + } + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return err + } + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "metadata_field": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.MetadataField = &value + case bool: + s.MetadataField = &v + } + + case "metric_conflicts_indices": + if err := dec.Decode(&s.MetricConflictsIndices); err != nil { + return err + } + + case "non_aggregatable_indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.NonAggregatableIndices = append(s.NonAggregatableIndices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.NonAggregatableIndices); err != nil { + return err + } + } + + case 
"non_dimension_indices": + if err := dec.Decode(&s.NonDimensionIndices); err != nil { + return err + } + + case "non_searchable_indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.NonSearchableIndices = append(s.NonSearchableIndices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.NonSearchableIndices); err != nil { + return err + } + } + + case "searchable": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Searchable = value + case bool: + s.Searchable = v + } + + case "time_series_dimension": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v + } + + case "time_series_metric": + if err := dec.Decode(&s.TimeSeriesMetric); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewFieldCapability returns a FieldCapability. func NewFieldCapability() *FieldCapability { r := &FieldCapability{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldcollapse.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldcollapse.go index 7ab06ee6a..591263798 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldcollapse.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldcollapse.go @@ -16,18 +16,92 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FieldCollapse type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/FieldCollapse.ts#L24-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/FieldCollapse.ts#L24-L38 type FieldCollapse struct { - Collapse *FieldCollapse `json:"collapse,omitempty"` - Field string `json:"field"` - InnerHits []InnerHits `json:"inner_hits,omitempty"` - MaxConcurrentGroupSearches *int `json:"max_concurrent_group_searches,omitempty"` + Collapse *FieldCollapse `json:"collapse,omitempty"` + // Field The field to collapse the result set on + Field string `json:"field"` + // InnerHits The number of inner hits and their sort order + InnerHits []InnerHits `json:"inner_hits,omitempty"` + // MaxConcurrentGroupSearches The number of concurrent requests allowed to retrieve the inner_hits per + // group + MaxConcurrentGroupSearches *int `json:"max_concurrent_group_searches,omitempty"` +} + +func (s *FieldCollapse) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collapse": + if err := dec.Decode(&s.Collapse); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "inner_hits": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewInnerHits() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + 
} + + s.InnerHits = append(s.InnerHits, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.InnerHits); err != nil { + return err + } + } + + case "max_concurrent_group_searches": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxConcurrentGroupSearches = &value + case float64: + f := int(v) + s.MaxConcurrentGroupSearches = &f + } + + } + } + return nil } // NewFieldCollapse returns a FieldCollapse. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fielddatafrequencyfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fielddatafrequencyfilter.go index 7f1bcd38d..e7f6c9e0f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fielddatafrequencyfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fielddatafrequencyfilter.go @@ -16,19 +16,95 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FielddataFrequencyFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/FielddataFrequencyFilter.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/FielddataFrequencyFilter.ts#L22-L26 type FielddataFrequencyFilter struct { Max Float64 `json:"max"` Min Float64 `json:"min"` MinSegmentSize int `json:"min_segment_size"` } +func (s *FielddataFrequencyFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Max = f + case float64: + f := Float64(v) + s.Max = f + } + + case "min": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Min = f + case float64: + f := Float64(v) + s.Min = f + } + + case "min_segment_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinSegmentSize = value + case float64: + f := int(v) + s.MinSegmentSize = f + } + + } + } + return nil +} + // NewFielddataFrequencyFilter returns a FielddataFrequencyFilter. 
func NewFielddataFrequencyFilter() *FielddataFrequencyFilter { r := &FielddataFrequencyFilter{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fielddatarecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fielddatarecord.go index 1a1725a20..da7f21591 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fielddatarecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fielddatarecord.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FielddataRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/fielddata/types.ts#L20-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/fielddata/types.ts#L20-L48 type FielddataRecord struct { // Field field name Field *string `json:"field,omitempty"` @@ -38,6 +46,98 @@ type FielddataRecord struct { Size *string `json:"size,omitempty"` } +func (s *FielddataRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field", "f": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Field = &o + + case "host", "h": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Host = &o + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = &o + + case "ip": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Ip = &o + + case "node", "n": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = &o + + case "size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Size = &o + + } + } + return nil +} + // NewFielddataRecord returns a FielddataRecord. func NewFielddataRecord() *FielddataRecord { r := &FielddataRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fielddatastats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fielddatastats.go index b78306406..d9ccf23cc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fielddatastats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fielddatastats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FielddataStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L69-L74 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L111-L116 type FielddataStats struct { Evictions *int64 `json:"evictions,omitempty"` Fields map[string]FieldMemoryUsage `json:"fields,omitempty"` @@ -30,6 +38,69 @@ type FielddataStats struct { MemorySizeInBytes int64 `json:"memory_size_in_bytes"` } +func (s *FielddataStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "evictions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Evictions = &value + case float64: + f := int64(v) + s.Evictions = &f + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]FieldMemoryUsage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "memory_size": + if err := dec.Decode(&s.MemorySize); err != nil { + return err + } + + case "memory_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MemorySizeInBytes = value + case float64: + f := int64(v) + s.MemorySizeInBytes = f + } + + } + } + return nil +} + // NewFielddataStats returns a FielddataStats. 
func NewFielddataStats() *FielddataStats { r := &FielddataStats{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fielddatemath.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fielddatemath.go index a1a1e9dca..da89713a7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fielddatemath.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fielddatemath.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // string // Float64 // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L140-L147 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L296-L303 type FieldDateMath interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldlookup.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldlookup.go index 29eca513a..ca2a80c87 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldlookup.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldlookup.go @@ -16,20 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // FieldLookup type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/abstractions.ts#L164-L169 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/abstractions.ts#L393-L410 type FieldLookup struct { - Id string `json:"id"` - Index *string `json:"index,omitempty"` - Path *string `json:"path,omitempty"` + // Id `id` of the document. + Id string `json:"id"` + // Index Index from which to retrieve the document. + Index *string `json:"index,omitempty"` + // Path Name of the field. + Path *string `json:"path,omitempty"` + // Routing Custom routing value. Routing *string `json:"routing,omitempty"` } +func (s *FieldLookup) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "path": + if err := dec.Decode(&s.Path); err != nil { + return err + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + } + } + return nil +} + // NewFieldLookup returns a FieldLookup. func NewFieldLookup() *FieldLookup { r := &FieldLookup{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldmapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldmapping.go index 153cc3557..4afa36f8b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldmapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldmapping.go @@ -16,27 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // FieldMapping type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/meta-fields.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/meta-fields.ts#L24-L27 type FieldMapping struct { FullName string `json:"full_name"` Mapping map[string]Property `json:"mapping"` } func (s *FieldMapping) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -51,11 +52,21 @@ func (s *FieldMapping) UnmarshalJSON(data []byte) error { switch t { case "full_name": - if err := dec.Decode(&s.FullName); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FullName = o case "mapping": + if s.Mapping == nil { + s.Mapping = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -64,7 +75,9 @@ func (s *FieldMapping) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -343,9 +356,11 @@ func (s *FieldMapping) UnmarshalJSON(data []byte) error { } s.Mapping[key] = oo default: - if err := dec.Decode(&s.Mapping); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Mapping[key] = oo } } diff --git 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldmemoryusage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldmemoryusage.go index b39b86cdc..35af8d41f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldmemoryusage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldmemoryusage.go @@ -16,18 +16,66 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FieldMemoryUsage type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L76-L79 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L118-L121 type FieldMemoryUsage struct { MemorySize ByteSize `json:"memory_size,omitempty"` MemorySizeInBytes int64 `json:"memory_size_in_bytes"` } +func (s *FieldMemoryUsage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "memory_size": + if err := dec.Decode(&s.MemorySize); err != nil { + return err + } + + case "memory_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MemorySizeInBytes = value + case float64: + f := int64(v) + s.MemorySizeInBytes = f + } + + } + } + return nil +} + // NewFieldMemoryUsage returns a FieldMemoryUsage. 
func NewFieldMemoryUsage() *FieldMemoryUsage { r := &FieldMemoryUsage{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldmetric.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldmetric.go index 362dfc85d..e7fe7dde5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldmetric.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldmetric.go @@ -16,17 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/metric" ) // FieldMetric type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/_types/Metric.ts#L30-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/_types/Metric.ts#L30-L35 type FieldMetric struct { // Field The field to collect metrics for. This must be a numeric of some kind. Field string `json:"field"` @@ -35,6 +40,36 @@ type FieldMetric struct { Metrics []metric.Metric `json:"metrics"` } +func (s *FieldMetric) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "metrics": + if err := dec.Decode(&s.Metrics); err != nil { + return err + } + + } + } + return nil +} + // NewFieldMetric returns a FieldMetric. 
func NewFieldMetric() *FieldMetric { r := &FieldMetric{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldnamesfield.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldnamesfield.go index f7a522b9b..0ab0a5179 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldnamesfield.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldnamesfield.go @@ -16,17 +16,59 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FieldNamesField type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/meta-fields.ts#L42-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/meta-fields.ts#L42-L44 type FieldNamesField struct { Enabled bool `json:"enabled"` } +func (s *FieldNamesField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + // NewFieldNamesField returns a FieldNamesField. 
func NewFieldNamesField() *FieldNamesField { r := &FieldNamesField{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldrule.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldrule.go index ea983af5b..6ac94bb11 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldrule.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldrule.go @@ -16,17 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // FieldRule type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/RoleMappingRule.ts#L33-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/RoleMappingRule.ts#L33-L42 type FieldRule struct { Dn []string `json:"dn,omitempty"` Groups []string `json:"groups,omitempty"` @@ -35,6 +38,73 @@ type FieldRule struct { Username *string `json:"username,omitempty"` } +func (s *FieldRule) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dn": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Dn = append(s.Dn, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Dn); err != nil { + return err + } + } + + case "groups": + rawMsg := json.RawMessage{} 
+ dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Groups = append(s.Groups, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Groups); err != nil { + return err + } + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return err + } + + case "realm": + if err := dec.Decode(&s.Realm); err != nil { + return err + } + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return err + } + + } + } + return nil +} + // NewFieldRule returns a FieldRule. func NewFieldRule() *FieldRule { r := &FieldRule{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fields.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fields.go index 65b0fdadd..cfb45346c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fields.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fields.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Fields type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L120-L120 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L134-L134 type Fields []string diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsecurity.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsecurity.go index e9821646d..e0d9d1aab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsecurity.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsecurity.go @@ -16,18 +16,77 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // FieldSecurity type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/FieldSecurity.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/FieldSecurity.ts#L22-L25 type FieldSecurity struct { Except []string `json:"except,omitempty"` Grant []string `json:"grant,omitempty"` } +func (s *FieldSecurity) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "except": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Except = append(s.Except, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Except); err != nil { + return err + } + } + + case "grant": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Grant = append(s.Grant, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Grant); err != nil { + return err + } + } + + } + } + return nil +} + // NewFieldSecurity returns a FieldSecurity. func NewFieldSecurity() *FieldSecurity { r := &FieldSecurity{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsizeusage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsizeusage.go index 28a936ca2..0a00d4e71 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsizeusage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsizeusage.go @@ -16,18 +16,66 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FieldSizeUsage type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L59-L62 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L92-L95 type FieldSizeUsage struct { Size ByteSize `json:"size,omitempty"` SizeInBytes int64 `json:"size_in_bytes"` } +func (s *FieldSizeUsage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return err + } + + case "size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + } + } + return nil +} + // NewFieldSizeUsage returns a FieldSizeUsage. func NewFieldSizeUsage() *FieldSizeUsage { r := &FieldSizeUsage{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsort.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsort.go index 3f9089923..9db729a9a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsort.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsort.go @@ -16,11 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldsortnumerictype" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldtype" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortmode" @@ -29,7 +35,7 @@ import ( // FieldSort type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/sort.ts#L44-L53 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/sort.ts#L44-L53 type FieldSort struct { Format *string `json:"format,omitempty"` Missing Missing `json:"missing,omitempty"` @@ -40,6 +46,73 @@ type FieldSort struct { UnmappedType *fieldtype.FieldType `json:"unmapped_type,omitempty"` } +func (s *FieldSort) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Order) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return err + } + + case "nested": + if err := dec.Decode(&s.Nested); err != nil { + return err + } + + case "numeric_type": + if err := dec.Decode(&s.NumericType); err != nil { + return err + } + + 
case "order": + if err := dec.Decode(&s.Order); err != nil { + return err + } + + case "unmapped_type": + if err := dec.Decode(&s.UnmappedType); err != nil { + return err + } + + } + } + return nil +} + // NewFieldSort returns a FieldSort. func NewFieldSort() *FieldSort { r := &FieldSort{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldstat.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldstat.go new file mode 100644 index 000000000..0e52deb62 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldstat.go @@ -0,0 +1,196 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// FieldStat type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/text_structure/find_structure/types.ts#L23-L33 +type FieldStat struct { + Cardinality int `json:"cardinality"` + Count int `json:"count"` + Earliest *string `json:"earliest,omitempty"` + Latest *string `json:"latest,omitempty"` + MaxValue *int `json:"max_value,omitempty"` + MeanValue *int `json:"mean_value,omitempty"` + MedianValue *int `json:"median_value,omitempty"` + MinValue *int `json:"min_value,omitempty"` + TopHits []TopHit `json:"top_hits"` +} + +func (s *FieldStat) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cardinality": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Cardinality = value + case float64: + f := int(v) + s.Cardinality = f + } + + case "count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "earliest": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Earliest = &o + + case "latest": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Latest = &o + + case "max_value": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxValue = &value + case float64: + f := int(v) + s.MaxValue = &f + } + + case 
"mean_value": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MeanValue = &value + case float64: + f := int(v) + s.MeanValue = &f + } + + case "median_value": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MedianValue = &value + case float64: + f := int(v) + s.MedianValue = &f + } + + case "min_value": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinValue = &value + case float64: + f := int(v) + s.MinValue = &f + } + + case "top_hits": + if err := dec.Decode(&s.TopHits); err != nil { + return err + } + + } + } + return nil +} + +// NewFieldStat returns a FieldStat. +func NewFieldStat() *FieldStat { + r := &FieldStat{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldstatistics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldstatistics.go index 209c31539..a0956ed42 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldstatistics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldstatistics.go @@ -16,19 +16,93 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FieldStatistics type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/termvectors/types.ts#L28-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/termvectors/types.ts#L28-L32 type FieldStatistics struct { DocCount int `json:"doc_count"` SumDocFreq int64 `json:"sum_doc_freq"` SumTtf int64 `json:"sum_ttf"` } +func (s *FieldStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DocCount = value + case float64: + f := int(v) + s.DocCount = f + } + + case "sum_doc_freq": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SumDocFreq = value + case float64: + f := int64(v) + s.SumDocFreq = f + } + + case "sum_ttf": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SumTtf = value + case float64: + f := int64(v) + s.SumTtf = f + } + + } + } + return nil +} + // NewFieldStatistics returns a FieldStatistics. 
func NewFieldStatistics() *FieldStatistics { r := &FieldStatistics{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsuggester.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsuggester.go index d8221ac03..8f44c7ec8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsuggester.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsuggester.go @@ -16,20 +16,107 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FieldSuggester type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L106-L120 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L106-L139 type FieldSuggester struct { + // Completion Provides auto-complete/search-as-you-type functionality. Completion *CompletionSuggester `json:"completion,omitempty"` - Phrase *PhraseSuggester `json:"phrase,omitempty"` - Prefix *string `json:"prefix,omitempty"` - Regex *string `json:"regex,omitempty"` - Term *TermSuggester `json:"term,omitempty"` - Text *string `json:"text,omitempty"` + // Phrase Provides access to word alternatives on a per token basis within a certain + // string distance. + Phrase *PhraseSuggester `json:"phrase,omitempty"` + // Prefix Prefix used to search for suggestions. + Prefix *string `json:"prefix,omitempty"` + // Regex A prefix expressed as a regular expression. + Regex *string `json:"regex,omitempty"` + // Term Suggests terms based on edit distance. 
+ Term *TermSuggester `json:"term,omitempty"` + // Text The text to use as input for the suggester. + // Needs to be set globally or per suggestion. + Text *string `json:"text,omitempty"` +} + +func (s *FieldSuggester) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "completion": + if err := dec.Decode(&s.Completion); err != nil { + return err + } + + case "phrase": + if err := dec.Decode(&s.Phrase); err != nil { + return err + } + + case "prefix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Prefix = &o + + case "regex": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Regex = &o + + case "term": + if err := dec.Decode(&s.Term); err != nil { + return err + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = &o + + } + } + return nil } // NewFieldSuggester returns a FieldSuggester. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsummary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsummary.go index 460e6c238..003337eab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsummary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsummary.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // FieldSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L54-L63 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L54-L63 type FieldSummary struct { Any uint `json:"any"` DocValues uint `json:"doc_values"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsusagebody.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsusagebody.go index f64728125..466e88b02 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsusagebody.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldsusagebody.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -27,7 +27,7 @@ import ( // FieldsUsageBody type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L32-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L32-L36 type FieldsUsageBody struct { FieldsUsageBody map[string]UsageStatsIndex `json:"-"` Shards_ ShardStatistics `json:"_shards"` @@ -52,6 +52,7 @@ func (s FieldsUsageBody) MarshalJSON() ([]byte, error) { for key, value := range s.FieldsUsageBody { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "FieldsUsageBody") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldtypes.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldtypes.go index 2a671ff94..52f898a5f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldtypes.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldtypes.go @@ -16,21 +16,157 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FieldTypes type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L105-L114 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L136-L167 type FieldTypes struct { - Count int `json:"count"` - IndexCount int `json:"index_count"` - IndexedVectorCount *int64 `json:"indexed_vector_count,omitempty"` + // Count The number of occurrences of the field type in selected nodes. + Count int `json:"count"` + // IndexCount The number of indices containing the field type in selected nodes. + IndexCount int `json:"index_count"` + // IndexedVectorCount For dense_vector field types, number of indexed vector types in selected + // nodes. + IndexedVectorCount *int64 `json:"indexed_vector_count,omitempty"` + // IndexedVectorDimMax For dense_vector field types, the maximum dimension of all indexed vector + // types in selected nodes. IndexedVectorDimMax *int64 `json:"indexed_vector_dim_max,omitempty"` + // IndexedVectorDimMin For dense_vector field types, the minimum dimension of all indexed vector + // types in selected nodes. IndexedVectorDimMin *int64 `json:"indexed_vector_dim_min,omitempty"` - Name string `json:"name"` - ScriptCount *int `json:"script_count,omitempty"` + // Name The name for the field type in selected nodes. + Name string `json:"name"` + // ScriptCount The number of fields that declare a script. 
+ ScriptCount *int `json:"script_count,omitempty"` +} + +func (s *FieldTypes) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "index_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IndexCount = value + case float64: + f := int(v) + s.IndexCount = f + } + + case "indexed_vector_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndexedVectorCount = &value + case float64: + f := int64(v) + s.IndexedVectorCount = &f + } + + case "indexed_vector_dim_max": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndexedVectorDimMax = &value + case float64: + f := int64(v) + s.IndexedVectorDimMax = &f + } + + case "indexed_vector_dim_min": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndexedVectorDimMin = &value + case float64: + f := int64(v) + s.IndexedVectorDimMin = &f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "script_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ScriptCount = &value + case float64: + f := int(v) + s.ScriptCount = &f + } + + 
} + } + return nil } // NewFieldTypes returns a FieldTypes. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldtypesmappings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldtypesmappings.go index afe2968ea..7181b4905 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldtypesmappings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldtypesmappings.go @@ -16,20 +16,117 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FieldTypesMappings type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L96-L103 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L109-L134 type FieldTypesMappings struct { - FieldTypes []FieldTypes `json:"field_types"` - RuntimeFieldTypes []ClusterRuntimeFieldTypes `json:"runtime_field_types,omitempty"` - TotalDeduplicatedFieldCount *int `json:"total_deduplicated_field_count,omitempty"` - TotalDeduplicatedMappingSize ByteSize `json:"total_deduplicated_mapping_size,omitempty"` - TotalDeduplicatedMappingSizeInBytes *int64 `json:"total_deduplicated_mapping_size_in_bytes,omitempty"` - TotalFieldCount *int `json:"total_field_count,omitempty"` + // FieldTypes Contains statistics about field data types used in selected nodes. + FieldTypes []FieldTypes `json:"field_types"` + // RuntimeFieldTypes Contains statistics about runtime field data types used in selected nodes. 
+ RuntimeFieldTypes []ClusterRuntimeFieldTypes `json:"runtime_field_types,omitempty"` + // TotalDeduplicatedFieldCount Total number of fields in all non-system indices, accounting for mapping + // deduplication. + TotalDeduplicatedFieldCount *int `json:"total_deduplicated_field_count,omitempty"` + // TotalDeduplicatedMappingSize Total size of all mappings after deduplication and compression. + TotalDeduplicatedMappingSize ByteSize `json:"total_deduplicated_mapping_size,omitempty"` + // TotalDeduplicatedMappingSizeInBytes Total size of all mappings, in bytes, after deduplication and compression. + TotalDeduplicatedMappingSizeInBytes *int64 `json:"total_deduplicated_mapping_size_in_bytes,omitempty"` + // TotalFieldCount Total number of fields in all non-system indices. + TotalFieldCount *int `json:"total_field_count,omitempty"` +} + +func (s *FieldTypesMappings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field_types": + if err := dec.Decode(&s.FieldTypes); err != nil { + return err + } + + case "runtime_field_types": + if err := dec.Decode(&s.RuntimeFieldTypes); err != nil { + return err + } + + case "total_deduplicated_field_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TotalDeduplicatedFieldCount = &value + case float64: + f := int(v) + s.TotalDeduplicatedFieldCount = &f + } + + case "total_deduplicated_mapping_size": + if err := dec.Decode(&s.TotalDeduplicatedMappingSize); err != nil { + return err + } + + case "total_deduplicated_mapping_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalDeduplicatedMappingSizeInBytes = &value + case 
float64: + f := int64(v) + s.TotalDeduplicatedMappingSizeInBytes = &f + } + + case "total_field_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TotalFieldCount = &value + case float64: + f := int(v) + s.TotalFieldCount = &f + } + + } + } + return nil } // NewFieldTypesMappings returns a FieldTypesMappings. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldvalue.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldvalue.go index 6928aa98d..199897073 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldvalue.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldvalue.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -29,5 +29,5 @@ package types // nil // json.RawMessage // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L25-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L25-L37 type FieldValue interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldvaluefactorscorefunction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldvaluefactorscorefunction.go index 3f56971de..454609519 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldvaluefactorscorefunction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fieldvaluefactorscorefunction.go @@ -16,24 +16,98 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/fieldvaluefactormodifier" ) // FieldValueFactorScoreFunction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L70-L75 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L132-L151 type FieldValueFactorScoreFunction struct { - Factor *Float64 `json:"factor,omitempty"` - Field string `json:"field"` - Missing *Float64 `json:"missing,omitempty"` + // Factor Optional factor to multiply the field value with. + Factor *Float64 `json:"factor,omitempty"` + // Field Field to be extracted from the document. + Field string `json:"field"` + // Missing Value used if the document doesn’t have that field. + // The modifier and factor are still applied to it as though it were read from + // the document. + Missing *Float64 `json:"missing,omitempty"` + // Modifier Modifier to apply to the field value. 
Modifier *fieldvaluefactormodifier.FieldValueFactorModifier `json:"modifier,omitempty"` } +func (s *FieldValueFactorScoreFunction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "factor": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Factor = &f + case float64: + f := Float64(v) + s.Factor = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Missing = &f + case float64: + f := Float64(v) + s.Missing = &f + } + + case "modifier": + if err := dec.Decode(&s.Modifier); err != nil { + return err + } + + } + } + return nil +} + // NewFieldValueFactorScoreFunction returns a FieldValueFactorScoreFunction. func NewFieldValueFactorScoreFunction() *FieldValueFactorScoreFunction { r := &FieldValueFactorScoreFunction{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filecountsnapshotstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filecountsnapshotstats.go index 2c723f6fc..06c9f50fa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filecountsnapshotstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filecountsnapshotstats.go @@ -16,18 +16,77 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FileCountSnapshotStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/_types/FileCountSnapshotStats.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/_types/FileCountSnapshotStats.ts#L22-L25 type FileCountSnapshotStats struct { FileCount int `json:"file_count"` SizeInBytes int64 `json:"size_in_bytes"` } +func (s *FileCountSnapshotStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "file_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FileCount = value + case float64: + f := int(v) + s.FileCount = f + } + + case "size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + } + } + return nil +} + // NewFileCountSnapshotStats returns a FileCountSnapshotStats. 
func NewFileCountSnapshotStats() *FileCountSnapshotStats { r := &FileCountSnapshotStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filedetails.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filedetails.go index f82a3ebb3..fe7c8882b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filedetails.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filedetails.go @@ -16,19 +16,89 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FileDetails type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/recovery/types.ts#L50-L54 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/recovery/types.ts#L50-L54 type FileDetails struct { Length int64 `json:"length"` Name string `json:"name"` Recovered int64 `json:"recovered"` } +func (s *FileDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "length": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Length = value + case float64: + f := int64(v) + s.Length = f + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + 
s.Name = o + + case "recovered": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Recovered = value + case float64: + f := int64(v) + s.Recovered = f + } + + } + } + return nil +} + // NewFileDetails returns a FileDetails. func NewFileDetails() *FileDetails { r := &FileDetails{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filesystem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filesystem.go index f24e4fc0d..70a790f7a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filesystem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filesystem.go @@ -16,18 +16,81 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FileSystem type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L286-L291 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L698-L716 type FileSystem struct { - Data []DataPathStats `json:"data,omitempty"` - IoStats *IoStats `json:"io_stats,omitempty"` - Timestamp *int64 `json:"timestamp,omitempty"` - Total *FileSystemTotal `json:"total,omitempty"` + // Data List of all file stores. + Data []DataPathStats `json:"data,omitempty"` + // IoStats Contains I/O statistics for the node. + IoStats *IoStats `json:"io_stats,omitempty"` + // Timestamp Last time the file stores statistics were refreshed. 
+ // Recorded in milliseconds since the Unix Epoch. + Timestamp *int64 `json:"timestamp,omitempty"` + // Total Contains statistics for all file stores of the node. + Total *FileSystemTotal `json:"total,omitempty"` +} + +func (s *FileSystem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data": + if err := dec.Decode(&s.Data); err != nil { + return err + } + + case "io_stats": + if err := dec.Decode(&s.IoStats); err != nil { + return err + } + + case "timestamp": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Timestamp = &value + case float64: + f := int64(v) + s.Timestamp = &f + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return err + } + + } + } + return nil } // NewFileSystem returns a FileSystem. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filesystemtotal.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filesystemtotal.go index db7b4232a..8c373dd7c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filesystemtotal.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filesystemtotal.go @@ -16,20 +16,144 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FileSystemTotal type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L307-L314 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L757-L786 type FileSystemTotal struct { - Available *string `json:"available,omitempty"` - AvailableInBytes *int64 `json:"available_in_bytes,omitempty"` - Free *string `json:"free,omitempty"` - FreeInBytes *int64 `json:"free_in_bytes,omitempty"` - Total *string `json:"total,omitempty"` - TotalInBytes *int64 `json:"total_in_bytes,omitempty"` + // Available Total disk space available to this Java virtual machine on all file stores. + // Depending on OS or process level restrictions, this might appear less than + // `free`. + // This is the actual amount of free disk space the Elasticsearch node can + // utilise. + Available *string `json:"available,omitempty"` + // AvailableInBytes Total number of bytes available to this Java virtual machine on all file + // stores. + // Depending on OS or process level restrictions, this might appear less than + // `free_in_bytes`. + // This is the actual amount of free disk space the Elasticsearch node can + // utilise. + AvailableInBytes *int64 `json:"available_in_bytes,omitempty"` + // Free Total unallocated disk space in all file stores. + Free *string `json:"free,omitempty"` + // FreeInBytes Total number of unallocated bytes in all file stores. + FreeInBytes *int64 `json:"free_in_bytes,omitempty"` + // Total Total size of all file stores. + Total *string `json:"total,omitempty"` + // TotalInBytes Total size of all file stores in bytes. 
+ TotalInBytes *int64 `json:"total_in_bytes,omitempty"` +} + +func (s *FileSystemTotal) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Available = &o + + case "available_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.AvailableInBytes = &value + case float64: + f := int64(v) + s.AvailableInBytes = &f + } + + case "free": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Free = &o + + case "free_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.FreeInBytes = &value + case float64: + f := int64(v) + s.FreeInBytes = &f + } + + case "total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Total = &o + + case "total_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalInBytes = &value + case float64: + f := int64(v) + s.TotalInBytes = &f + } + + } + } + return nil } // NewFileSystemTotal returns a FileSystemTotal. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fillmaskinferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fillmaskinferenceoptions.go index b21c3b67d..10489d6b8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fillmaskinferenceoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fillmaskinferenceoptions.go @@ -16,14 +16,32 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FillMaskInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L241-L249 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L266-L280 type FillMaskInferenceOptions struct { + // MaskToken The string/token which will be removed from incoming documents and replaced + // with the inference prediction(s). + // In a response, this field contains the mask token for the specified + // model/tokenizer. Each model and tokenizer + // has a predefined mask token which cannot be changed. Thus, it is recommended + // not to set this value in requests. + // However, if this field is present in a request, its value must match the + // predefined value for that model/tokenizer, + // otherwise the request will fail. + MaskToken *string `json:"mask_token,omitempty"` // NumTopClasses Specifies the number of top class predictions to return. Defaults to 0. 
NumTopClasses *int `json:"num_top_classes,omitempty"` // ResultsField The field that is added to incoming documents to contain the inference @@ -33,6 +51,71 @@ type FillMaskInferenceOptions struct { Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` } +func (s *FillMaskInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mask_token": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaskToken = &o + + case "num_top_classes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumTopClasses = &value + case float64: + f := int(v) + s.NumTopClasses = &f + } + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return err + } + + } + } + return nil +} + // NewFillMaskInferenceOptions returns a FillMaskInferenceOptions. 
func NewFillMaskInferenceOptions() *FillMaskInferenceOptions { r := &FillMaskInferenceOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fillmaskinferenceupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fillmaskinferenceupdateoptions.go index 66964e63d..d0aa7efa1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fillmaskinferenceupdateoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fillmaskinferenceupdateoptions.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FillMaskInferenceUpdateOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L370-L377 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L411-L418 type FillMaskInferenceUpdateOptions struct { // NumTopClasses Specifies the number of top class predictions to return. Defaults to 0. 
NumTopClasses *int `json:"num_top_classes,omitempty"` @@ -33,6 +41,59 @@ type FillMaskInferenceUpdateOptions struct { Tokenization *NlpTokenizationUpdateOptions `json:"tokenization,omitempty"` } +func (s *FillMaskInferenceUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "num_top_classes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumTopClasses = &value + case float64: + f := int(v) + s.NumTopClasses = &f + } + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return err + } + + } + } + return nil +} + // NewFillMaskInferenceUpdateOptions returns a FillMaskInferenceUpdateOptions. func NewFillMaskInferenceUpdateOptions() *FillMaskInferenceUpdateOptions { r := &FillMaskInferenceUpdateOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filteraggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filteraggregate.go index d7a0c7c1a..6b6f28a65 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filteraggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filteraggregate.go @@ -16,32 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // FilterAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L494-L495 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L495-L496 type FilterAggregate struct { - Aggregations map[string]Aggregate `json:"-"` - DocCount int64 `json:"doc_count"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` } func (s *FilterAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -55,451 +54,19 @@ func (s *FilterAggregate) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if 
err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := 
NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case 
"geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = 
o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "meta": @@ -507,6 +74,519 @@ func (s *FilterAggregate) 
UnmarshalJSON(data []byte) error { return err } + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if 
err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != 
nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); 
err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := 
NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } + } } return nil @@ -531,6 +611,7 @@ func (s FilterAggregate) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filterref.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filterref.go index 1e0b22c19..707b3d210 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filterref.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filterref.go @@ -16,17 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/filtertype" ) // FilterRef type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Filter.ts#L31-L41 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Filter.ts#L31-L41 type FilterRef struct { // FilterId The identifier for the filter. FilterId string `json:"filter_id"` @@ -35,6 +40,36 @@ type FilterRef struct { FilterType *filtertype.FilterType `json:"filter_type,omitempty"` } +func (s *FilterRef) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filter_id": + if err := dec.Decode(&s.FilterId); err != nil { + return err + } + + case "filter_type": + if err := dec.Decode(&s.FilterType); err != nil { + return err + } + + } + } + return nil +} + // NewFilterRef returns a FilterRef. func NewFilterRef() *FilterRef { r := &FilterRef{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filtersaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filtersaggregate.go index a43ef3d7e..67c3c9560 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filtersaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filtersaggregate.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // FiltersAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L567-L568 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L568-L569 type FiltersAggregate struct { - Buckets BucketsFiltersBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsFiltersBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *FiltersAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *FiltersAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]FiltersBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []FiltersBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filtersaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filtersaggregation.go index 2598dc82d..66fd534cf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filtersaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filtersaggregation.go @@ -16,24 +16,132 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // FiltersAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L169-L174 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L358-L378 type FiltersAggregation struct { - Filters *BucketsQuery `json:"filters,omitempty"` - Keyed *bool `json:"keyed,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - OtherBucket *bool `json:"other_bucket,omitempty"` - OtherBucketKey *string `json:"other_bucket_key,omitempty"` + // Filters Collection of queries from which to build buckets. + Filters BucketsQuery `json:"filters,omitempty"` + // Keyed By default, the named filters aggregation returns the buckets as an object. + // Set to `false` to return the buckets as an array of objects. + Keyed *bool `json:"keyed,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // OtherBucket Set to `true` to add a bucket to the response which will contain all + // documents that do not match any of the given filters. + OtherBucket *bool `json:"other_bucket,omitempty"` + // OtherBucketKey The key with which the other bucket is returned. 
+ OtherBucketKey *string `json:"other_bucket_key,omitempty"` +} + +func (s *FiltersAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filters": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]Query, 0) + if err := localDec.Decode(&o); err != nil { + return err + } + s.Filters = o + case '[': + o := []Query{} + if err := localDec.Decode(&o); err != nil { + return err + } + s.Filters = o + } + + case "keyed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "other_bucket": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.OtherBucket = &value + case bool: + s.OtherBucket = &v + } + + case "other_bucket_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OtherBucketKey = &o + + } + } + return nil } // NewFiltersAggregation returns a FiltersAggregation. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filtersbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filtersbucket.go index 172d617c3..124cc83e3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filtersbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/filtersbucket.go @@ -16,31 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // FiltersBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L570-L570 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L571-L571 type FiltersBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` } func (s *FiltersBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -54,453 +53,534 @@ func (s *FiltersBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o 
:= NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case 
"unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") + s.DocCount = value + case float64: + f := int64(v) + s.DocCount 
= f + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = 
o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if 
err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := 
NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := 
NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o } - s.Aggregations[value] = o + } else { + return errors.New("cannot decode JSON for field Aggregations") } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o } } - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err - } - } } return nil @@ -525,6 +605,7 @@ func (s FiltersBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fingerprintanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fingerprintanalyzer.go index 58b1255bf..d15f8ed31 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fingerprintanalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fingerprintanalyzer.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FingerprintAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/analyzers.ts#L37-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/analyzers.ts#L37-L45 type FingerprintAnalyzer struct { MaxOutputSize int `json:"max_output_size"` PreserveOriginal bool `json:"preserve_original"` @@ -33,11 +41,127 @@ type FingerprintAnalyzer struct { Version *string `json:"version,omitempty"` } +func (s *FingerprintAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_output_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxOutputSize = value + case float64: + f := int(v) + s.MaxOutputSize = f + } + + case "preserve_original": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.PreserveOriginal = value + case bool: + s.PreserveOriginal = v + } + + case "separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Separator = o + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return err + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s FingerprintAnalyzer) MarshalJSON() ([]byte, error) { + type innerFingerprintAnalyzer FingerprintAnalyzer + tmp := innerFingerprintAnalyzer{ + MaxOutputSize: s.MaxOutputSize, + PreserveOriginal: s.PreserveOriginal, + Separator: s.Separator, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "fingerprint" + + return json.Marshal(tmp) +} + // NewFingerprintAnalyzer returns a FingerprintAnalyzer. func NewFingerprintAnalyzer() *FingerprintAnalyzer { r := &FingerprintAnalyzer{} - r.Type = "fingerprint" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fingerprinttokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fingerprinttokenfilter.go index a8b78e17c..85e09f19b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fingerprinttokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fingerprinttokenfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FingerprintTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L193-L197 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L194-L198 type FingerprintTokenFilter struct { MaxOutputSize *int `json:"max_output_size,omitempty"` Separator *string `json:"separator,omitempty"` @@ -30,11 +38,82 @@ type FingerprintTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *FingerprintTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_output_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxOutputSize = &value + case float64: + f := int(v) + s.MaxOutputSize = &f + } + + case "separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Separator = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s FingerprintTokenFilter) MarshalJSON() ([]byte, error) { + type innerFingerprintTokenFilter 
FingerprintTokenFilter + tmp := innerFingerprintTokenFilter{ + MaxOutputSize: s.MaxOutputSize, + Separator: s.Separator, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "fingerprint" + + return json.Marshal(tmp) +} + // NewFingerprintTokenFilter returns a FingerprintTokenFilter. func NewFingerprintTokenFilter() *FingerprintTokenFilter { r := &FingerprintTokenFilter{} - r.Type = "fingerprint" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/flattened.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/flattened.go index 5cd63a02f..0ef62d584 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/flattened.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/flattened.go @@ -16,19 +16,91 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Flattened type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L347-L349 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L356-L358 type Flattened struct { Available bool `json:"available"` Enabled bool `json:"enabled"` FieldCount int `json:"field_count"` } +func (s *Flattened) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "field_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FieldCount = value + case float64: + f := int(v) + s.FieldCount = f + } + + } + } + return nil +} + // NewFlattened returns a Flattened. func NewFlattened() *Flattened { r := &Flattened{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/flattenedproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/flattenedproperty.go index 49d2ddd70..c316d92c6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/flattenedproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/flattenedproperty.go @@ -16,24 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" ) // FlattenedProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/complex.ts#L26-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/complex.ts#L26-L37 type FlattenedProperty struct { Boost *Float64 `json:"boost,omitempty"` DepthLimit *int `json:"depth_limit,omitempty"` @@ -54,6 +54,7 @@ type FlattenedProperty struct { } func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -68,18 +69,49 @@ func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "depth_limit": - if err := dec.Decode(&s.DepthLimit); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DepthLimit = &value + case float64: + f := int(v) + s.DepthLimit = &f } case "doc_values": - if err 
:= dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -88,11 +120,23 @@ func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { } case "eager_global_ordinals": - if err := dec.Decode(&s.EagerGlobalOrdinals); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.EagerGlobalOrdinals = &value + case bool: + s.EagerGlobalOrdinals = &v } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -101,7 +145,9 @@ func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -380,20 +426,42 @@ func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + 
s.Index = &value + case bool: + s.Index = &v } case "index_options": @@ -402,16 +470,29 @@ func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "null_value": - if err := dec.Decode(&s.NullValue); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NullValue = &o case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -420,7 +501,9 @@ func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -699,20 +782,38 @@ func (s *FlattenedProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "split_queries_on_whitespace": - if err := dec.Decode(&s.SplitQueriesOnWhitespace); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.SplitQueriesOnWhitespace = &value + case bool: + s.SplitQueriesOnWhitespace = &v } case "type": @@ -725,6 +826,32 @@ func (s 
*FlattenedProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s FlattenedProperty) MarshalJSON() ([]byte, error) { + type innerFlattenedProperty FlattenedProperty + tmp := innerFlattenedProperty{ + Boost: s.Boost, + DepthLimit: s.DepthLimit, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + EagerGlobalOrdinals: s.EagerGlobalOrdinals, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + IndexOptions: s.IndexOptions, + Meta: s.Meta, + NullValue: s.NullValue, + Properties: s.Properties, + Similarity: s.Similarity, + SplitQueriesOnWhitespace: s.SplitQueriesOnWhitespace, + Type: s.Type, + } + + tmp.Type = "flattened" + + return json.Marshal(tmp) +} + // NewFlattenedProperty returns a FlattenedProperty. func NewFlattenedProperty() *FlattenedProperty { r := &FlattenedProperty{ @@ -733,7 +860,5 @@ func NewFlattenedProperty() *FlattenedProperty { Properties: make(map[string]Property, 0), } - r.Type = "flattened" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/float64.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/float64.go index 58e65ad2b..f6d7c0589 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/float64.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/float64.go @@ -31,20 +31,26 @@ func (f Float64) MarshalJSON() ([]byte, error) { var s string switch { case math.IsInf(float64(f), 1): - s = "Infinity" + s = `"Infinity"` case math.IsInf(float64(f), -1): - s = "-Infinity" + s = `"-Infinity"` case math.IsNaN(float64(f)): - s = "NaN" + s = `"NaN"` default: s = strconv.FormatFloat(float64(f), 'f', -1, 64) } - return []byte(`"` + s + `"`), nil + return []byte(s), nil } // UnmarshalJSON implements Unmarshaler interface. 
func (f *Float64) UnmarshalJSON(data []byte) error { switch { + case bytes.Equal(data, []byte(`"NaN"`)): + *f = Float64(math.NaN()) + case bytes.Equal(data, []byte(`"Infinity"`)): + *f = Float64(math.Inf(1)) + case bytes.Equal(data, []byte(`"-Infinity"`)): + *f = Float64(math.Inf(-1)) case bytes.Equal(data, []byte(`null`)): return nil default: diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/floatnumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/floatnumberproperty.go index a6c2b4bd7..1e668b3b5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/floatnumberproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/floatnumberproperty.go @@ -16,25 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // FloatNumberProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L131-L134 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L134-L137 type FloatNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -63,6 +63,7 @@ type FloatNumberProperty struct { } func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -77,23 +78,63 @@ func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case 
bool: + s.DocValues = &v } case "dynamic": @@ -102,6 +143,9 @@ func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -110,7 +154,9 @@ func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -389,35 +435,80 @@ func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "null_value": - if err := dec.Decode(&s.NullValue); err != nil { - 
return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.NullValue = &f + case float64: + f := float32(v) + s.NullValue = &f } case "on_script_error": @@ -426,6 +517,9 @@ func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -434,7 +528,9 @@ func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -713,9 +809,11 @@ func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -725,18 +823,43 @@ func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "time_series_dimension": - if err := dec.Decode(&s.TimeSeriesDimension); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseBool(v) + if err != nil { + return err + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v } case "time_series_metric": @@ -754,6 +877,36 @@ func (s *FloatNumberProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s FloatNumberProperty) MarshalJSON() ([]byte, error) { + type innerFloatNumberProperty FloatNumberProperty + tmp := innerFloatNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Similarity: s.Similarity, + Store: s.Store, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "float" + + return json.Marshal(tmp) +} + // NewFloatNumberProperty returns a FloatNumberProperty. func NewFloatNumberProperty() *FloatNumberProperty { r := &FloatNumberProperty{ @@ -762,7 +915,5 @@ func NewFloatNumberProperty() *FloatNumberProperty { Properties: make(map[string]Property, 0), } - r.Type = "float" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/floatrangeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/floatrangeproperty.go index b395fd31b..841f21cd9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/floatrangeproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/floatrangeproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // FloatRangeProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/range.ts#L38-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/range.ts#L38-L40 type FloatRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -51,6 +51,7 @@ type FloatRangeProperty struct { } func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -65,23 +66,63 @@ func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -90,6 +131,9 @@ func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -98,7 +142,9 @@ func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -377,28 +423,56 @@ func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == 
nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -407,7 +481,9 @@ func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -686,20 +762,38 @@ func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -712,6 +806,30 @@ func (s *FloatRangeProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s FloatRangeProperty) MarshalJSON() ([]byte, error) { + type innerFloatRangeProperty FloatRangeProperty + tmp := innerFloatRangeProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Similarity: s.Similarity, + Store: 
s.Store, + Type: s.Type, + } + + tmp.Type = "float_range" + + return json.Marshal(tmp) +} + // NewFloatRangeProperty returns a FloatRangeProperty. func NewFloatRangeProperty() *FloatRangeProperty { r := &FloatRangeProperty{ @@ -720,7 +838,5 @@ func NewFloatRangeProperty() *FloatRangeProperty { Properties: make(map[string]Property, 0), } - r.Type = "float_range" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/flushstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/flushstats.go index a63e35eb2..3b013c0d0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/flushstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/flushstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FlushStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L81-L86 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L123-L128 type FlushStats struct { Periodic int64 `json:"periodic"` Total int64 `json:"total"` @@ -30,6 +38,66 @@ type FlushStats struct { TotalTimeInMillis int64 `json:"total_time_in_millis"` } +func (s *FlushStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "periodic": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Periodic = value + case float64: + f := int64(v) + s.Periodic = f + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return err + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return err + } + + } + } + return nil +} + // NewFlushStats returns a FlushStats. func NewFlushStats() *FlushStats { r := &FlushStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/followerindex.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/followerindex.go index 4761e8c91..a2ad3c611 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/followerindex.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/followerindex.go @@ -16,17 +16,22 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/followerindexstatus" ) // FollowerIndex type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/follow_info/types.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/follow_info/types.ts#L22-L28 type FollowerIndex struct { FollowerIndex string `json:"follower_index"` LeaderIndex string `json:"leader_index"` @@ -35,6 +40,51 @@ type FollowerIndex struct { Status followerindexstatus.FollowerIndexStatus `json:"status"` } +func (s *FollowerIndex) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "follower_index": + if err := dec.Decode(&s.FollowerIndex); err != nil { + return err + } + + case "leader_index": + if err := dec.Decode(&s.LeaderIndex); err != nil { + return err + } + + case "parameters": + if err := dec.Decode(&s.Parameters); err != nil { + return err + } + + case "remote_cluster": + if err := dec.Decode(&s.RemoteCluster); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + } + } + return nil +} + // NewFollowerIndex returns a FollowerIndex. 
func NewFollowerIndex() *FollowerIndex { r := &FollowerIndex{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/followerindexparameters.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/followerindexparameters.go index c60712dee..407834dc5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/followerindexparameters.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/followerindexparameters.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FollowerIndexParameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/follow_info/types.ts#L38-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/follow_info/types.ts#L38-L49 type FollowerIndexParameters struct { MaxOutstandingReadRequests int `json:"max_outstanding_read_requests"` MaxOutstandingWriteRequests int `json:"max_outstanding_write_requests"` @@ -36,6 +44,152 @@ type FollowerIndexParameters struct { ReadPollTimeout Duration `json:"read_poll_timeout"` } +func (s *FollowerIndexParameters) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_outstanding_read_requests": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxOutstandingReadRequests = value + 
case float64: + f := int(v) + s.MaxOutstandingReadRequests = f + } + + case "max_outstanding_write_requests": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxOutstandingWriteRequests = value + case float64: + f := int(v) + s.MaxOutstandingWriteRequests = f + } + + case "max_read_request_operation_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxReadRequestOperationCount = value + case float64: + f := int(v) + s.MaxReadRequestOperationCount = f + } + + case "max_read_request_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxReadRequestSize = o + + case "max_retry_delay": + if err := dec.Decode(&s.MaxRetryDelay); err != nil { + return err + } + + case "max_write_buffer_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxWriteBufferCount = value + case float64: + f := int(v) + s.MaxWriteBufferCount = f + } + + case "max_write_buffer_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxWriteBufferSize = o + + case "max_write_request_operation_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxWriteRequestOperationCount = value + case float64: + f := int(v) + s.MaxWriteRequestOperationCount = f + } + + case "max_write_request_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := 
string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxWriteRequestSize = o + + case "read_poll_timeout": + if err := dec.Decode(&s.ReadPollTimeout); err != nil { + return err + } + + } + } + return nil +} + // NewFollowerIndexParameters returns a FollowerIndexParameters. func NewFollowerIndexParameters() *FollowerIndexParameters { r := &FollowerIndexParameters{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/followindexstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/followindexstats.go index 06e0ff2e0..3c36361bf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/followindexstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/followindexstats.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // FollowIndexStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/_types/FollowIndexStats.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/_types/FollowIndexStats.ts#L30-L33 type FollowIndexStats struct { Index string `json:"index"` Shards []CcrShardStats `json:"shards"` } +func (s *FollowIndexStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "shards": + if err := dec.Decode(&s.Shards); err != nil { + return err + } + + } + } + return nil +} + // NewFollowIndexStats returns a FollowIndexStats. func NewFollowIndexStats() *FollowIndexStats { r := &FollowIndexStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/followstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/followstats.go index 1dac77b6e..141ea1057 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/followstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/followstats.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // FollowStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/stats/types.ts.ts#L41-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/stats/types.ts.ts#L41-L43 type FollowStats struct { Indices []FollowIndexStats `json:"indices"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/forcemergeconfiguration.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/forcemergeconfiguration.go index 062ab56d6..e175915b6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/forcemergeconfiguration.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/forcemergeconfiguration.go @@ -16,17 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ForceMergeConfiguration type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/_types/Phase.ts#L53-L55 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/_types/Phase.ts#L56-L58 type ForceMergeConfiguration struct { MaxNumSegments int `json:"max_num_segments"` } +func (s *ForceMergeConfiguration) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_num_segments": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxNumSegments = value + case float64: + f := int(v) + s.MaxNumSegments = f + } + + } + } + return nil +} + // NewForceMergeConfiguration returns a ForceMergeConfiguration. func NewForceMergeConfiguration() *ForceMergeConfiguration { r := &ForceMergeConfiguration{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/forcemergeresponsebody.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/forcemergeresponsebody.go index 90a74c0e8..e5d0f3c6e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/forcemergeresponsebody.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/forcemergeresponsebody.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ForceMergeResponseBody type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/forcemerge/_types/response.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/forcemerge/_types/response.ts#L22-L28 type ForceMergeResponseBody struct { Shards_ ShardStatistics `json:"_shards"` // Task task contains a task id returned when wait_for_completion=false, @@ -30,6 +38,43 @@ type ForceMergeResponseBody struct { Task *string `json:"task,omitempty"` } +func (s *ForceMergeResponseBody) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_shards": + if err := dec.Decode(&s.Shards_); err != nil { + return err + } + + case "task": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Task = &o + + } + } + return nil +} + // NewForceMergeResponseBody returns a ForceMergeResponseBody. func NewForceMergeResponseBody() *ForceMergeResponseBody { r := &ForceMergeResponseBody{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/foreachprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/foreachprocessor.go index e25cbf912..849506353 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/foreachprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/foreachprocessor.go @@ -16,22 +16,140 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ForeachProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L215-L219 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L645-L659 type ForeachProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Processor *ProcessorContainer `json:"processor,omitempty"` - Tag *string `json:"tag,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field Field containing array or object values. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true`, the processor silently exits without changing the document if the + // `field` is `null` or missing. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Processor Ingest processor to run on each element. + Processor *ProcessorContainer `json:"processor,omitempty"` + // Tag Identifier for the processor. 
+ // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` +} + +func (s *ForeachProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "processor": + if err := dec.Decode(&s.Processor); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil } // NewForeachProcessor returns a ForeachProcessor. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/formattablemetricaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/formattablemetricaggregation.go index c9b3ec96e..e76ec401a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/formattablemetricaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/formattablemetricaggregation.go @@ -16,20 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FormattableMetricAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L44-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L51-L53 type FormattableMetricAggregation struct { - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. 
Missing Missing `json:"missing,omitempty"` Script Script `json:"script,omitempty"` } +func (s *FormattableMetricAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil +} + // NewFormattableMetricAggregation returns a FormattableMetricAggregation. func NewFormattableMetricAggregation() *FormattableMetricAggregation { r := &FormattableMetricAggregation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/foundstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/foundstatus.go index feb624b29..3dfc8b3e5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/foundstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/foundstatus.go @@ -16,17 +16,59 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FoundStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/delete_privileges/types.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/delete_privileges/types.ts#L20-L22 type FoundStatus struct { Found bool `json:"found"` } +func (s *FoundStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "found": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Found = value + case bool: + s.Found = v + } + + } + } + return nil +} + // NewFoundStatus returns a FoundStatus. func NewFoundStatus() *FoundStatus { r := &FoundStatus{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frequencyencodingpreprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frequencyencodingpreprocessor.go index 521ff17d8..bed8edc46 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frequencyencodingpreprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frequencyencodingpreprocessor.go @@ -16,19 +16,79 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FrequencyEncodingPreprocessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model/types.ts#L38-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model/types.ts#L38-L42 type FrequencyEncodingPreprocessor struct { FeatureName string `json:"feature_name"` Field string `json:"field"` FrequencyMap map[string]Float64 `json:"frequency_map"` } +func (s *FrequencyEncodingPreprocessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeatureName = o + + case "field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Field = o + + case "frequency_map": + if s.FrequencyMap == nil { + s.FrequencyMap = make(map[string]Float64, 0) + } + if err := dec.Decode(&s.FrequencyMap); err != nil { + return err + } + + } + } + return nil +} + // NewFrequencyEncodingPreprocessor returns a FrequencyEncodingPreprocessor. func NewFrequencyEncodingPreprocessor() *FrequencyEncodingPreprocessor { r := &FrequencyEncodingPreprocessor{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frequentitemsetsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frequentitemsetsaggregate.go new file mode 100644 index 000000000..93bed0898 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frequentitemsetsaggregate.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + +// FrequentItemSetsAggregate type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L639-L640 +type FrequentItemSetsAggregate struct { + Buckets BucketsFrequentItemSetsBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *FrequentItemSetsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]FrequentItemSetsBucket, 0) + if err := localDec.Decode(&o); err != nil { + return err + } + s.Buckets = o + case '[': + o := []FrequentItemSetsBucket{} + if err := localDec.Decode(&o); err != nil { + return err + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + } + } + return nil +} + +// NewFrequentItemSetsAggregate returns a FrequentItemSetsAggregate. +func NewFrequentItemSetsAggregate() *FrequentItemSetsAggregate { + r := &FrequentItemSetsAggregate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frequentitemsetsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frequentitemsetsaggregation.go new file mode 100644 index 000000000..2f7a9d8ea --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frequentitemsetsaggregation.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// FrequentItemSetsAggregation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L1159-L1183 +type FrequentItemSetsAggregation struct { + // Fields Fields to analyze. + Fields []FrequentItemSetsField `json:"fields"` + // Filter Query that filters documents from analysis. + Filter *Query `json:"filter,omitempty"` + // MinimumSetSize The minimum size of one item set. + MinimumSetSize *int `json:"minimum_set_size,omitempty"` + // MinimumSupport The minimum support of one item set. + MinimumSupport *Float64 `json:"minimum_support,omitempty"` + // Size The number of top item sets to return. 
+ Size *int `json:"size,omitempty"` +} + +func (s *FrequentItemSetsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return err + } + + case "minimum_set_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinimumSetSize = &value + case float64: + f := int(v) + s.MinimumSetSize = &f + } + + case "minimum_support": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.MinimumSupport = &f + case float64: + f := Float64(v) + s.MinimumSupport = &f + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil +} + +// NewFrequentItemSetsAggregation returns a FrequentItemSetsAggregation. +func NewFrequentItemSetsAggregation() *FrequentItemSetsAggregation { + r := &FrequentItemSetsAggregation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frequentitemsetsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frequentitemsetsbucket.go new file mode 100644 index 000000000..558722231 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frequentitemsetsbucket.go @@ -0,0 +1,652 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +// FrequentItemSetsBucket type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L642-L645 +type FrequentItemSetsBucket struct { + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Key map[string][]string `json:"key"` + Support Float64 `json:"support"` +} + +func (s *FrequentItemSetsBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "key": + if s.Key == nil { + s.Key = make(map[string][]string, 0) + } + if err := dec.Decode(&s.Key); err != nil { + return err + } + + case "support": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Support = f + case float64: + f := Float64(v) + s.Support = f + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil 
{ + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + 
+ case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != 
nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } 
+ } + + } + } + return nil +} + +// MarhsalJSON overrides marshalling for types with additional properties +func (s FrequentItemSetsBucket) MarshalJSON() ([]byte, error) { + type opt FrequentItemSetsBucket + // We transform the struct to a map without the embedded additional properties map + tmp := make(map[string]interface{}, 0) + + data, err := json.Marshal(opt(s)) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &tmp) + if err != nil { + return nil, err + } + + // We inline the additional fields from the underlying map + for key, value := range s.Aggregations { + tmp[fmt.Sprintf("%s", key)] = value + } + delete(tmp, "Aggregations") + + data, err = json.Marshal(tmp) + if err != nil { + return nil, err + } + + return data, nil +} + +// NewFrequentItemSetsBucket returns a FrequentItemSetsBucket. +func NewFrequentItemSetsBucket() *FrequentItemSetsBucket { + r := &FrequentItemSetsBucket{ + Aggregations: make(map[string]Aggregate, 0), + Key: make(map[string][]string, 0), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frequentitemsetsfield.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frequentitemsetsfield.go new file mode 100644 index 000000000..28cf923bd --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frequentitemsetsfield.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + +// FrequentItemSetsField type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L1145-L1157 +type FrequentItemSetsField struct { + // Exclude Values to exclude. + // Can be regular expression strings or arrays of strings of exact terms. + Exclude []string `json:"exclude,omitempty"` + Field string `json:"field"` + // Include Values to include. + // Can be regular expression strings or arrays of strings of exact terms. 
+ Include TermsInclude `json:"include,omitempty"` +} + +func (s *FrequentItemSetsField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "exclude": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Exclude = append(s.Exclude, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Exclude); err != nil { + return err + } + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "include": + if err := dec.Decode(&s.Include); err != nil { + return err + } + + } + } + return nil +} + +// NewFrequentItemSetsField returns a FrequentItemSetsField. +func NewFrequentItemSetsField() *FrequentItemSetsField { + r := &FrequentItemSetsField{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frozenindices.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frozenindices.go index c6ef9ca5e..9809caca4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frozenindices.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/frozenindices.go @@ -16,19 +16,90 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FrozenIndices type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L351-L353 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L360-L362 type FrozenIndices struct { Available bool `json:"available"` Enabled bool `json:"enabled"` IndicesCount int64 `json:"indices_count"` } +func (s *FrozenIndices) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "indices_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndicesCount = value + case float64: + f := int64(v) + s.IndicesCount = f + } + + } + } + return nil +} + // NewFrozenIndices returns a FrozenIndices. func NewFrozenIndices() *FrozenIndices { r := &FrozenIndices{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/functionscore.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/functionscore.go index 592024cdb..992ffb32d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/functionscore.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/functionscore.go @@ -16,22 +16,117 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FunctionScore type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L107-L127 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L201-L241 type FunctionScore struct { - Exp DecayFunction `json:"exp,omitempty"` + // Exp Function that scores a document with a exponential decay, depending on the + // distance of a numeric field value of the document from an origin. + Exp DecayFunction `json:"exp,omitempty"` + // FieldValueFactor Function allows you to use a field from a document to influence the score. + // It’s similar to using the script_score function, however, it avoids the + // overhead of scripting. FieldValueFactor *FieldValueFactorScoreFunction `json:"field_value_factor,omitempty"` Filter *Query `json:"filter,omitempty"` - Gauss DecayFunction `json:"gauss,omitempty"` - Linear DecayFunction `json:"linear,omitempty"` - RandomScore *RandomScoreFunction `json:"random_score,omitempty"` - ScriptScore *ScriptScoreFunction `json:"script_score,omitempty"` - Weight *Float64 `json:"weight,omitempty"` + // Gauss Function that scores a document with a normal decay, depending on the + // distance of a numeric field value of the document from an origin. + Gauss DecayFunction `json:"gauss,omitempty"` + // Linear Function that scores a document with a linear decay, depending on the + // distance of a numeric field value of the document from an origin. 
+ Linear DecayFunction `json:"linear,omitempty"` + // RandomScore Generates scores that are uniformly distributed from 0 up to but not + // including 1. + // In case you want scores to be reproducible, it is possible to provide a + // `seed` and `field`. + RandomScore *RandomScoreFunction `json:"random_score,omitempty"` + // ScriptScore Enables you to wrap another query and customize the scoring of it optionally + // with a computation derived from other numeric field values in the doc using a + // script expression. + ScriptScore *ScriptScoreFunction `json:"script_score,omitempty"` + Weight *Float64 `json:"weight,omitempty"` +} + +func (s *FunctionScore) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "exp": + if err := dec.Decode(&s.Exp); err != nil { + return err + } + + case "field_value_factor": + if err := dec.Decode(&s.FieldValueFactor); err != nil { + return err + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return err + } + + case "gauss": + if err := dec.Decode(&s.Gauss); err != nil { + return err + } + + case "linear": + if err := dec.Decode(&s.Linear); err != nil { + return err + } + + case "random_score": + if err := dec.Decode(&s.RandomScore); err != nil { + return err + } + + case "script_score": + if err := dec.Decode(&s.ScriptScore); err != nil { + return err + } + + case "weight": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Weight = &f + case float64: + f := Float64(v) + s.Weight = &f + } + + } + } + return nil } // NewFunctionScore returns a FunctionScore. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/functionscorequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/functionscorequery.go index 5f9e2ddb2..04d1df702 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/functionscorequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/functionscorequery.go @@ -16,27 +16,145 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/functionboostmode" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/functionscoremode" ) // FunctionScoreQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L52-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L92-L118 type FunctionScoreQuery struct { - Boost *float32 `json:"boost,omitempty"` - BoostMode *functionboostmode.FunctionBoostMode `json:"boost_mode,omitempty"` - Functions []FunctionScore `json:"functions,omitempty"` - MaxBoost *Float64 `json:"max_boost,omitempty"` - MinScore *Float64 `json:"min_score,omitempty"` - Query *Query `json:"query,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - ScoreMode *functionscoremode.FunctionScoreMode `json:"score_mode,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. 
+ // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // BoostMode Defines how he newly computed score is combined with the score of the query + BoostMode *functionboostmode.FunctionBoostMode `json:"boost_mode,omitempty"` + // Functions One or more functions that compute a new score for each document returned by + // the query. + Functions []FunctionScore `json:"functions,omitempty"` + // MaxBoost Restricts the new score to not exceed the provided limit. + MaxBoost *Float64 `json:"max_boost,omitempty"` + // MinScore Excludes documents that do not meet the provided score threshold. + MinScore *Float64 `json:"min_score,omitempty"` + // Query A query that determines the documents for which a new score is computed. + Query *Query `json:"query,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // ScoreMode Specifies how the computed scores are combined + ScoreMode *functionscoremode.FunctionScoreMode `json:"score_mode,omitempty"` +} + +func (s *FunctionScoreQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "boost_mode": + if err := dec.Decode(&s.BoostMode); err != nil { + return err + } + + case "functions": + if err := dec.Decode(&s.Functions); err != nil { + return err + } + + case "max_boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.MaxBoost = &f + case float64: 
+ f := Float64(v) + s.MaxBoost = &f + } + + case "min_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.MinScore = &f + case float64: + f := Float64(v) + s.MinScore = &f + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "score_mode": + if err := dec.Decode(&s.ScoreMode); err != nil { + return err + } + + } + } + return nil } // NewFunctionScoreQuery returns a FunctionScoreQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fuzziness.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fuzziness.go index 28f75c0a0..8aa0fb201 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fuzziness.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fuzziness.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // string // int // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L114-L114 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L127-L128 type Fuzziness interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fuzzyquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fuzzyquery.go index 893f4938c..8d4071865 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fuzzyquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/fuzzyquery.go @@ -16,22 +16,163 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // FuzzyQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/term.ts#L40-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/term.ts#L43-L78 type FuzzyQuery struct { - Boost *float32 `json:"boost,omitempty"` - Fuzziness Fuzziness `json:"fuzziness,omitempty"` - MaxExpansions *int `json:"max_expansions,omitempty"` - PrefixLength *int `json:"prefix_length,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Rewrite *string `json:"rewrite,omitempty"` - Transpositions *bool `json:"transpositions,omitempty"` - Value string `json:"value"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Fuzziness Maximum edit distance allowed for matching. + Fuzziness Fuzziness `json:"fuzziness,omitempty"` + // MaxExpansions Maximum number of variations created. + MaxExpansions *int `json:"max_expansions,omitempty"` + // PrefixLength Number of beginning characters left unchanged when creating expansions. + PrefixLength *int `json:"prefix_length,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Rewrite Number of beginning characters left unchanged when creating expansions. + Rewrite *string `json:"rewrite,omitempty"` + // Transpositions Indicates whether edits include transpositions of two adjacent characters + // (for example `ab` to `ba`). + Transpositions *bool `json:"transpositions,omitempty"` + // Value Term you wish to find in the provided field. 
+ Value string `json:"value"` +} + +func (s *FuzzyQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Value) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "fuzziness": + if err := dec.Decode(&s.Fuzziness); err != nil { + return err + } + + case "max_expansions": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxExpansions = &value + case float64: + f := int(v) + s.MaxExpansions = &f + } + + case "prefix_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PrefixLength = &value + case float64: + f := int(v) + s.PrefixLength = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "rewrite": + if err := dec.Decode(&s.Rewrite); err != nil { + return err + } + + case "transpositions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Transpositions = &value + case bool: + s.Transpositions = &v + } + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != 
nil { + o = string(tmp[:]) + } + s.Value = o + + } + } + return nil } // NewFuzzyQuery returns a FuzzyQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/garbagecollector.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/garbagecollector.go index 6ed1d84a0..dc52b22b8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/garbagecollector.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/garbagecollector.go @@ -16,14 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // GarbageCollector type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L363-L365 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L923-L928 type GarbageCollector struct { + // Collectors Contains statistics about JVM garbage collectors for the node. Collectors map[string]GarbageCollectorTotal `json:"collectors,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/garbagecollectortotal.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/garbagecollectortotal.go index 9791ad42c..73124f223 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/garbagecollectortotal.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/garbagecollectortotal.go @@ -16,17 +16,90 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // GarbageCollectorTotal type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L367-L371 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L930-L943 type GarbageCollectorTotal struct { - CollectionCount *int64 `json:"collection_count,omitempty"` - CollectionTime *string `json:"collection_time,omitempty"` - CollectionTimeInMillis *int64 `json:"collection_time_in_millis,omitempty"` + // CollectionCount Total number of JVM garbage collectors that collect objects. + CollectionCount *int64 `json:"collection_count,omitempty"` + // CollectionTime Total time spent by JVM collecting objects. + CollectionTime *string `json:"collection_time,omitempty"` + // CollectionTimeInMillis Total time, in milliseconds, spent by JVM collecting objects. 
+ CollectionTimeInMillis *int64 `json:"collection_time_in_millis,omitempty"` +} + +func (s *GarbageCollectorTotal) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collection_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CollectionCount = &value + case float64: + f := int64(v) + s.CollectionCount = &f + } + + case "collection_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CollectionTime = &o + + case "collection_time_in_millis": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CollectionTimeInMillis = &value + case float64: + f := int64(v) + s.CollectionTimeInMillis = &f + } + + } + } + return nil } // NewGarbageCollectorTotal returns a GarbageCollectorTotal. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoboundingboxquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoboundingboxquery.go index 6abebfc65..aa01f3d8e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoboundingboxquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoboundingboxquery.go @@ -16,28 +16,125 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoexecution" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geovalidationmethod" - + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoexecution" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geovalidationmethod" ) // GeoBoundingBoxQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/geo.ts#L32-L41 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/geo.ts#L32-L50 type GeoBoundingBoxQuery struct { - Boost *float32 `json:"boost,omitempty"` - GeoBoundingBoxQuery map[string]GeoBounds `json:"-"` - IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Type *geoexecution.GeoExecution `json:"type,omitempty"` - ValidationMethod *geovalidationmethod.GeoValidationMethod `json:"validation_method,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + GeoBoundingBoxQuery map[string]GeoBounds `json:"GeoBoundingBoxQuery,omitempty"` + // IgnoreUnmapped Set to `true` to ignore an unmapped field and not match any documents for + // this query. + // Set to `false` to throw an exception if the field is not mapped. 
+ IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + Type *geoexecution.GeoExecution `json:"type,omitempty"` + // ValidationMethod Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or + // longitude. + // Set to `COERCE` to also try to infer correct latitude or longitude. + ValidationMethod *geovalidationmethod.GeoValidationMethod `json:"validation_method,omitempty"` +} + +func (s *GeoBoundingBoxQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "GeoBoundingBoxQuery": + if s.GeoBoundingBoxQuery == nil { + s.GeoBoundingBoxQuery = make(map[string]GeoBounds, 0) + } + if err := dec.Decode(&s.GeoBoundingBoxQuery); err != nil { + return err + } + + case "ignore_unmapped": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "validation_method": + if err := dec.Decode(&s.ValidationMethod); err != nil { + return err + } + + default: + + } + } + return nil } // MarhsalJSON overrides marshalling for types with additional properties @@ -59,6 +156,7 @@ func (s GeoBoundingBoxQuery) MarshalJSON() 
([]byte, error) { for key, value := range s.GeoBoundingBoxQuery { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "GeoBoundingBoxQuery") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geobounds.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geobounds.go index 7c84610d7..c1407debc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geobounds.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geobounds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -27,5 +27,5 @@ package types // TopRightBottomLeftGeoBounds // WktGeoBounds // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Geo.ts#L119-L132 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Geo.ts#L144-L157 type GeoBounds interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoboundsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoboundsaggregate.go index a1d575606..50c635f15 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoboundsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoboundsaggregate.go @@ -16,20 +16,53 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // GeoBoundsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L302-L305 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L303-L306 type GeoBoundsAggregate struct { - Bounds GeoBounds `json:"bounds,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Bounds GeoBounds `json:"bounds,omitempty"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *GeoBoundsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bounds": + if err := dec.Decode(&s.Bounds); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + } + } + return nil } // NewGeoBoundsAggregate returns a GeoBoundsAggregate. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoboundsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoboundsaggregation.go index d4ca9d9b4..98a9b46c2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoboundsaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoboundsaggregation.go @@ -16,18 +16,80 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // GeoBoundsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L72-L74 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L108-L114 type GeoBoundsAggregation struct { - Field *string `json:"field,omitempty"` - Missing Missing `json:"missing,omitempty"` - Script Script `json:"script,omitempty"` - WrapLongitude *bool `json:"wrap_longitude,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script Script `json:"script,omitempty"` + // WrapLongitude Specifies whether the bounding box should be allowed to overlap the + // international date line. 
+ WrapLongitude *bool `json:"wrap_longitude,omitempty"` +} + +func (s *GeoBoundsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "wrap_longitude": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.WrapLongitude = &value + case bool: + s.WrapLongitude = &v + } + + } + } + return nil } // NewGeoBoundsAggregation returns a GeoBoundsAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geocentroidaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geocentroidaggregate.go index 3fe19da95..d5f14b54e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geocentroidaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geocentroidaggregate.go @@ -16,21 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // GeoCentroidAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L307-L311 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L308-L312 type GeoCentroidAggregate struct { - Count int64 `json:"count"` - Location GeoLocation `json:"location,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Count int64 `json:"count"` + Location GeoLocation `json:"location,omitempty"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *GeoCentroidAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "location": + if err := dec.Decode(&s.Location); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + } + } + return nil } // NewGeoCentroidAggregate returns a GeoCentroidAggregate. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geocentroidaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geocentroidaggregation.go index a87ec2f6d..32a7c1b82 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geocentroidaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geocentroidaggregation.go @@ -16,19 +16,85 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // GeoCentroidAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L76-L79 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L116-L119 type GeoCentroidAggregation struct { - Count *int64 `json:"count,omitempty"` + Count *int64 `json:"count,omitempty"` + // Field The field on which to run the aggregation. Field *string `json:"field,omitempty"` Location GeoLocation `json:"location,omitempty"` - Missing Missing `json:"missing,omitempty"` - Script Script `json:"script,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. 
+ Missing Missing `json:"missing,omitempty"` + Script Script `json:"script,omitempty"` +} + +func (s *GeoCentroidAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = &value + case float64: + f := int64(v) + s.Count = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "location": + if err := dec.Decode(&s.Location); err != nil { + return err + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil } // NewGeoCentroidAggregation returns a GeoCentroidAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodecayfunction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodecayfunction.go index 764f9ecf1..b64ea8198 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodecayfunction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodecayfunction.go @@ -16,23 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode" - "encoding/json" "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode" ) // GeoDecayFunction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L96-L98 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L190-L192 type GeoDecayFunction struct { - GeoDecayFunction map[string]DecayPlacementGeoLocationDistance `json:"-"` - MultiValueMode *multivaluemode.MultiValueMode `json:"multi_value_mode,omitempty"` + GeoDecayFunction map[string]DecayPlacementGeoLocationDistance `json:"GeoDecayFunction,omitempty"` + // MultiValueMode Determines how the distance is calculated when a field used for computing the + // decay contains multiple values. + MultiValueMode *multivaluemode.MultiValueMode `json:"multi_value_mode,omitempty"` } // MarhsalJSON overrides marshalling for types with additional properties @@ -54,6 +56,7 @@ func (s GeoDecayFunction) MarshalJSON() ([]byte, error) { for key, value := range s.GeoDecayFunction { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "GeoDecayFunction") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistanceaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistanceaggregate.go index 2a4d2be50..72a6d682c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistanceaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistanceaggregate.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // GeoDistanceAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L549-L553 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L550-L554 type GeoDistanceAggregate struct { - Buckets BucketsRangeBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsRangeBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *GeoDistanceAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *GeoDistanceAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]RangeBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []RangeBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistanceaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistanceaggregation.go index 537b8131c..c6387e463 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistanceaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistanceaggregation.go @@ -16,12 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/distanceunit" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geodistancetype" @@ -29,15 +33,82 @@ import ( // GeoDistanceAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L176-L182 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L380-L403 type GeoDistanceAggregation struct { + // DistanceType The distance calculation type. DistanceType *geodistancetype.GeoDistanceType `json:"distance_type,omitempty"` - Field *string `json:"field,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Origin GeoLocation `json:"origin,omitempty"` - Ranges []AggregationRange `json:"ranges,omitempty"` - Unit *distanceunit.DistanceUnit `json:"unit,omitempty"` + // Field A field of type `geo_point` used to evaluate the distance. + Field *string `json:"field,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Origin The origin used to evaluate the distance. + Origin GeoLocation `json:"origin,omitempty"` + // Ranges An array of ranges used to bucket documents. + Ranges []AggregationRange `json:"ranges,omitempty"` + // Unit The distance unit. 
+ Unit *distanceunit.DistanceUnit `json:"unit,omitempty"` +} + +func (s *GeoDistanceAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "distance_type": + if err := dec.Decode(&s.DistanceType); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "origin": + if err := dec.Decode(&s.Origin); err != nil { + return err + } + + case "ranges": + if err := dec.Decode(&s.Ranges); err != nil { + return err + } + + case "unit": + if err := dec.Decode(&s.Unit); err != nil { + return err + } + + } + } + return nil } // NewGeoDistanceAggregation returns a GeoDistanceAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistancefeaturequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistancefeaturequery.go index 597dad501..0fbc56548 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistancefeaturequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistancefeaturequery.go @@ -16,19 +16,112 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // GeoDistanceFeatureQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L46-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L62-L65 type GeoDistanceFeatureQuery struct { - Boost *float32 `json:"boost,omitempty"` - Field string `json:"field"` - Origin GeoLocation `json:"origin"` - Pivot string `json:"pivot"` - QueryName_ *string `json:"_name,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Field Name of the field used to calculate distances. This field must meet the + // following criteria: + // be a `date`, `date_nanos` or `geo_point` field; + // have an `index` mapping parameter value of `true`, which is the default; + // have an `doc_values` mapping parameter value of `true`, which is the default. + Field string `json:"field"` + // Origin Date or point of origin used to calculate distances. + // If the `field` value is a `date` or `date_nanos` field, the `origin` value + // must be a date. + // Date Math, such as `now-1h`, is supported. + // If the field value is a `geo_point` field, the `origin` value must be a + // geopoint. + Origin GeoLocation `json:"origin"` + // Pivot Distance from the `origin` at which relevance scores receive half of the + // `boost` value. + // If the `field` value is a `date` or `date_nanos` field, the `pivot` value + // must be a time unit, such as `1h` or `10d`. If the `field` value is a + // `geo_point` field, the `pivot` value must be a distance unit, such as `1km` + // or `12m`. 
+ Pivot string `json:"pivot"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *GeoDistanceFeatureQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "origin": + if err := dec.Decode(&s.Origin); err != nil { + return err + } + + case "pivot": + if err := dec.Decode(&s.Pivot); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil } // NewGeoDistanceFeatureQuery returns a GeoDistanceFeatureQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistancequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistancequery.go index d4434a965..12ef493e3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistancequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistancequery.go @@ -16,30 +16,120 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geodistancetype" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geovalidationmethod" - + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geodistancetype" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geovalidationmethod" ) // GeoDistanceQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/geo.ts#L48-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/geo.ts#L57-L79 type GeoDistanceQuery struct { - Boost *float32 `json:"boost,omitempty"` - Distance *string `json:"distance,omitempty"` - DistanceType *geodistancetype.GeoDistanceType `json:"distance_type,omitempty"` - GeoDistanceQuery map[string]GeoLocation `json:"-"` - QueryName_ *string `json:"_name,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Distance The radius of the circle centred on the specified location. + // Points which fall into this circle are considered to be matches. + Distance string `json:"distance"` + // DistanceType How to compute the distance. + // Set to `plane` for a faster calculation that's inaccurate on long distances + // and close to the poles. 
+ DistanceType *geodistancetype.GeoDistanceType `json:"distance_type,omitempty"` + GeoDistanceQuery map[string]GeoLocation `json:"GeoDistanceQuery,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // ValidationMethod Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or + // longitude. + // Set to `COERCE` to also try to infer correct latitude or longitude. ValidationMethod *geovalidationmethod.GeoValidationMethod `json:"validation_method,omitempty"` } +func (s *GeoDistanceQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "distance": + if err := dec.Decode(&s.Distance); err != nil { + return err + } + + case "distance_type": + if err := dec.Decode(&s.DistanceType); err != nil { + return err + } + + case "GeoDistanceQuery": + if s.GeoDistanceQuery == nil { + s.GeoDistanceQuery = make(map[string]GeoLocation, 0) + } + if err := dec.Decode(&s.GeoDistanceQuery); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "validation_method": + if err := dec.Decode(&s.ValidationMethod); err != nil { + return err + } + + default: + + } + } + return nil +} + // MarhsalJSON overrides marshalling for types with additional properties func (s GeoDistanceQuery) MarshalJSON() ([]byte, error) { type opt GeoDistanceQuery @@ -59,6 +149,7 @@ func (s GeoDistanceQuery) MarshalJSON() ([]byte, error) { for key, value := range 
s.GeoDistanceQuery { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "GeoDistanceQuery") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistancesort.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistancesort.go index 7e7b1c812..0bef9e932 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistancesort.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geodistancesort.go @@ -16,32 +16,117 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/distanceunit" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geodistancetype" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortmode" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" - - "encoding/json" - "fmt" ) // GeoDistanceSort type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/sort.ts#L58-L66 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/sort.ts#L58-L66 type GeoDistanceSort struct { DistanceType *geodistancetype.GeoDistanceType `json:"distance_type,omitempty"` - GeoDistanceSort map[string][]GeoLocation `json:"-"` + GeoDistanceSort map[string][]GeoLocation `json:"GeoDistanceSort,omitempty"` IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` Mode *sortmode.SortMode `json:"mode,omitempty"` Order *sortorder.SortOrder `json:"order,omitempty"` Unit *distanceunit.DistanceUnit `json:"unit,omitempty"` } +func (s *GeoDistanceSort) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "distance_type": + if err := dec.Decode(&s.DistanceType); err != nil { + return err + } + + case "GeoDistanceSort": + if s.GeoDistanceSort == nil { + s.GeoDistanceSort = make(map[string][]GeoLocation, 0) + } + rawMsg := make(map[string]json.RawMessage, 0) + dec.Decode(&rawMsg) + for key, value := range rawMsg { + switch { + case bytes.HasPrefix(value, []byte("\"")), bytes.HasPrefix(value, []byte("{")): + o := new(GeoLocation) + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return err + } + s.GeoDistanceSort[key] = append(s.GeoDistanceSort[key], o) + default: + o := []GeoLocation{} + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return err + } + s.GeoDistanceSort[key] = o + } + } + + case "ignore_unmapped": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + 
case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return err + } + + case "order": + if err := dec.Decode(&s.Order); err != nil { + return err + } + + case "unit": + if err := dec.Decode(&s.Unit); err != nil { + return err + } + + default: + + } + } + return nil +} + // MarhsalJSON overrides marshalling for types with additional properties func (s GeoDistanceSort) MarshalJSON() ([]byte, error) { type opt GeoDistanceSort @@ -61,6 +146,7 @@ func (s GeoDistanceSort) MarshalJSON() ([]byte, error) { for key, value := range s.GeoDistanceSort { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "GeoDistanceSort") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashgridaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashgridaggregate.go index 89b19cda0..fbcdc6a86 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashgridaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashgridaggregate.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // GeoHashGridAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L505-L507 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L506-L508 type GeoHashGridAggregate struct { - Buckets BucketsGeoHashGridBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsGeoHashGridBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *GeoHashGridAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *GeoHashGridAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]GeoHashGridBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []GeoHashGridBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashgridaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashgridaggregation.go index c312dd5e4..f523cf8a5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashgridaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashgridaggregation.go @@ -16,25 +16,123 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // GeoHashGridAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L184-L190 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L405-L430 type GeoHashGridAggregation struct { - Bounds GeoBounds `json:"bounds,omitempty"` - Field *string `json:"field,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Precision GeoHashPrecision `json:"precision,omitempty"` - ShardSize *int `json:"shard_size,omitempty"` - Size *int `json:"size,omitempty"` + // Bounds The bounding box to filter the points in each bucket. + Bounds GeoBounds `json:"bounds,omitempty"` + // Field Field containing indexed `geo_point` or `geo_shape` values. + // If the field contains an array, `geohash_grid` aggregates all array values. + Field *string `json:"field,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Precision The string length of the geohashes used to define cells/buckets in the + // results. + Precision GeoHashPrecision `json:"precision,omitempty"` + // ShardSize Allows for more accurate counting of the top cells returned in the final + // result the aggregation. + // Defaults to returning `max(10,(size x number-of-shards))` buckets from each + // shard. + ShardSize *int `json:"shard_size,omitempty"` + // Size The maximum number of geohash buckets to return. 
+ Size *int `json:"size,omitempty"` +} + +func (s *GeoHashGridAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bounds": + if err := dec.Decode(&s.Bounds); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "precision": + if err := dec.Decode(&s.Precision); err != nil { + return err + } + + case "shard_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil } // NewGeoHashGridAggregation returns a GeoHashGridAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashgridbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashgridbucket.go index b07bb2d88..edffdc56b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashgridbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashgridbucket.go @@ -16,25 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // GeoHashGridBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L509-L511 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L510-L512 type GeoHashGridBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -42,6 +40,7 @@ type GeoHashGridBucket struct { } func (s *GeoHashGridBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -55,451 +54,19 @@ func (s *GeoHashGridBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := 
NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case 
"stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return 
err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] 
= o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] 
= o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "key": @@ -507,6 +74,519 @@ func (s *GeoHashGridBucket) UnmarshalJSON(data []byte) error { return err } + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := 
strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + 
if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { 
+ return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + 
return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := 
dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } + } } return nil @@ -531,6 +611,7 @@ func (s GeoHashGridBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashlocation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashlocation.go index 4c10e1b38..e51dc02d3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashlocation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashlocation.go @@ -16,17 +16,49 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // GeoHashLocation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Geo.ts#L115-L117 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Geo.ts#L140-L142 type GeoHashLocation struct { Geohash string `json:"geohash"` } +func (s *GeoHashLocation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "geohash": + if err := dec.Decode(&s.Geohash); err != nil { + return err + } + + } + } + return nil +} + // NewGeoHashLocation returns a GeoHashLocation. func NewGeoHashLocation() *GeoHashLocation { r := &GeoHashLocation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashprecision.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashprecision.go index ce0491866..d5521729b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashprecision.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohashprecision.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // int // string // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Geo.ts#L76-L80 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Geo.ts#L95-L99 type GeoHashPrecision interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohexgridaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohexgridaggregate.go index 00e1bab71..0ffcda996 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohexgridaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohexgridaggregate.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // GeoHexGridAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L521-L522 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L522-L523 type GeoHexGridAggregate struct { - Buckets BucketsGeoHexGridBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsGeoHexGridBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *GeoHexGridAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *GeoHexGridAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]GeoHexGridBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []GeoHexGridBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohexgridaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohexgridaggregation.go index 474904b21..1baaabb24 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohexgridaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohexgridaggregation.go @@ -16,26 +16,29 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // GeohexGridAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L200-L226 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L460-L485 type GeohexGridAggregation struct { // Bounds Bounding box used to filter the geo-points in each bucket. Bounds GeoBounds `json:"bounds,omitempty"` - // Field Field containing indexed geo-point values. Must be explicitly - // mapped as a `geo_point` field. If the field contains an array - // `geohex_grid` aggregates all array values. - Field string `json:"field"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` + // Field Field containing indexed `geo_point` or `geo_shape` values. + // If the field contains an array, `geohex_grid` aggregates all array values. + Field string `json:"field"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` // Precision Integer zoom of the key used to defined cells or buckets // in the results. Value should be between 0-15. 
Precision *int `json:"precision,omitempty"` @@ -45,6 +48,101 @@ type GeohexGridAggregation struct { Size *int `json:"size,omitempty"` } +func (s *GeohexGridAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bounds": + if err := dec.Decode(&s.Bounds); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "precision": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Precision = &value + case float64: + f := int(v) + s.Precision = &f + } + + case "shard_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil +} + // NewGeohexGridAggregation returns a GeohexGridAggregation. 
func NewGeohexGridAggregation() *GeohexGridAggregation { r := &GeohexGridAggregation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohexgridbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohexgridbucket.go index 0dc7f9e9e..74a64588d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohexgridbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geohexgridbucket.go @@ -16,25 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // GeoHexGridBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L524-L526 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L525-L527 type GeoHexGridBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -42,6 +40,7 @@ type GeoHexGridBucket struct { } func (s *GeoHexGridBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -55,451 +54,19 @@ func (s *GeoHexGridBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { 
- elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": 
- o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return 
err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case 
"sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case 
"umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return 
errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "key": @@ -507,6 +74,519 @@ func (s *GeoHexGridBucket) UnmarshalJSON(data []byte) error { return err } + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := 
NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + 
case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != 
nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { 
+ return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := 
NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } + } } return nil @@ -531,6 +611,7 @@ func (s GeoHexGridBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoipdownloadstatistics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoipdownloadstatistics.go index 16a586ddd..9ac8bef84 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoipdownloadstatistics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoipdownloadstatistics.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // GeoIpDownloadStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/geo_ip_stats/types.ts#L24-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/geo_ip_stats/types.ts#L24-L35 type GeoIpDownloadStatistics struct { // DatabaseCount Current number of databases available for use. DatabaseCount int `json:"database_count"` @@ -36,6 +44,95 @@ type GeoIpDownloadStatistics struct { TotalDownloadTime int64 `json:"total_download_time"` } +func (s *GeoIpDownloadStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "database_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DatabaseCount = value + case float64: + f := int(v) + s.DatabaseCount = f + } + + case "failed_downloads": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FailedDownloads = value + case float64: + f := int(v) + s.FailedDownloads = f + } + + case "skipped_updates": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SkippedUpdates = value + case float64: + f := int(v) + s.SkippedUpdates = f + } + + case "successful_downloads": + + var tmp interface{} + 
dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SuccessfulDownloads = value + case float64: + f := int(v) + s.SuccessfulDownloads = f + } + + case "total_download_time": + if err := dec.Decode(&s.TotalDownloadTime); err != nil { + return err + } + + } + } + return nil +} + // NewGeoIpDownloadStatistics returns a GeoIpDownloadStatistics. func NewGeoIpDownloadStatistics() *GeoIpDownloadStatistics { r := &GeoIpDownloadStatistics{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoipnodedatabasename.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoipnodedatabasename.go index 257255618..297bf6e92 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoipnodedatabasename.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoipnodedatabasename.go @@ -16,18 +16,50 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // GeoIpNodeDatabaseName type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/geo_ip_stats/types.ts#L45-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/geo_ip_stats/types.ts#L45-L48 type GeoIpNodeDatabaseName struct { // Name Name of the database. 
Name string `json:"name"` } +func (s *GeoIpNodeDatabaseName) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + // NewGeoIpNodeDatabaseName returns a GeoIpNodeDatabaseName. func NewGeoIpNodeDatabaseName() *GeoIpNodeDatabaseName { r := &GeoIpNodeDatabaseName{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoipnodedatabases.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoipnodedatabases.go index 900ba6b3b..b173cbcf2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoipnodedatabases.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoipnodedatabases.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // GeoIpNodeDatabases type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/geo_ip_stats/types.ts#L37-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/geo_ip_stats/types.ts#L37-L43 type GeoIpNodeDatabases struct { // Databases Downloaded databases for the node. 
Databases []GeoIpNodeDatabaseName `json:"databases"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoipprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoipprocessor.go index 17ad0a959..00f95d685 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoipprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoipprocessor.go @@ -16,25 +16,182 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // GeoIpProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L106-L113 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L328-L357 type GeoIpProcessor struct { - DatabaseFile *string `json:"database_file,omitempty"` - Description *string `json:"description,omitempty"` - Field string `json:"field"` - FirstOnly *bool `json:"first_only,omitempty"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Properties []string `json:"properties,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` + // DatabaseFile The database filename referring to a database the module ships with + // (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom + // database in the ingest-geoip config directory. 
+ DatabaseFile *string `json:"database_file,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to get the ip address from for the geographical lookup. + Field string `json:"field"` + // FirstOnly If `true`, only the first found geoip data will be returned, even if the + // field contains an array. + FirstOnly *bool `json:"first_only,omitempty"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Properties Controls what properties are added to the `target_field` based on the geoip + // lookup. + Properties []string `json:"properties,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field that will hold the geographical information looked up from the + // MaxMind database. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *GeoIpProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "database_file": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DatabaseFile = &o + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "first_only": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.FirstOnly = &value + case bool: + s.FirstOnly = &v + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "properties": + if err := dec.Decode(&s.Properties); err != nil { + return err + } + + case "tag": + var tmp 
json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewGeoIpProcessor returns a GeoIpProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoline.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoline.go index d30c6704c..4d0cdcce4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoline.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoline.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // GeoLine type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Geo.ts#L59-L65 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Geo.ts#L65-L71 type GeoLine struct { // Coordinates Array of `[lon, lat]` coordinates Coordinates [][]Float64 `json:"coordinates"` @@ -30,6 +38,43 @@ type GeoLine struct { Type string `json:"type"` } +func (s *GeoLine) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "coordinates": + if err := dec.Decode(&s.Coordinates); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewGeoLine returns a GeoLine. func NewGeoLine() *GeoLine { r := &GeoLine{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolineaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolineaggregate.go index 6aa078c13..7e7c98102 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolineaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolineaggregate.go @@ -16,22 +16,73 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // GeoLineAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L775-L782 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L784-L791 type GeoLineAggregate struct { - Geometry GeoLine `json:"geometry"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Properties json.RawMessage `json:"properties,omitempty"` - Type string `json:"type"` + Geometry GeoLine `json:"geometry"` + Meta Metadata `json:"meta,omitempty"` + Properties json.RawMessage `json:"properties,omitempty"` + Type string `json:"type"` +} + +func (s *GeoLineAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "geometry": + if err := dec.Decode(&s.Geometry); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "properties": + if err := dec.Decode(&s.Properties); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil } // NewGeoLineAggregate returns a GeoLineAggregate. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolineaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolineaggregation.go index c0be2da03..bec874f47 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolineaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolineaggregation.go @@ -16,23 +16,104 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" ) // GeoLineAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L81-L87 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L121-L146 type GeoLineAggregation struct { - IncludeSort *bool `json:"include_sort,omitempty"` - Point GeoLinePoint `json:"point"` - Size *int `json:"size,omitempty"` - Sort GeoLineSort `json:"sort"` - SortOrder *sortorder.SortOrder `json:"sort_order,omitempty"` + // IncludeSort When `true`, returns an additional array of the sort values in the feature + // properties. + IncludeSort *bool `json:"include_sort,omitempty"` + // Point The name of the geo_point field. + Point GeoLinePoint `json:"point"` + // Size The maximum length of the line represented in the aggregation. + // Valid sizes are between 1 and 10000. + Size *int `json:"size,omitempty"` + // Sort The name of the numeric field to use as the sort key for ordering the points. + // When the `geo_line` aggregation is nested inside a `time_series` aggregation, + // this field defaults to `@timestamp`, and any other value will result in + // error. + Sort GeoLineSort `json:"sort"` + // SortOrder The order in which the line is sorted (ascending or descending). 
+ SortOrder *sortorder.SortOrder `json:"sort_order,omitempty"` +} + +func (s *GeoLineAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "include_sort": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IncludeSort = &value + case bool: + s.IncludeSort = &v + } + + case "point": + if err := dec.Decode(&s.Point); err != nil { + return err + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + if err := dec.Decode(&s.Sort); err != nil { + return err + } + + case "sort_order": + if err := dec.Decode(&s.SortOrder); err != nil { + return err + } + + } + } + return nil } // NewGeoLineAggregation returns a GeoLineAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolinepoint.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolinepoint.go index 126784e7b..e3639fbe7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolinepoint.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolinepoint.go @@ -16,17 +16,50 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // GeoLinePoint type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L93-L95 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L155-L160 type GeoLinePoint struct { + // Field The name of the geo_point field. Field string `json:"field"` } +func (s *GeoLinePoint) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + } + } + return nil +} + // NewGeoLinePoint returns a GeoLinePoint. func NewGeoLinePoint() *GeoLinePoint { r := &GeoLinePoint{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolinesort.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolinesort.go index 33c71ce7b..98aad01d0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolinesort.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolinesort.go @@ -16,17 +16,50 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // GeoLineSort type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L89-L91 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L148-L153 type GeoLineSort struct { + // Field The name of the numeric field to use as the sort key for ordering the points. Field string `json:"field"` } +func (s *GeoLineSort) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + } + } + return nil +} + // NewGeoLineSort returns a GeoLineSort. func NewGeoLineSort() *GeoLineSort { r := &GeoLineSort{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolocation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolocation.go index 5396a647b..f43693f41 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolocation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geolocation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -27,5 +27,5 @@ package types // []Float64 // string // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Geo.ts#L94-L108 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Geo.ts#L113-L127 type GeoLocation interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geopointproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geopointproperty.go index cdc2d1a8b..0ce50e850 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geopointproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geopointproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // GeoPointProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/geo.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/geo.ts#L23-L28 type GeoPointProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -51,6 +51,7 @@ type GeoPointProperty struct { } func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -65,13 +66,33 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { switch t { case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -80,6 +101,9 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -88,7 +112,9 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -367,28 
+393,62 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "ignore_z_value": - if err := dec.Decode(&s.IgnoreZValue); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreZValue = &value + case bool: + s.IgnoreZValue = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } @@ -399,6 +459,9 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -407,7 +470,9 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -686,20 +751,38 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { } 
s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -712,6 +795,30 @@ func (s *GeoPointProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s GeoPointProperty) MarshalJSON() ([]byte, error) { + type innerGeoPointProperty GeoPointProperty + tmp := innerGeoPointProperty{ + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + IgnoreZValue: s.IgnoreZValue, + Meta: s.Meta, + NullValue: s.NullValue, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + tmp.Type = "geo_point" + + return json.Marshal(tmp) +} + // NewGeoPointProperty returns a GeoPointProperty. 
func NewGeoPointProperty() *GeoPointProperty { r := &GeoPointProperty{ @@ -720,7 +827,5 @@ func NewGeoPointProperty() *GeoPointProperty { Properties: make(map[string]Property, 0), } - r.Type = "geo_point" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geopolygonpoints.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geopolygonpoints.go index 714b7b769..a9b3ce93f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geopolygonpoints.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geopolygonpoints.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // GeoPolygonPoints type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/geo.ts#L59-L61 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/geo.ts#L81-L83 type GeoPolygonPoints struct { Points []GeoLocation `json:"points"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geopolygonquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geopolygonquery.go index 04acbce89..1597f3be4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geopolygonquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geopolygonquery.go @@ -16,28 +16,114 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geovalidationmethod" - + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geovalidationmethod" ) // GeoPolygonQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/geo.ts#L63-L71 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/geo.ts#L85-L93 type GeoPolygonQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
Boost *float32 `json:"boost,omitempty"` - GeoPolygonQuery map[string]GeoPolygonPoints `json:"-"` + GeoPolygonQuery map[string]GeoPolygonPoints `json:"GeoPolygonQuery,omitempty"` IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` QueryName_ *string `json:"_name,omitempty"` ValidationMethod *geovalidationmethod.GeoValidationMethod `json:"validation_method,omitempty"` } +func (s *GeoPolygonQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "GeoPolygonQuery": + if s.GeoPolygonQuery == nil { + s.GeoPolygonQuery = make(map[string]GeoPolygonPoints, 0) + } + if err := dec.Decode(&s.GeoPolygonQuery); err != nil { + return err + } + + case "ignore_unmapped": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "validation_method": + if err := dec.Decode(&s.ValidationMethod); err != nil { + return err + } + + default: + + } + } + return nil +} + // MarhsalJSON overrides marshalling for types with additional properties func (s GeoPolygonQuery) MarshalJSON() ([]byte, error) { type opt GeoPolygonQuery @@ -57,6 +143,7 @@ func (s GeoPolygonQuery) MarshalJSON() ([]byte, error) { for key, value := range s.GeoPolygonQuery { tmp[fmt.Sprintf("%s", key)] 
= value } + delete(tmp, "GeoPolygonQuery") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/georesults.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/georesults.go index 9ff696ebe..77490d5d7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/georesults.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/georesults.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // GeoResults type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Anomaly.ts#L145-L154 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Anomaly.ts#L145-L154 type GeoResults struct { // ActualPoint The actual value for the bucket formatted as a `geo_point`. 
ActualPoint string `json:"actual_point"` @@ -30,6 +38,50 @@ type GeoResults struct { TypicalPoint string `json:"typical_point"` } +func (s *GeoResults) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actual_point": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ActualPoint = o + + case "typical_point": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TypicalPoint = o + + } + } + return nil +} + // NewGeoResults returns a GeoResults. func NewGeoResults() *GeoResults { r := &GeoResults{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoshapefieldquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoshapefieldquery.go index bbc4eb65e..feb990c7d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoshapefieldquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoshapefieldquery.go @@ -16,23 +16,64 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoshaperelation" ) // GeoShapeFieldQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/geo.ts#L78-L82 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/geo.ts#L100-L111 type GeoShapeFieldQuery struct { - IndexedShape *FieldLookup `json:"indexed_shape,omitempty"` - Relation *geoshaperelation.GeoShapeRelation `json:"relation,omitempty"` - Shape json.RawMessage `json:"shape,omitempty"` + // IndexedShape Query using an indexed shape retrieved from the the specified document and + // path. + IndexedShape *FieldLookup `json:"indexed_shape,omitempty"` + // Relation Spatial relation operator used to search a geo field. + Relation *geoshaperelation.GeoShapeRelation `json:"relation,omitempty"` + Shape json.RawMessage `json:"shape,omitempty"` +} + +func (s *GeoShapeFieldQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "indexed_shape": + if err := dec.Decode(&s.IndexedShape); err != nil { + return err + } + + case "relation": + if err := dec.Decode(&s.Relation); err != nil { + return err + } + + case "shape": + if err := dec.Decode(&s.Shape); err != nil { + return err + } + + } + } + return nil } // NewGeoShapeFieldQuery returns a GeoShapeFieldQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoshapeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoshapeproperty.go index 46137064f..a99584d10 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoshapeproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoshapeproperty.go @@ -16,25 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoorientation" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geostrategy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoorientation" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geostrategy" ) // GeoShapeProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/geo.ts#L37-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/geo.ts#L37-L50 type GeoShapeProperty struct { Coerce *bool `json:"coerce,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -55,6 +55,7 @@ type GeoShapeProperty struct { } func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -69,18 +70,47 @@ func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { switch t { case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -89,6 +119,9 @@ func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -97,7 +130,9 @@ func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -376,28 +411,62 @@ func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = 
&v } case "ignore_z_value": - if err := dec.Decode(&s.IgnoreZValue); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreZValue = &value + case bool: + s.IgnoreZValue = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } @@ -408,6 +477,9 @@ func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -416,7 +488,9 @@ func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -695,20 +769,38 @@ func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "strategy": @@ -726,6 +818,32 @@ func (s *GeoShapeProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to 
include literal value +func (s GeoShapeProperty) MarshalJSON() ([]byte, error) { + type innerGeoShapeProperty GeoShapeProperty + tmp := innerGeoShapeProperty{ + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + IgnoreZValue: s.IgnoreZValue, + Meta: s.Meta, + Orientation: s.Orientation, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Strategy: s.Strategy, + Type: s.Type, + } + + tmp.Type = "geo_shape" + + return json.Marshal(tmp) +} + // NewGeoShapeProperty returns a GeoShapeProperty. func NewGeoShapeProperty() *GeoShapeProperty { r := &GeoShapeProperty{ @@ -734,7 +852,5 @@ func NewGeoShapeProperty() *GeoShapeProperty { Properties: make(map[string]Property, 0), } - r.Type = "geo_shape" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoshapequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoshapequery.go index d8d5a1263..771caa35f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoshapequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoshapequery.go @@ -16,23 +16,107 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" ) // GeoShapeQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/geo.ts#L86-L91 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/geo.ts#L115-L125 type GeoShapeQuery struct { - Boost *float32 `json:"boost,omitempty"` - GeoShapeQuery map[string]GeoShapeFieldQuery `json:"-"` - IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` - QueryName_ *string `json:"_name,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + GeoShapeQuery map[string]GeoShapeFieldQuery `json:"GeoShapeQuery,omitempty"` + // IgnoreUnmapped Set to `true` to ignore an unmapped field and not match any documents for + // this query. + // Set to `false` to throw an exception if the field is not mapped. 
+ IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *GeoShapeQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "GeoShapeQuery": + if s.GeoShapeQuery == nil { + s.GeoShapeQuery = make(map[string]GeoShapeFieldQuery, 0) + } + if err := dec.Decode(&s.GeoShapeQuery); err != nil { + return err + } + + case "ignore_unmapped": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + default: + + } + } + return nil } // MarhsalJSON overrides marshalling for types with additional properties @@ -54,6 +138,7 @@ func (s GeoShapeQuery) MarshalJSON() ([]byte, error) { for key, value := range s.GeoShapeQuery { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "GeoShapeQuery") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geotilegridaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geotilegridaggregate.go index 8e8754d1a..2063d45bd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geotilegridaggregate.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geotilegridaggregate.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // GeoTileGridAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L513-L515 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L514-L516 type GeoTileGridAggregate struct { - Buckets BucketsGeoTileGridBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsGeoTileGridBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *GeoTileGridAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *GeoTileGridAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]GeoTileGridBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []GeoTileGridBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geotilegridaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geotilegridaggregation.go index c860d1278..710390e66 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geotilegridaggregation.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geotilegridaggregation.go @@ -16,25 +16,123 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // GeoTileGridAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L192-L198 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L432-L458 type GeoTileGridAggregation struct { - Bounds GeoBounds `json:"bounds,omitempty"` - Field *string `json:"field,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Precision *int `json:"precision,omitempty"` - ShardSize *int `json:"shard_size,omitempty"` - Size *int `json:"size,omitempty"` + // Bounds A bounding box to filter the geo-points or geo-shapes in each bucket. + Bounds GeoBounds `json:"bounds,omitempty"` + // Field Field containing indexed `geo_point` or `geo_shape` values. + // If the field contains an array, `geotile_grid` aggregates all array values. + Field *string `json:"field,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Precision Integer zoom of the key used to define cells/buckets in the results. + // Values outside of the range [0,29] will be rejected. + Precision *int `json:"precision,omitempty"` + // ShardSize Allows for more accurate counting of the top cells returned in the final + // result the aggregation. + // Defaults to returning `max(10,(size x number-of-shards))` buckets from each + // shard. 
+ ShardSize *int `json:"shard_size,omitempty"` + // Size The maximum number of buckets to return. + Size *int `json:"size,omitempty"` +} + +func (s *GeoTileGridAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bounds": + if err := dec.Decode(&s.Bounds); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "precision": + if err := dec.Decode(&s.Precision); err != nil { + return err + } + + case "shard_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil } // NewGeoTileGridAggregation returns a GeoTileGridAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geotilegridbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geotilegridbucket.go index b1c45cb20..b73f436e6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geotilegridbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geotilegridbucket.go @@ -16,25 +16,23 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // GeoTileGridBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L517-L519 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L518-L520 type GeoTileGridBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -42,6 +40,7 @@ type GeoTileGridBucket struct { } func (s *GeoTileGridBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -55,451 +54,19 @@ func (s *GeoTileGridBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - 
return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := 
NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if 
err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if 
err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "key": @@ -507,6 +74,519 @@ func (s *GeoTileGridBucket) UnmarshalJSON(data []byte) error { return err } + default: + + if 
value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err 
+ } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := 
NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } + } } return nil @@ -531,6 +611,7 @@ func (s GeoTileGridBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getmigrationfeature.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getmigrationfeature.go index ea7c8e91b..de814dfd8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getmigrationfeature.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getmigrationfeature.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/migrationstatus" ) // GetMigrationFeature type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L37-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L37-L42 type GetMigrationFeature struct { FeatureName string `json:"feature_name"` Indices []MigrationFeatureIndexInfo `json:"indices"` @@ -34,6 +40,53 @@ type GetMigrationFeature struct { MinimumIndexVersion string `json:"minimum_index_version"` } +func (s *GetMigrationFeature) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeatureName = o + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return err + } + + case "migration_status": + if err := dec.Decode(&s.MigrationStatus); err != nil { + return err + } + + case "minimum_index_version": + if err := dec.Decode(&s.MinimumIndexVersion); err != nil { + return err + } + + } + } + return nil +} + // NewGetMigrationFeature returns a GetMigrationFeature. func NewGetMigrationFeature() *GetMigrationFeature { r := &GetMigrationFeature{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getresult.go index a5dab41b7..7d5fa95c4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getresult.go @@ -16,17 +16,21 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // GetResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/get/types.ts#L25-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/get/types.ts#L25-L35 type GetResult struct { Fields map[string]json.RawMessage `json:"fields,omitempty"` Found bool `json:"found"` @@ -39,6 +43,100 @@ type GetResult struct { Version_ *int64 `json:"_version,omitempty"` } +func (s *GetResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "found": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Found = value + case bool: + s.Found = v + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "_primary_term": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PrimaryTerm_ = &value + case float64: + f := int64(v) + s.PrimaryTerm_ = &f + } + + case "_routing": + var tmp json.RawMessage + if err := 
dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Routing_ = &o + + case "_seq_no": + if err := dec.Decode(&s.SeqNo_); err != nil { + return err + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return err + } + + case "_version": + if err := dec.Decode(&s.Version_); err != nil { + return err + } + + } + } + return nil +} + // NewGetResult returns a GetResult. func NewGetResult() *GetResult { r := &GetResult{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getscriptcontext.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getscriptcontext.go index b858c26e6..44820237c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getscriptcontext.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getscriptcontext.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // GetScriptContext type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/get_script_context/types.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/get_script_context/types.ts#L22-L25 type GetScriptContext struct { Methods []ContextMethod `json:"methods"` Name string `json:"name"` } +func (s *GetScriptContext) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "methods": + if err := dec.Decode(&s.Methods); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + // NewGetScriptContext returns a GetScriptContext. func NewGetScriptContext() *GetScriptContext { r := &GetScriptContext{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getstats.go index 52d4dc330..40f331950 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // GetStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L88-L99 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L130-L141 type GetStats struct { Current int64 `json:"current"` ExistsTime Duration `json:"exists_time,omitempty"` @@ -36,6 +44,116 @@ type GetStats struct { Total int64 `json:"total"` } +func (s *GetStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Current = value + case float64: + f := int64(v) + s.Current = f + } + + case "exists_time": + if err := dec.Decode(&s.ExistsTime); err != nil { + return err + } + + case "exists_time_in_millis": + if err := dec.Decode(&s.ExistsTimeInMillis); err != nil { + return err + } + + case "exists_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ExistsTotal = value + case float64: + f := int64(v) + s.ExistsTotal = f + } + + case "missing_time": + if err := dec.Decode(&s.MissingTime); err != nil { + return err + } + + case "missing_time_in_millis": + if err := dec.Decode(&s.MissingTimeInMillis); err != nil { + return err + } + + case "missing_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MissingTotal = value + case float64: + f := int64(v) + s.MissingTotal = f + } + + case "time": + if err := dec.Decode(&s.Time); err != nil { + return err + } + + case 
"time_in_millis": + if err := dec.Decode(&s.TimeInMillis); err != nil { + return err + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + // NewGetStats returns a GetStats. func NewGetStats() *GetStats { r := &GetStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getuserprofileerrors.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getuserprofileerrors.go index ad946c5bd..3d636026f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getuserprofileerrors.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/getuserprofileerrors.go @@ -16,18 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // GetUserProfileErrors type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_user_profile/types.ts#L25-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_user_profile/types.ts#L25-L28 type GetUserProfileErrors struct { Count int64 `json:"count"` Details map[string]ErrorCause `json:"details"` } +func (s *GetUserProfileErrors) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "details": + if s.Details == nil { + s.Details = make(map[string]ErrorCause, 0) + } + if err := dec.Decode(&s.Details); err != nil { + return err + } + + } + } + return nil +} + // NewGetUserProfileErrors returns a GetUserProfileErrors. func NewGetUserProfileErrors() *GetUserProfileErrors { r := &GetUserProfileErrors{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/globalaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/globalaggregate.go index e16e79b4d..6a666cf7e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/globalaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/globalaggregate.go @@ -16,32 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // GlobalAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L491-L492 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L492-L493 type GlobalAggregate struct { - Aggregations map[string]Aggregate `json:"-"` - DocCount int64 `json:"doc_count"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` } func (s *GlobalAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -55,451 +54,19 @@ func (s *GlobalAggregate) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if 
err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := 
NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case 
"geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = 
o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "meta": @@ -507,6 +74,519 @@ func (s *GlobalAggregate) 
UnmarshalJSON(data []byte) error { return err } + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if 
err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != 
nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); 
err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := 
NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } + } } return nil @@ -531,6 +611,7 @@ func (s GlobalAggregate) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/globalaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/globalaggregation.go index 2a9a6d8b2..6442a4098 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/globalaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/globalaggregation.go @@ -16,20 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // GlobalAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L228-L228 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L487-L487 type GlobalAggregation struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` +} + +func (s *GlobalAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + } + } + return nil } // NewGlobalAggregation returns a GlobalAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/globalprivilege.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/globalprivilege.go index 7cccf6882..696d497a4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/globalprivilege.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/globalprivilege.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // GlobalPrivilege type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/Privileges.ts#L187-L189 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/Privileges.ts#L189-L191 type GlobalPrivilege struct { Application ApplicationGlobalUserPrivileges `json:"application"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/googlenormalizeddistanceheuristic.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/googlenormalizeddistanceheuristic.go index e24d31091..331844985 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/googlenormalizeddistanceheuristic.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/googlenormalizeddistanceheuristic.go @@ -16,17 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // GoogleNormalizedDistanceHeuristic type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L327-L329 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L746-L751 type GoogleNormalizedDistanceHeuristic struct { + // BackgroundIsSuperset Set to `false` if you defined a custom background filter that represents a + // different set of documents that you want to compare to. 
BackgroundIsSuperset *bool `json:"background_is_superset,omitempty"` } +func (s *GoogleNormalizedDistanceHeuristic) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "background_is_superset": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.BackgroundIsSuperset = &value + case bool: + s.BackgroundIsSuperset = &v + } + + } + } + return nil +} + // NewGoogleNormalizedDistanceHeuristic returns a GoogleNormalizedDistanceHeuristic. func NewGoogleNormalizedDistanceHeuristic() *GoogleNormalizedDistanceHeuristic { r := &GoogleNormalizedDistanceHeuristic{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/grantapikey.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/grantapikey.go index cd91c0f89..8fbaa7244 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/grantapikey.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/grantapikey.go @@ -16,24 +16,94 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // GrantApiKey type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/grant_api_key/types.ts#L25-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/grant_api_key/types.ts#L25-L46 type GrantApiKey struct { - Expiration *string `json:"expiration,omitempty"` - Metadata map[string]json.RawMessage `json:"metadata,omitempty"` - Name string `json:"name"` + // Expiration Expiration time for the API key. By default, API keys never expire. + Expiration *string `json:"expiration,omitempty"` + // Metadata Arbitrary metadata that you want to associate with the API key. + // It supports nested data structure. + // Within the `metadata` object, keys beginning with `_` are reserved for system + // usage. + Metadata Metadata `json:"metadata,omitempty"` + Name string `json:"name"` + // RoleDescriptors The role descriptors for this API key. + // This parameter is optional. + // When it is not specified or is an empty array, the API key has a point in + // time snapshot of permissions of the specified user or access token. + // If you supply role descriptors, the resultant permissions are an intersection + // of API keys permissions and the permissions of the user or access token. 
RoleDescriptors []map[string]RoleDescriptor `json:"role_descriptors,omitempty"` } +func (s *GrantApiKey) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expiration": + if err := dec.Decode(&s.Expiration); err != nil { + return err + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "role_descriptors": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]RoleDescriptor, 0) + if err := localDec.Decode(&o); err != nil { + return err + } + s.RoleDescriptors = append(s.RoleDescriptors, o) + case '[': + o := make([]map[string]RoleDescriptor, 0) + if err := localDec.Decode(&o); err != nil { + return err + } + s.RoleDescriptors = o + } + + } + } + return nil +} + // NewGrantApiKey returns a GrantApiKey. func NewGrantApiKey() *GrantApiKey { r := &GrantApiKey{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/grokprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/grokprocessor.go index 870748d6c..09afa8d33 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/grokprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/grokprocessor.go @@ -16,24 +16,171 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // GrokProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L221-L227 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L661-L686 type GrokProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - PatternDefinitions map[string]string `json:"pattern_definitions,omitempty"` - Patterns []string `json:"patterns"` - Tag *string `json:"tag,omitempty"` - TraceMatch *bool `json:"trace_match,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to use for grok expression parsing. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // PatternDefinitions A map of pattern-name and pattern tuples defining custom patterns to be used + // by the current processor. + // Patterns matching existing names will override the pre-existing definition. + PatternDefinitions map[string]string `json:"pattern_definitions,omitempty"` + // Patterns An ordered list of grok expression to match and extract named captures with. 
+ // Returns on the first expression in the list that matches. + Patterns []string `json:"patterns"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TraceMatch When `true`, `_ingest._grok_match_index` will be inserted into your matched + // document’s metadata with the index into the pattern found in `patterns` that + // matched. + TraceMatch *bool `json:"trace_match,omitempty"` +} + +func (s *GrokProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "pattern_definitions": + if s.PatternDefinitions == nil { + s.PatternDefinitions = make(map[string]string, 0) + } + if err := dec.Decode(&s.PatternDefinitions); err != nil { + return err + } 
+ + case "patterns": + if err := dec.Decode(&s.Patterns); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "trace_match": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TraceMatch = &value + case bool: + s.TraceMatch = &v + } + + } + } + return nil } // NewGrokProcessor returns a GrokProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/groupings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/groupings.go index baea62977..b6b8637b2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/groupings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/groupings.go @@ -16,17 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Groupings type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/_types/Groupings.ts#L24-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/_types/Groupings.ts#L24-L40 type Groupings struct { + // DateHistogram A date histogram group aggregates a date field into time-based buckets. + // This group is mandatory; you currently cannot roll up documents without a + // timestamp and a `date_histogram` group. 
DateHistogram *DateHistogramGrouping `json:"date_histogram,omitempty"` - Histogram *HistogramGrouping `json:"histogram,omitempty"` - Terms *TermsGrouping `json:"terms,omitempty"` + // Histogram The histogram group aggregates one or more numeric fields into numeric + // histogram intervals. + Histogram *HistogramGrouping `json:"histogram,omitempty"` + // Terms The terms group can be used on keyword or numeric fields to allow bucketing + // via the terms aggregation at a later point. + // The indexer enumerates and stores all values of a field for each time-period. + // This can be potentially costly for high-cardinality groups such as IP + // addresses, especially if the time-bucket is particularly sparse. + Terms *TermsGrouping `json:"terms,omitempty"` } // NewGroupings returns a Groupings. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/gsubprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/gsubprocessor.go index 6393dd990..ef861f341 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/gsubprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/gsubprocessor.go @@ -16,24 +16,169 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // GsubProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L229-L235 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L688-L712 type GsubProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Pattern string `json:"pattern"` - Replacement string `json:"replacement"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to apply the replacement to. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Pattern The pattern to be replaced. + Pattern string `json:"pattern"` + // Replacement The string to replace the matching patterns with. + Replacement string `json:"replacement"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the converted value to + // By default, the `field` is updated in-place. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *GsubProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = o + + case "replacement": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Replacement = o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + 
if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewGsubProcessor returns a GsubProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/halffloatnumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/halffloatnumberproperty.go index 778ae515b..ab5212d8b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/halffloatnumberproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/halffloatnumberproperty.go @@ -16,25 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // HalfFloatNumberProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L136-L139 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L139-L142 type HalfFloatNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -63,6 +63,7 @@ type HalfFloatNumberProperty struct { } func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -77,23 +78,63 @@ func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues 
= &value + case bool: + s.DocValues = &v } case "dynamic": @@ -102,6 +143,9 @@ func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -110,7 +154,9 @@ func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -389,35 +435,80 @@ func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "null_value": - if err := 
dec.Decode(&s.NullValue); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.NullValue = &f + case float64: + f := float32(v) + s.NullValue = &f } case "on_script_error": @@ -426,6 +517,9 @@ func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -434,7 +528,9 @@ func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -713,9 +809,11 @@ func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -725,18 +823,43 @@ func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "time_series_dimension": - if err := dec.Decode(&s.TimeSeriesDimension); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := 
tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v } case "time_series_metric": @@ -754,6 +877,36 @@ func (s *HalfFloatNumberProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s HalfFloatNumberProperty) MarshalJSON() ([]byte, error) { + type innerHalfFloatNumberProperty HalfFloatNumberProperty + tmp := innerHalfFloatNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Similarity: s.Similarity, + Store: s.Store, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "half_float" + + return json.Marshal(tmp) +} + // NewHalfFloatNumberProperty returns a HalfFloatNumberProperty. func NewHalfFloatNumberProperty() *HalfFloatNumberProperty { r := &HalfFloatNumberProperty{ @@ -762,7 +915,5 @@ func NewHalfFloatNumberProperty() *HalfFloatNumberProperty { Properties: make(map[string]Property, 0), } - r.Type = "half_float" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/haschildquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/haschildquery.go index 3e7cfbea4..f9583a631 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/haschildquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/haschildquery.go @@ -16,27 +16,169 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/childscoremode" ) // HasChildQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/joining.ts#L41-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/joining.ts#L41-L76 type HasChildQuery struct { - Boost *float32 `json:"boost,omitempty"` - IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` - InnerHits *InnerHits `json:"inner_hits,omitempty"` - MaxChildren *int `json:"max_children,omitempty"` - MinChildren *int `json:"min_children,omitempty"` - Query *Query `json:"query,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - ScoreMode *childscoremode.ChildScoreMode `json:"score_mode,omitempty"` - Type string `json:"type"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // IgnoreUnmapped Indicates whether to ignore an unmapped `type` and not return any documents + // instead of an error. + IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + // InnerHits If defined, each search hit will contain inner hits. + InnerHits *InnerHits `json:"inner_hits,omitempty"` + // MaxChildren Maximum number of child documents that match the query allowed for a returned + // parent document. 
+ // If the parent document exceeds this limit, it is excluded from the search + // results. + MaxChildren *int `json:"max_children,omitempty"` + // MinChildren Minimum number of child documents that match the query required to match the + // query for a returned parent document. + // If the parent document does not meet this limit, it is excluded from the + // search results. + MinChildren *int `json:"min_children,omitempty"` + // Query Query you wish to run on child documents of the `type` field. + // If a child document matches the search, the query returns the parent + // document. + Query *Query `json:"query,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // ScoreMode Indicates how scores for matching child documents affect the root parent + // document’s relevance score. + ScoreMode *childscoremode.ChildScoreMode `json:"score_mode,omitempty"` + // Type Name of the child relationship mapped for the `join` field. + Type string `json:"type"` +} + +func (s *HasChildQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "ignore_unmapped": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "inner_hits": + if err := dec.Decode(&s.InnerHits); err != nil { + return err + } + + case "max_children": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } 
+ s.MaxChildren = &value + case float64: + f := int(v) + s.MaxChildren = &f + } + + case "min_children": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinChildren = &value + case float64: + f := int(v) + s.MinChildren = &f + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "score_mode": + if err := dec.Decode(&s.ScoreMode); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil } // NewHasChildQuery returns a HasChildQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hasparentquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hasparentquery.go index 27f5247b0..416a59c80 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hasparentquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hasparentquery.go @@ -16,21 +16,136 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // HasParentQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/joining.ts#L53-L61 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/joining.ts#L78-L104 type HasParentQuery struct { - Boost *float32 `json:"boost,omitempty"` - IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` - InnerHits *InnerHits `json:"inner_hits,omitempty"` - ParentType string `json:"parent_type"` - Query *Query `json:"query,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Score *bool `json:"score,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // IgnoreUnmapped Indicates whether to ignore an unmapped `parent_type` and not return any + // documents instead of an error. + // You can use this parameter to query multiple indices that may not contain the + // `parent_type`. + IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + // InnerHits If defined, each search hit will contain inner hits. + InnerHits *InnerHits `json:"inner_hits,omitempty"` + // ParentType Name of the parent relationship mapped for the `join` field. + ParentType string `json:"parent_type"` + // Query Query you wish to run on parent documents of the `parent_type` field. + // If a parent document matches the search, the query returns its child + // documents. + Query *Query `json:"query,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Score Indicates whether the relevance score of a matching parent document is + // aggregated into its child documents. 
+ Score *bool `json:"score,omitempty"` +} + +func (s *HasParentQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "ignore_unmapped": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "inner_hits": + if err := dec.Decode(&s.InnerHits); err != nil { + return err + } + + case "parent_type": + if err := dec.Decode(&s.ParentType); err != nil { + return err + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Score = &value + case bool: + s.Score = &v + } + + } + } + return nil } // NewHasParentQuery returns a HasParentQuery. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hasprivilegesuserprofileerrors.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hasprivilegesuserprofileerrors.go index ca0287aeb..5f1444e4e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hasprivilegesuserprofileerrors.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hasprivilegesuserprofileerrors.go @@ -16,18 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // HasPrivilegesUserProfileErrors type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/has_privileges_user_profile/types.ts#L39-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/has_privileges_user_profile/types.ts#L39-L42 type HasPrivilegesUserProfileErrors struct { Count int64 `json:"count"` Details map[string]ErrorCause `json:"details"` } +func (s *HasPrivilegesUserProfileErrors) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "details": + if s.Details == nil { + s.Details = make(map[string]ErrorCause, 0) + } + if err := dec.Decode(&s.Details); err != nil 
{ + return err + } + + } + } + return nil +} + // NewHasPrivilegesUserProfileErrors returns a HasPrivilegesUserProfileErrors. func NewHasPrivilegesUserProfileErrors() *HasPrivilegesUserProfileErrors { r := &HasPrivilegesUserProfileErrors{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hdrmethod.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hdrmethod.go index c6f3738e7..a2031f7cc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hdrmethod.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hdrmethod.go @@ -16,17 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // HdrMethod type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L119-L121 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L216-L221 type HdrMethod struct { + // NumberOfSignificantValueDigits Specifies the resolution of values for the histogram in number of significant + // digits. 
NumberOfSignificantValueDigits *int `json:"number_of_significant_value_digits,omitempty"` } +func (s *HdrMethod) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "number_of_significant_value_digits": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumberOfSignificantValueDigits = &value + case float64: + f := int(v) + s.NumberOfSignificantValueDigits = &f + } + + } + } + return nil +} + // NewHdrMethod returns a HdrMethod. func NewHdrMethod() *HdrMethod { r := &HdrMethod{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hdrpercentileranksaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hdrpercentileranksaggregate.go index c1946fcdc..33542187d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hdrpercentileranksaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hdrpercentileranksaggregate.go @@ -16,20 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // HdrPercentileRanksAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L168-L169 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L169-L170 type HdrPercentileRanksAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Values Percentiles `json:"values"` + Meta Metadata `json:"meta,omitempty"` + Values Percentiles `json:"values"` +} + +func (s *HdrPercentileRanksAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "values": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(KeyedPercentiles, 0) + if err := localDec.Decode(&o); err != nil { + return err + } + s.Values = o + case '[': + o := []ArrayPercentilesItem{} + if err := localDec.Decode(&o); err != nil { + return err + } + s.Values = o + } + + } + } + return nil } // NewHdrPercentileRanksAggregate returns a HdrPercentileRanksAggregate. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hdrpercentilesaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hdrpercentilesaggregate.go index d906c66fb..cb98a1243 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hdrpercentilesaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hdrpercentilesaggregate.go @@ -16,20 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // HdrPercentilesAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L165-L166 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L166-L167 type HdrPercentilesAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Values Percentiles `json:"values"` + Meta Metadata `json:"meta,omitempty"` + Values Percentiles `json:"values"` +} + +func (s *HdrPercentilesAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "values": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(KeyedPercentiles, 0) + if err := localDec.Decode(&o); err != nil { + return err + } + s.Values = o + case '[': + o := []ArrayPercentilesItem{} + if err := localDec.Decode(&o); err != nil { + return err + } + s.Values = o + } + + } + } + return nil } // NewHdrPercentilesAggregate returns a HdrPercentilesAggregate. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/healthrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/healthrecord.go index 05efc4a10..0c9efefa9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/healthrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/healthrecord.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // HealthRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/health/types.ts#L23-L94 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/health/types.ts#L23-L94 type HealthRecord struct { // ActiveShardsPercent active number of shards in percent ActiveShardsPercent *string `json:"active_shards_percent,omitempty"` @@ -54,6 +62,180 @@ type HealthRecord struct { Unassign *string `json:"unassign,omitempty"` } +func (s *HealthRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active_shards_percent", "asp", "activeShardsPercent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ActiveShardsPercent = &o + + case "cluster", "cl": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Cluster = &o + + case "epoch", "time": + if err := dec.Decode(&s.Epoch); err != nil { + return err + } + + case "init", "i", "shards.initializing", "shardsInitializing": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Init = &o + + case "max_task_wait_time", "mtwt", "maxTaskWaitTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxTaskWaitTime = &o + + case "node.data", "nd", "nodeData": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeData = &o + + case "node.total", "nt", "nodeTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeTotal = &o + + case "pending_tasks", "pt", "pendingTasks": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PendingTasks = &o + + case "pri", "p", "shards.primary", "shardsPrimary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pri = &o + + case "relo", "r", "shards.relocating", "shardsRelocating": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Relo = &o + + case "shards", "t", "sh", "shards.total", "shardsTotal": + 
var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Shards = &o + + case "status", "st": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Status = &o + + case "timestamp", "ts", "hms", "hhmmss": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + case "unassign", "u", "shards.unassigned", "shardsUnassigned": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Unassign = &o + + } + } + return nil +} + // NewHealthRecord returns a HealthRecord. func NewHealthRecord() *HealthRecord { r := &HealthRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/healthresponsebody.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/healthresponsebody.go new file mode 100644 index 000000000..713caf34e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/healthresponsebody.go @@ -0,0 +1,308 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/healthstatus" +) + +// HealthResponseBody type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/health/ClusterHealthResponse.ts#L39-L72 +type HealthResponseBody struct { + // ActivePrimaryShards The number of active primary shards. + ActivePrimaryShards int `json:"active_primary_shards"` + // ActiveShards The total number of active primary and replica shards. + ActiveShards int `json:"active_shards"` + // ActiveShardsPercentAsNumber The ratio of active shards in the cluster expressed as a percentage. + ActiveShardsPercentAsNumber Percentage `json:"active_shards_percent_as_number"` + // ClusterName The name of the cluster. + ClusterName string `json:"cluster_name"` + // DelayedUnassignedShards The number of shards whose allocation has been delayed by the timeout + // settings. + DelayedUnassignedShards int `json:"delayed_unassigned_shards"` + Indices map[string]IndexHealthStats `json:"indices,omitempty"` + // InitializingShards The number of shards that are under initialization. + InitializingShards int `json:"initializing_shards"` + // NumberOfDataNodes The number of nodes that are dedicated data nodes. + NumberOfDataNodes int `json:"number_of_data_nodes"` + // NumberOfInFlightFetch The number of unfinished fetches. + NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"` + // NumberOfNodes The number of nodes within the cluster. 
+ NumberOfNodes int `json:"number_of_nodes"` + // NumberOfPendingTasks The number of cluster-level changes that have not yet been executed. + NumberOfPendingTasks int `json:"number_of_pending_tasks"` + // RelocatingShards The number of shards that are under relocation. + RelocatingShards int `json:"relocating_shards"` + Status healthstatus.HealthStatus `json:"status"` + // TaskMaxWaitingInQueue The time since the earliest initiated task is waiting for being performed. + TaskMaxWaitingInQueue Duration `json:"task_max_waiting_in_queue,omitempty"` + // TaskMaxWaitingInQueueMillis The time expressed in milliseconds since the earliest initiated task is + // waiting for being performed. + TaskMaxWaitingInQueueMillis int64 `json:"task_max_waiting_in_queue_millis"` + // TimedOut If false the response returned within the period of time that is specified by + // the timeout parameter (30s by default) + TimedOut bool `json:"timed_out"` + // UnassignedShards The number of shards that are not allocated. 
+ UnassignedShards int `json:"unassigned_shards"` +} + +func (s *HealthResponseBody) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active_primary_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ActivePrimaryShards = value + case float64: + f := int(v) + s.ActivePrimaryShards = f + } + + case "active_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ActiveShards = value + case float64: + f := int(v) + s.ActiveShards = f + } + + case "active_shards_percent_as_number": + if err := dec.Decode(&s.ActiveShardsPercentAsNumber); err != nil { + return err + } + + case "cluster_name": + if err := dec.Decode(&s.ClusterName); err != nil { + return err + } + + case "delayed_unassigned_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DelayedUnassignedShards = value + case float64: + f := int(v) + s.DelayedUnassignedShards = f + } + + case "indices": + if s.Indices == nil { + s.Indices = make(map[string]IndexHealthStats, 0) + } + if err := dec.Decode(&s.Indices); err != nil { + return err + } + + case "initializing_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.InitializingShards = value + case float64: + f := int(v) + s.InitializingShards = f + } + + case "number_of_data_nodes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumberOfDataNodes 
= value + case float64: + f := int(v) + s.NumberOfDataNodes = f + } + + case "number_of_in_flight_fetch": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumberOfInFlightFetch = value + case float64: + f := int(v) + s.NumberOfInFlightFetch = f + } + + case "number_of_nodes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumberOfNodes = value + case float64: + f := int(v) + s.NumberOfNodes = f + } + + case "number_of_pending_tasks": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumberOfPendingTasks = value + case float64: + f := int(v) + s.NumberOfPendingTasks = f + } + + case "relocating_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.RelocatingShards = value + case float64: + f := int(v) + s.RelocatingShards = f + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "task_max_waiting_in_queue": + if err := dec.Decode(&s.TaskMaxWaitingInQueue); err != nil { + return err + } + + case "task_max_waiting_in_queue_millis": + if err := dec.Decode(&s.TaskMaxWaitingInQueueMillis); err != nil { + return err + } + + case "timed_out": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimedOut = value + case bool: + s.TimedOut = v + } + + case "unassigned_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.UnassignedShards = value + case float64: + f := int(v) + s.UnassignedShards 
= f + } + + } + } + return nil +} + +// NewHealthResponseBody returns a HealthResponseBody. +func NewHealthResponseBody() *HealthResponseBody { + r := &HealthResponseBody{ + Indices: make(map[string]IndexHealthStats, 0), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/healthstatistics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/healthstatistics.go index d377a4454..9b1344e3a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/healthstatistics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/healthstatistics.go @@ -16,19 +16,80 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // HealthStatistics type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L153-L155 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L153-L155 type HealthStatistics struct { Available bool `json:"available"` Enabled bool `json:"enabled"` Invocations Invocations `json:"invocations"` } +func (s *HealthStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "invocations": + if err := dec.Decode(&s.Invocations); err != nil { + return err + } + + } + } + return nil +} + // NewHealthStatistics returns a HealthStatistics. func NewHealthStatistics() *HealthStatistics { r := &HealthStatistics{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/helprecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/helprecord.go index 3493ed45d..721730f0a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/helprecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/helprecord.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // HelpRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/help/types.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/help/types.ts#L20-L22 type HelpRecord struct { Endpoint string `json:"endpoint"` } +func (s *HelpRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "endpoint": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Endpoint = o + + } + } + return nil +} + // NewHelpRecord returns a HelpRecord. func NewHelpRecord() *HelpRecord { r := &HelpRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/highlight.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/highlight.go index ca11123cf..e96d55728 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/highlight.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/highlight.go @@ -16,12 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/boundaryscanner" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterencoder" @@ -33,31 +37,341 @@ import ( // Highlight type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/highlighting.ts#L57-L60 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/highlighting.ts#L153-L156 type Highlight struct { - BoundaryChars *string `json:"boundary_chars,omitempty"` - BoundaryMaxScan *int `json:"boundary_max_scan,omitempty"` - BoundaryScanner *boundaryscanner.BoundaryScanner `json:"boundary_scanner,omitempty"` - BoundaryScannerLocale *string `json:"boundary_scanner_locale,omitempty"` - Encoder *highlighterencoder.HighlighterEncoder `json:"encoder,omitempty"` - Fields map[string]HighlightField `json:"fields"` - ForceSource *bool `json:"force_source,omitempty"` - FragmentSize *int `json:"fragment_size,omitempty"` - Fragmenter *highlighterfragmenter.HighlighterFragmenter `json:"fragmenter,omitempty"` - HighlightFilter *bool `json:"highlight_filter,omitempty"` - HighlightQuery *Query `json:"highlight_query,omitempty"` - MaxAnalyzedOffset *int `json:"max_analyzed_offset,omitempty"` - MaxFragmentLength *int `json:"max_fragment_length,omitempty"` - NoMatchSize *int `json:"no_match_size,omitempty"` - NumberOfFragments *int `json:"number_of_fragments,omitempty"` - Options map[string]json.RawMessage `json:"options,omitempty"` - Order *highlighterorder.HighlighterOrder `json:"order,omitempty"` - PhraseLimit *int 
`json:"phrase_limit,omitempty"` - PostTags []string `json:"post_tags,omitempty"` - PreTags []string `json:"pre_tags,omitempty"` - RequireFieldMatch *bool `json:"require_field_match,omitempty"` - TagsSchema *highlightertagsschema.HighlighterTagsSchema `json:"tags_schema,omitempty"` - Type *highlightertype.HighlighterType `json:"type,omitempty"` + // BoundaryChars A string that contains each boundary character. + BoundaryChars *string `json:"boundary_chars,omitempty"` + // BoundaryMaxScan How far to scan for boundary characters. + BoundaryMaxScan *int `json:"boundary_max_scan,omitempty"` + // BoundaryScanner Specifies how to break the highlighted fragments: chars, sentence, or word. + // Only valid for the unified and fvh highlighters. + // Defaults to `sentence` for the `unified` highlighter. Defaults to `chars` for + // the `fvh` highlighter. + BoundaryScanner *boundaryscanner.BoundaryScanner `json:"boundary_scanner,omitempty"` + // BoundaryScannerLocale Controls which locale is used to search for sentence and word boundaries. + // This parameter takes a form of a language tag, for example: `"en-US"`, + // `"fr-FR"`, `"ja-JP"`. + BoundaryScannerLocale *string `json:"boundary_scanner_locale,omitempty"` + Encoder *highlighterencoder.HighlighterEncoder `json:"encoder,omitempty"` + Fields map[string]HighlightField `json:"fields"` + ForceSource *bool `json:"force_source,omitempty"` + // FragmentSize The size of the highlighted fragment in characters. + FragmentSize *int `json:"fragment_size,omitempty"` + // Fragmenter Specifies how text should be broken up in highlight snippets: `simple` or + // `span`. + // Only valid for the `plain` highlighter. + Fragmenter *highlighterfragmenter.HighlighterFragmenter `json:"fragmenter,omitempty"` + HighlightFilter *bool `json:"highlight_filter,omitempty"` + // HighlightQuery Highlight matches for a query other than the search query. 
+ // This is especially useful if you use a rescore query because those are not + // taken into account by highlighting by default. + HighlightQuery *Query `json:"highlight_query,omitempty"` + // MaxAnalyzedOffset If set to a non-negative value, highlighting stops at this defined maximum + // limit. + // The rest of the text is not processed, thus not highlighted and no error is + // returned + // The `max_analyzed_offset` query setting does not override the + // `index.highlight.max_analyzed_offset` setting, which prevails when it’s set + // to lower value than the query setting. + MaxAnalyzedOffset *int `json:"max_analyzed_offset,omitempty"` + MaxFragmentLength *int `json:"max_fragment_length,omitempty"` + // NoMatchSize The amount of text you want to return from the beginning of the field if + // there are no matching fragments to highlight. + NoMatchSize *int `json:"no_match_size,omitempty"` + // NumberOfFragments The maximum number of fragments to return. + // If the number of fragments is set to `0`, no fragments are returned. + // Instead, the entire field contents are highlighted and returned. + // This can be handy when you need to highlight short texts such as a title or + // address, but fragmentation is not required. + // If `number_of_fragments` is `0`, `fragment_size` is ignored. + NumberOfFragments *int `json:"number_of_fragments,omitempty"` + Options map[string]json.RawMessage `json:"options,omitempty"` + // Order Sorts highlighted fragments by score when set to `score`. + // By default, fragments will be output in the order they appear in the field + // (order: `none`). + // Setting this option to `score` will output the most relevant fragments first. + // Each highlighter applies its own logic to compute relevancy scores. + Order *highlighterorder.HighlighterOrder `json:"order,omitempty"` + // PhraseLimit Controls the number of matching phrases in a document that are considered. 
+ // Prevents the `fvh` highlighter from analyzing too many phrases and consuming + // too much memory. + // When using `matched_fields`, `phrase_limit` phrases per matched field are + // considered. Raising the limit increases query time and consumes more memory. + // Only supported by the `fvh` highlighter. + PhraseLimit *int `json:"phrase_limit,omitempty"` + // PostTags Use in conjunction with `pre_tags` to define the HTML tags to use for the + // highlighted text. + // By default, highlighted text is wrapped in `` and `` tags. + PostTags []string `json:"post_tags,omitempty"` + // PreTags Use in conjunction with `post_tags` to define the HTML tags to use for the + // highlighted text. + // By default, highlighted text is wrapped in `` and `` tags. + PreTags []string `json:"pre_tags,omitempty"` + // RequireFieldMatch By default, only fields that contains a query match are highlighted. + // Set to `false` to highlight all fields. + RequireFieldMatch *bool `json:"require_field_match,omitempty"` + // TagsSchema Set to `styled` to use the built-in tag schema. 
+ TagsSchema *highlightertagsschema.HighlighterTagsSchema `json:"tags_schema,omitempty"` + Type *highlightertype.HighlighterType `json:"type,omitempty"` +} + +func (s *Highlight) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boundary_chars": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BoundaryChars = &o + + case "boundary_max_scan": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.BoundaryMaxScan = &value + case float64: + f := int(v) + s.BoundaryMaxScan = &f + } + + case "boundary_scanner": + if err := dec.Decode(&s.BoundaryScanner); err != nil { + return err + } + + case "boundary_scanner_locale": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BoundaryScannerLocale = &o + + case "encoder": + if err := dec.Decode(&s.Encoder); err != nil { + return err + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]HighlightField, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "force_source": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.ForceSource = &value + case bool: + s.ForceSource = &v + } + + case "fragment_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FragmentSize = &value + case float64: + f := int(v) + s.FragmentSize = &f + } + + case 
"fragmenter": + if err := dec.Decode(&s.Fragmenter); err != nil { + return err + } + + case "highlight_filter": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.HighlightFilter = &value + case bool: + s.HighlightFilter = &v + } + + case "highlight_query": + if err := dec.Decode(&s.HighlightQuery); err != nil { + return err + } + + case "max_analyzed_offset": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxAnalyzedOffset = &value + case float64: + f := int(v) + s.MaxAnalyzedOffset = &f + } + + case "max_fragment_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxFragmentLength = &value + case float64: + f := int(v) + s.MaxFragmentLength = &f + } + + case "no_match_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NoMatchSize = &value + case float64: + f := int(v) + s.NoMatchSize = &f + } + + case "number_of_fragments": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumberOfFragments = &value + case float64: + f := int(v) + s.NumberOfFragments = &f + } + + case "options": + if s.Options == nil { + s.Options = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Options); err != nil { + return err + } + + case "order": + if err := dec.Decode(&s.Order); err != nil { + return err + } + + case "phrase_limit": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PhraseLimit = &value + case float64: + f := 
int(v) + s.PhraseLimit = &f + } + + case "post_tags": + if err := dec.Decode(&s.PostTags); err != nil { + return err + } + + case "pre_tags": + if err := dec.Decode(&s.PreTags); err != nil { + return err + } + + case "require_field_match": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.RequireFieldMatch = &value + case bool: + s.RequireFieldMatch = &v + } + + case "tags_schema": + if err := dec.Decode(&s.TagsSchema); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil } // NewHighlight returns a Highlight. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/highlightfield.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/highlightfield.go index be7a6cd0f..ef0560185 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/highlightfield.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/highlightfield.go @@ -16,55 +16,107 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/boundaryscanner" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterfragmenter" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlighterorder" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlightertagsschema" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/highlightertype" - - "bytes" - "errors" - "io" - - "encoding/json" ) // HighlightField type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/highlighting.ts#L88-L92 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/highlighting.ts#L193-L197 type HighlightField struct { - Analyzer Analyzer `json:"analyzer,omitempty"` - BoundaryChars *string `json:"boundary_chars,omitempty"` - BoundaryMaxScan *int `json:"boundary_max_scan,omitempty"` - BoundaryScanner *boundaryscanner.BoundaryScanner `json:"boundary_scanner,omitempty"` - BoundaryScannerLocale *string `json:"boundary_scanner_locale,omitempty"` - ForceSource *bool `json:"force_source,omitempty"` - FragmentOffset *int `json:"fragment_offset,omitempty"` - FragmentSize *int `json:"fragment_size,omitempty"` - Fragmenter *highlighterfragmenter.HighlighterFragmenter `json:"fragmenter,omitempty"` - HighlightFilter *bool `json:"highlight_filter,omitempty"` - HighlightQuery *Query `json:"highlight_query,omitempty"` - MatchedFields []string `json:"matched_fields,omitempty"` - MaxAnalyzedOffset *int `json:"max_analyzed_offset,omitempty"` - MaxFragmentLength *int `json:"max_fragment_length,omitempty"` - NoMatchSize *int `json:"no_match_size,omitempty"` - NumberOfFragments *int `json:"number_of_fragments,omitempty"` - Options map[string]json.RawMessage `json:"options,omitempty"` - Order *highlighterorder.HighlighterOrder `json:"order,omitempty"` - PhraseLimit *int `json:"phrase_limit,omitempty"` - PostTags []string `json:"post_tags,omitempty"` - PreTags []string `json:"pre_tags,omitempty"` - RequireFieldMatch *bool `json:"require_field_match,omitempty"` - TagsSchema *highlightertagsschema.HighlighterTagsSchema `json:"tags_schema,omitempty"` - Type *highlightertype.HighlighterType `json:"type,omitempty"` + Analyzer Analyzer `json:"analyzer,omitempty"` + // BoundaryChars A string that contains each boundary character. 
+ BoundaryChars *string `json:"boundary_chars,omitempty"` + // BoundaryMaxScan How far to scan for boundary characters. + BoundaryMaxScan *int `json:"boundary_max_scan,omitempty"` + // BoundaryScanner Specifies how to break the highlighted fragments: chars, sentence, or word. + // Only valid for the unified and fvh highlighters. + // Defaults to `sentence` for the `unified` highlighter. Defaults to `chars` for + // the `fvh` highlighter. + BoundaryScanner *boundaryscanner.BoundaryScanner `json:"boundary_scanner,omitempty"` + // BoundaryScannerLocale Controls which locale is used to search for sentence and word boundaries. + // This parameter takes a form of a language tag, for example: `"en-US"`, + // `"fr-FR"`, `"ja-JP"`. + BoundaryScannerLocale *string `json:"boundary_scanner_locale,omitempty"` + ForceSource *bool `json:"force_source,omitempty"` + FragmentOffset *int `json:"fragment_offset,omitempty"` + // FragmentSize The size of the highlighted fragment in characters. + FragmentSize *int `json:"fragment_size,omitempty"` + // Fragmenter Specifies how text should be broken up in highlight snippets: `simple` or + // `span`. + // Only valid for the `plain` highlighter. + Fragmenter *highlighterfragmenter.HighlighterFragmenter `json:"fragmenter,omitempty"` + HighlightFilter *bool `json:"highlight_filter,omitempty"` + // HighlightQuery Highlight matches for a query other than the search query. + // This is especially useful if you use a rescore query because those are not + // taken into account by highlighting by default. + HighlightQuery *Query `json:"highlight_query,omitempty"` + MatchedFields []string `json:"matched_fields,omitempty"` + // MaxAnalyzedOffset If set to a non-negative value, highlighting stops at this defined maximum + // limit. 
+ // The rest of the text is not processed, thus not highlighted and no error is + // returned + // The `max_analyzed_offset` query setting does not override the + // `index.highlight.max_analyzed_offset` setting, which prevails when it’s set + // to lower value than the query setting. + MaxAnalyzedOffset *int `json:"max_analyzed_offset,omitempty"` + MaxFragmentLength *int `json:"max_fragment_length,omitempty"` + // NoMatchSize The amount of text you want to return from the beginning of the field if + // there are no matching fragments to highlight. + NoMatchSize *int `json:"no_match_size,omitempty"` + // NumberOfFragments The maximum number of fragments to return. + // If the number of fragments is set to `0`, no fragments are returned. + // Instead, the entire field contents are highlighted and returned. + // This can be handy when you need to highlight short texts such as a title or + // address, but fragmentation is not required. + // If `number_of_fragments` is `0`, `fragment_size` is ignored. + NumberOfFragments *int `json:"number_of_fragments,omitempty"` + Options map[string]json.RawMessage `json:"options,omitempty"` + // Order Sorts highlighted fragments by score when set to `score`. + // By default, fragments will be output in the order they appear in the field + // (order: `none`). + // Setting this option to `score` will output the most relevant fragments first. + // Each highlighter applies its own logic to compute relevancy scores. + Order *highlighterorder.HighlighterOrder `json:"order,omitempty"` + // PhraseLimit Controls the number of matching phrases in a document that are considered. + // Prevents the `fvh` highlighter from analyzing too many phrases and consuming + // too much memory. + // When using `matched_fields`, `phrase_limit` phrases per matched field are + // considered. Raising the limit increases query time and consumes more memory. + // Only supported by the `fvh` highlighter. 
+ PhraseLimit *int `json:"phrase_limit,omitempty"` + // PostTags Use in conjunction with `pre_tags` to define the HTML tags to use for the + // highlighted text. + // By default, highlighted text is wrapped in `` and `` tags. + PostTags []string `json:"post_tags,omitempty"` + // PreTags Use in conjunction with `post_tags` to define the HTML tags to use for the + // highlighted text. + // By default, highlighted text is wrapped in `` and `` tags. + PreTags []string `json:"pre_tags,omitempty"` + // RequireFieldMatch By default, only fields that contains a query match are highlighted. + // Set to `false` to highlight all fields. + RequireFieldMatch *bool `json:"require_field_match,omitempty"` + // TagsSchema Set to `styled` to use the built-in tag schema. + TagsSchema *highlightertagsschema.HighlighterTagsSchema `json:"tags_schema,omitempty"` + Type *highlightertype.HighlighterType `json:"type,omitempty"` } func (s *HighlightField) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -87,107 +139,127 @@ func (s *HighlightField) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(source) localDec.Decode(&kind) source.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "custom" + } switch kind["type"] { case "custom": o := NewCustomAnalyzer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Analyzer = *o case "fingerprint": o := NewFingerprintAnalyzer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Analyzer = *o case "keyword": o := NewKeywordAnalyzer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Analyzer = *o case "language": o := NewLanguageAnalyzer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Analyzer = *o case "nori": o := NewNoriAnalyzer() - if err := 
localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Analyzer = *o case "pattern": o := NewPatternAnalyzer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Analyzer = *o case "simple": o := NewSimpleAnalyzer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Analyzer = *o case "standard": o := NewStandardAnalyzer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Analyzer = *o case "stop": o := NewStopAnalyzer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Analyzer = *o case "whitespace": o := NewWhitespaceAnalyzer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Analyzer = *o case "icu_analyzer": o := NewIcuAnalyzer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Analyzer = *o case "kuromoji": o := NewKuromojiAnalyzer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Analyzer = *o case "snowball": o := NewSnowballAnalyzer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Analyzer = *o case "dutch": o := NewDutchAnalyzer() - if err := localDec.Decode(o); err != nil { + if err := localDec.Decode(&o); err != nil { return err } s.Analyzer = *o default: - if err := dec.Decode(&s.Analyzer); err != nil { + if err := localDec.Decode(&s.Analyzer); err != nil { return err } } case "boundary_chars": - if err := dec.Decode(&s.BoundaryChars); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BoundaryChars = &o case "boundary_max_scan": 
- if err := dec.Decode(&s.BoundaryMaxScan); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.BoundaryMaxScan = &value + case float64: + f := int(v) + s.BoundaryMaxScan = &f } case "boundary_scanner": @@ -196,23 +268,61 @@ func (s *HighlightField) UnmarshalJSON(data []byte) error { } case "boundary_scanner_locale": - if err := dec.Decode(&s.BoundaryScannerLocale); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BoundaryScannerLocale = &o case "force_source": - if err := dec.Decode(&s.ForceSource); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.ForceSource = &value + case bool: + s.ForceSource = &v } case "fragment_offset": - if err := dec.Decode(&s.FragmentOffset); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FragmentOffset = &value + case float64: + f := int(v) + s.FragmentOffset = &f } case "fragment_size": - if err := dec.Decode(&s.FragmentSize); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FragmentSize = &value + case float64: + f := int(v) + s.FragmentSize = &f } case "fragmenter": @@ -221,8 +331,17 @@ func (s *HighlightField) UnmarshalJSON(data []byte) error { } case "highlight_filter": - if err := dec.Decode(&s.HighlightFilter); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err 
!= nil { + return err + } + s.HighlightFilter = &value + case bool: + s.HighlightFilter = &v } case "highlight_query": @@ -231,31 +350,89 @@ func (s *HighlightField) UnmarshalJSON(data []byte) error { } case "matched_fields": - if err := dec.Decode(&s.MatchedFields); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.MatchedFields = append(s.MatchedFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.MatchedFields); err != nil { + return err + } } case "max_analyzed_offset": - if err := dec.Decode(&s.MaxAnalyzedOffset); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxAnalyzedOffset = &value + case float64: + f := int(v) + s.MaxAnalyzedOffset = &f } case "max_fragment_length": - if err := dec.Decode(&s.MaxFragmentLength); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxFragmentLength = &value + case float64: + f := int(v) + s.MaxFragmentLength = &f } case "no_match_size": - if err := dec.Decode(&s.NoMatchSize); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NoMatchSize = &value + case float64: + f := int(v) + s.NoMatchSize = &f } case "number_of_fragments": - if err := dec.Decode(&s.NumberOfFragments); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumberOfFragments = &value + case float64: + f := int(v) + 
s.NumberOfFragments = &f } case "options": + if s.Options == nil { + s.Options = make(map[string]json.RawMessage, 0) + } if err := dec.Decode(&s.Options); err != nil { return err } @@ -266,8 +443,19 @@ func (s *HighlightField) UnmarshalJSON(data []byte) error { } case "phrase_limit": - if err := dec.Decode(&s.PhraseLimit); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PhraseLimit = &value + case float64: + f := int(v) + s.PhraseLimit = &f } case "post_tags": @@ -281,8 +469,17 @@ func (s *HighlightField) UnmarshalJSON(data []byte) error { } case "require_field_match": - if err := dec.Decode(&s.RequireFieldMatch); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.RequireFieldMatch = &value + case bool: + s.RequireFieldMatch = &v } case "tags_schema": diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hint.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hint.go index 12cbee85b..f9167fff3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hint.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hint.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Hint type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/suggest_user_profiles/types.ts#L23-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/suggest_user_profiles/types.ts#L23-L34 type Hint struct { // Labels A single key-value pair to match against the labels section // of a profile. A profile is considered matching if it matches diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogramaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogramaggregate.go index f05c60729..67a49649a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogramaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogramaggregate.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // HistogramAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L339-L340 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L340-L341 type HistogramAggregate struct { - Buckets BucketsHistogramBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsHistogramBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *HistogramAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *HistogramAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]HistogramBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []HistogramBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogramaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogramaggregation.go index be05fa047..0373b7a36 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogramaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogramaggregation.go @@ -16,40 +16,59 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" ) // HistogramAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L235-L247 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L500-L546 type HistogramAggregation struct { - ExtendedBounds *ExtendedBoundsdouble `json:"extended_bounds,omitempty"` - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` - HardBounds *ExtendedBoundsdouble `json:"hard_bounds,omitempty"` - Interval *Float64 `json:"interval,omitempty"` - Keyed *bool `json:"keyed,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - MinDocCount *int `json:"min_doc_count,omitempty"` - Missing *Float64 `json:"missing,omitempty"` - Name *string `json:"name,omitempty"` - Offset *Float64 `json:"offset,omitempty"` - Order AggregateOrder `json:"order,omitempty"` - Script Script `json:"script,omitempty"` + // ExtendedBounds Enables extending the bounds of the histogram beyond the data itself. + ExtendedBounds *ExtendedBoundsdouble `json:"extended_bounds,omitempty"` + // Field The name of the field to aggregate on. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // HardBounds Limits the range of buckets in the histogram. + // It is particularly useful in the case of open data ranges that can result in + // a very large number of buckets. 
+ HardBounds *ExtendedBoundsdouble `json:"hard_bounds,omitempty"` + // Interval The interval for the buckets. + // Must be a positive decimal. + Interval *Float64 `json:"interval,omitempty"` + // Keyed If `true`, returns buckets as a hash instead of an array, keyed by the bucket + // keys. + Keyed *bool `json:"keyed,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // MinDocCount Only returns buckets that have `min_doc_count` number of documents. + // By default, the response will fill gaps in the histogram with empty buckets. + MinDocCount *int `json:"min_doc_count,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing *Float64 `json:"missing,omitempty"` + Name *string `json:"name,omitempty"` + // Offset By default, the bucket keys start with 0 and then continue in even spaced + // steps of `interval`. + // The bucket boundaries can be shifted by using the `offset` option. + Offset *Float64 `json:"offset,omitempty"` + // Order The sort order of the returned buckets. + // By default, the returned buckets are sorted by their key ascending. 
+ Order AggregateOrder `json:"order,omitempty"` + Script Script `json:"script,omitempty"` } func (s *HistogramAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -74,9 +93,16 @@ func (s *HistogramAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "hard_bounds": if err := dec.Decode(&s.HardBounds); err != nil { @@ -84,13 +110,33 @@ func (s *HistogramAggregation) UnmarshalJSON(data []byte) error { } case "interval": - if err := dec.Decode(&s.Interval); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Interval = &f + case float64: + f := Float64(v) + s.Interval = &f } case "keyed": - if err := dec.Decode(&s.Keyed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Keyed = &value + case bool: + s.Keyed = &v } case "meta": @@ -99,23 +145,63 @@ func (s *HistogramAggregation) UnmarshalJSON(data []byte) error { } case "min_doc_count": - if err := dec.Decode(&s.MinDocCount); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinDocCount = &value + case float64: + f := int(v) + s.MinDocCount = &f } case "missing": - if err := dec.Decode(&s.Missing); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := 
Float64(value) + s.Missing = &f + case float64: + f := Float64(v) + s.Missing = &f } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o case "offset": - if err := dec.Decode(&s.Offset); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Offset = &f + case float64: + f := Float64(v) + s.Offset = &f } case "order": @@ -125,15 +211,17 @@ func (s *HistogramAggregation) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]sortorder.SortOrder, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Order = o - case '[': o := make([]map[string]sortorder.SortOrder, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Order = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogrambucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogrambucket.go index aa7252523..470c75a1d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogrambucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogrambucket.go @@ -16,25 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // HistogramBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L342-L345 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L343-L346 type HistogramBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -43,6 +41,7 @@ type HistogramBucket struct { } func (s *HistogramBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -56,462 +55,561 @@ func (s *HistogramBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := 
NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case 
"stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return 
err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] 
= o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] 
= o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "key": - if err := dec.Decode(&s.Key); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != 
nil { + return err + } + f := Float64(value) + s.Key = f + case float64: + f := Float64(v) + s.Key = f } case "key_as_string": - if err := dec.Decode(&s.KeyAsString); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeyAsString = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); 
err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err 
!= nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } } } @@ -537,6 +635,7 @@ func (s HistogramBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogramgrouping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogramgrouping.go index 9e7bd6cce..dd4271221 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogramgrouping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogramgrouping.go @@ -16,16 +16,83 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // HistogramGrouping type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/_types/Groupings.ts#L44-L47 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/_types/Groupings.ts#L84-L97 type HistogramGrouping struct { - Fields []string `json:"fields"` - Interval int64 `json:"interval"` + // Fields The set of fields that you wish to build histograms for. + // All fields specified must be some kind of numeric. + // Order does not matter. + Fields []string `json:"fields"` + // Interval The interval of histogram buckets to be generated when rolling up. + // For example, a value of `5` creates buckets that are five units wide (`0-5`, + // `5-10`, etc). + // Note that only one interval can be specified in the histogram group, meaning + // that all fields being grouped via the histogram must share the same interval. 
+ Interval int64 `json:"interval"` +} + +func (s *HistogramGrouping) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Fields = append(s.Fields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { + return err + } + } + + case "interval": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Interval = value + case float64: + f := int64(v) + s.Interval = f + } + + } + } + return nil } // NewHistogramGrouping returns a HistogramGrouping. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogramproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogramproperty.go index 80f9a3b50..428db2e99 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogramproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/histogramproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // HistogramProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/specialized.ts#L54-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/specialized.ts#L54-L57 type HistogramProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` @@ -45,6 +45,7 @@ type HistogramProperty struct { } func (s *HistogramProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -64,6 +65,9 @@ func (s *HistogramProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -72,7 +76,9 @@ func (s *HistogramProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -351,28 +357,56 @@ func (s *HistogramProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if 
err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -381,7 +415,9 @@ func (s *HistogramProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -660,9 +696,11 @@ func (s *HistogramProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -676,6 +714,24 @@ func (s *HistogramProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s HistogramProperty) MarshalJSON() ([]byte, error) { + type innerHistogramProperty HistogramProperty + tmp := innerHistogramProperty{ + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Meta: s.Meta, + Properties: s.Properties, + Type: s.Type, + } + + tmp.Type = "histogram" + + return json.Marshal(tmp) +} + // NewHistogramProperty returns a HistogramProperty. 
func NewHistogramProperty() *HistogramProperty { r := &HistogramProperty{ @@ -684,7 +740,5 @@ func NewHistogramProperty() *HistogramProperty { Properties: make(map[string]Property, 0), } - r.Type = "histogram" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hit.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hit.go index e9eab85d4..c9b305158 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hit.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hit.go @@ -16,17 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // Hit type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/hits.ts#L40-L64 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/hits.ts#L40-L64 type Hit struct { Explanation_ *Explanation `json:"_explanation,omitempty"` Fields map[string]json.RawMessage `json:"fields,omitempty"` @@ -49,6 +53,164 @@ type Hit struct { Version_ *int64 `json:"_version,omitempty"` } +func (s *Hit) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_explanation": + if err := dec.Decode(&s.Explanation_); err != nil { + return err + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case 
"highlight": + if s.Highlight == nil { + s.Highlight = make(map[string][]string, 0) + } + if err := dec.Decode(&s.Highlight); err != nil { + return err + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "ignored_field_values": + if s.IgnoredFieldValues == nil { + s.IgnoredFieldValues = make(map[string][]string, 0) + } + if err := dec.Decode(&s.IgnoredFieldValues); err != nil { + return err + } + + case "_ignored": + if err := dec.Decode(&s.Ignored_); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "inner_hits": + if s.InnerHits == nil { + s.InnerHits = make(map[string]InnerHitsResult, 0) + } + if err := dec.Decode(&s.InnerHits); err != nil { + return err + } + + case "matched_queries": + if err := dec.Decode(&s.MatchedQueries); err != nil { + return err + } + + case "_nested": + if err := dec.Decode(&s.Nested_); err != nil { + return err + } + + case "_node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node_ = &o + + case "_primary_term": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PrimaryTerm_ = &value + case float64: + f := int64(v) + s.PrimaryTerm_ = &f + } + + case "_routing": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Routing_ = &o + + case "_score": + if err := dec.Decode(&s.Score_); err != nil { + return err + } + + case "_seq_no": + if err := dec.Decode(&s.SeqNo_); err != nil { + return err + } + + case "_shard": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Shard_ = &o + + case "sort": + if err := dec.Decode(&s.Sort); err != nil { + return err + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return err + } + + case "_version": + if err := dec.Decode(&s.Version_); err != nil { + return err + } + + } + } + return nil +} + // NewHit returns a Hit. func NewHit() *Hit { r := &Hit{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hitsevent.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hitsevent.go index 5c8d0f56f..b3703ae95 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hitsevent.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hitsevent.go @@ -16,17 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // HitsEvent type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/eql/_types/EqlHits.ts#L41-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/eql/_types/EqlHits.ts#L41-L49 type HitsEvent struct { Fields map[string][]json.RawMessage `json:"fields,omitempty"` // Id_ Unique identifier for the event. This ID is only unique within the index. 
@@ -37,6 +40,49 @@ type HitsEvent struct { Source_ json.RawMessage `json:"_source,omitempty"` } +func (s *HitsEvent) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string][]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return err + } + + } + } + return nil +} + // NewHitsEvent returns a HitsEvent. func NewHitsEvent() *HitsEvent { r := &HitsEvent{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hitsmetadata.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hitsmetadata.go index 085e2eee2..40dae2bb9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hitsmetadata.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hitsmetadata.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // HitsMetadata type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/hits.ts#L66-L72 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/hits.ts#L66-L72 type HitsMetadata struct { Hits []Hit `json:"hits"` MaxScore Float64 `json:"max_score,omitempty"` @@ -31,6 +38,41 @@ type HitsMetadata struct { Total *TotalHits `json:"total,omitempty"` } +func (s *HitsMetadata) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "hits": + if err := dec.Decode(&s.Hits); err != nil { + return err + } + + case "max_score": + if err := dec.Decode(&s.MaxScore); err != nil { + return err + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return err + } + + } + } + return nil +} + // NewHitsMetadata returns a HitsMetadata. func NewHitsMetadata() *HitsMetadata { r := &HitsMetadata{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hitssequence.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hitssequence.go index 775163a3a..321a1143a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hitssequence.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hitssequence.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // HitsSequence type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/eql/_types/EqlHits.ts#L51-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/eql/_types/EqlHits.ts#L51-L59 type HitsSequence struct { // Events Contains events matching the query. Each object represents a matching event. Events []HitsEvent `json:"events"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/holtlinearmodelsettings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/holtlinearmodelsettings.go index b2cc36be9..cc59bd3f4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/holtlinearmodelsettings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/holtlinearmodelsettings.go @@ -16,18 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // HoltLinearModelSettings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L231-L234 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L271-L274 type HoltLinearModelSettings struct { Alpha *float32 `json:"alpha,omitempty"` Beta *float32 `json:"beta,omitempty"` } +func (s *HoltLinearModelSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alpha": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Alpha = &f + case float64: + f := float32(v) + s.Alpha = &f + } + + case "beta": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Beta = &f + case float64: + f := float32(v) + s.Beta = &f + } + + } + } + return nil +} + // NewHoltLinearModelSettings returns a HoltLinearModelSettings. func NewHoltLinearModelSettings() *HoltLinearModelSettings { r := &HoltLinearModelSettings{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/holtmovingaverageaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/holtmovingaverageaggregation.go index 4c51a6d9c..d606d10c2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/holtmovingaverageaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/holtmovingaverageaggregation.go @@ -16,38 +16,43 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // HoltMovingAverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L217-L220 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L257-L260 type HoltMovingAverageAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Minimize *bool `json:"minimize,omitempty"` - Model string `json:"model,omitempty"` - Name *string `json:"name,omitempty"` - Predict *int `json:"predict,omitempty"` - Settings HoltLinearModelSettings `json:"settings"` - Window *int `json:"window,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. 
+ GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Minimize *bool `json:"minimize,omitempty"` + Model string `json:"model,omitempty"` + Name *string `json:"name,omitempty"` + Predict *int `json:"predict,omitempty"` + Settings HoltLinearModelSettings `json:"settings"` + Window *int `json:"window,omitempty"` } func (s *HoltMovingAverageAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -67,9 +72,16 @@ func (s *HoltMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -82,8 +94,17 @@ func (s *HoltMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "minimize": - if err := dec.Decode(&s.Minimize); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Minimize = &value + case bool: + s.Minimize = &v } case "model": @@ -92,13 +113,31 @@ func (s *HoltMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o case "predict": - if err := dec.Decode(&s.Predict); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Predict = &value + case float64: + f := int(v) + s.Predict = &f } case "settings": @@ -107,8 +146,19 @@ func 
(s *HoltMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "window": - if err := dec.Decode(&s.Window); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Window = &value + case float64: + f := int(v) + s.Window = &f } } @@ -116,11 +166,30 @@ func (s *HoltMovingAverageAggregation) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s HoltMovingAverageAggregation) MarshalJSON() ([]byte, error) { + type innerHoltMovingAverageAggregation HoltMovingAverageAggregation + tmp := innerHoltMovingAverageAggregation{ + BucketsPath: s.BucketsPath, + Format: s.Format, + GapPolicy: s.GapPolicy, + Meta: s.Meta, + Minimize: s.Minimize, + Model: s.Model, + Name: s.Name, + Predict: s.Predict, + Settings: s.Settings, + Window: s.Window, + } + + tmp.Model = "holt" + + return json.Marshal(tmp) +} + // NewHoltMovingAverageAggregation returns a HoltMovingAverageAggregation. func NewHoltMovingAverageAggregation() *HoltMovingAverageAggregation { r := &HoltMovingAverageAggregation{} - r.Model = "holt" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/holtwintersmodelsettings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/holtwintersmodelsettings.go index 4d2320dcb..689a383ca 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/holtwintersmodelsettings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/holtwintersmodelsettings.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/holtwinterstype" ) // HoltWintersModelSettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L235-L242 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L275-L282 type HoltWintersModelSettings struct { Alpha *float32 `json:"alpha,omitempty"` Beta *float32 `json:"beta,omitempty"` @@ -36,6 +42,109 @@ type HoltWintersModelSettings struct { Type *holtwinterstype.HoltWintersType `json:"type,omitempty"` } +func (s *HoltWintersModelSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alpha": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Alpha = &f + case float64: + f := float32(v) + s.Alpha = &f + } + + case "beta": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Beta = &f + case float64: + f := float32(v) + s.Beta = &f + } + + case "gamma": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Gamma = &f + case float64: + f := 
float32(v) + s.Gamma = &f + } + + case "pad": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Pad = &value + case bool: + s.Pad = &v + } + + case "period": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Period = &value + case float64: + f := int(v) + s.Period = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil +} + // NewHoltWintersModelSettings returns a HoltWintersModelSettings. func NewHoltWintersModelSettings() *HoltWintersModelSettings { r := &HoltWintersModelSettings{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/holtwintersmovingaverageaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/holtwintersmovingaverageaggregation.go index 702a55e59..f2aeb6b9a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/holtwintersmovingaverageaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/holtwintersmovingaverageaggregation.go @@ -16,38 +16,43 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // HoltWintersMovingAverageAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L222-L225 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L262-L265 type HoltWintersMovingAverageAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Minimize *bool `json:"minimize,omitempty"` - Model string `json:"model,omitempty"` - Name *string `json:"name,omitempty"` - Predict *int `json:"predict,omitempty"` - Settings HoltWintersModelSettings `json:"settings"` - Window *int `json:"window,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. 
+ GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Minimize *bool `json:"minimize,omitempty"` + Model string `json:"model,omitempty"` + Name *string `json:"name,omitempty"` + Predict *int `json:"predict,omitempty"` + Settings HoltWintersModelSettings `json:"settings"` + Window *int `json:"window,omitempty"` } func (s *HoltWintersMovingAverageAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -67,9 +72,16 @@ func (s *HoltWintersMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -82,8 +94,17 @@ func (s *HoltWintersMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "minimize": - if err := dec.Decode(&s.Minimize); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Minimize = &value + case bool: + s.Minimize = &v } case "model": @@ -92,13 +113,31 @@ func (s *HoltWintersMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o case "predict": - if err := dec.Decode(&s.Predict); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Predict = &value + case float64: + f := int(v) + s.Predict = &f } case 
"settings": @@ -107,8 +146,19 @@ func (s *HoltWintersMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "window": - if err := dec.Decode(&s.Window); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Window = &value + case float64: + f := int(v) + s.Window = &f } } @@ -116,11 +166,30 @@ func (s *HoltWintersMovingAverageAggregation) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s HoltWintersMovingAverageAggregation) MarshalJSON() ([]byte, error) { + type innerHoltWintersMovingAverageAggregation HoltWintersMovingAverageAggregation + tmp := innerHoltWintersMovingAverageAggregation{ + BucketsPath: s.BucketsPath, + Format: s.Format, + GapPolicy: s.GapPolicy, + Meta: s.Meta, + Minimize: s.Minimize, + Model: s.Model, + Name: s.Name, + Predict: s.Predict, + Settings: s.Settings, + Window: s.Window, + } + + tmp.Model = "holt_winters" + + return json.Marshal(tmp) +} + // NewHoltWintersMovingAverageAggregation returns a HoltWintersMovingAverageAggregation. func NewHoltWintersMovingAverageAggregation() *HoltWintersMovingAverageAggregation { r := &HoltWintersMovingAverageAggregation{} - r.Model = "holt_winters" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hop.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hop.go index 9b2d97bed..db3f6e13f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hop.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hop.go @@ -16,17 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Hop type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/graph/_types/Hop.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/graph/_types/Hop.ts#L23-L36 type Hop struct { - Connections *Hop `json:"connections,omitempty"` - Query Query `json:"query"` - Vertices []VertexDefinition `json:"vertices"` + // Connections Specifies one or more fields from which you want to extract terms that are + // associated with the specified vertices. + Connections *Hop `json:"connections,omitempty"` + // Query An optional guiding query that constrains the Graph API as it explores + // connected terms. + Query Query `json:"query"` + // Vertices Contains the fields you are interested in. + Vertices []VertexDefinition `json:"vertices"` } // NewHop returns a Hop. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hotthread.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hotthread.go index a2bbd3f70..71cd37eb2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hotthread.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hotthread.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // HotThread type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/hot_threads/types.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/hot_threads/types.ts#L23-L28 type HotThread struct { Hosts []string `json:"hosts"` NodeId string `json:"node_id"` @@ -30,6 +37,46 @@ type HotThread struct { Threads []string `json:"threads"` } +func (s *HotThread) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "hosts": + if err := dec.Decode(&s.Hosts); err != nil { + return err + } + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return err + } + + case "node_name": + if err := dec.Decode(&s.NodeName); err != nil { + return err + } + + case "threads": + if err := dec.Decode(&s.Threads); err != nil { + return err + } + + } + } + return nil +} + // NewHotThread returns a HotThread. func NewHotThread() *HotThread { r := &HotThread{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hourandminute.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hourandminute.go index c9e8fb4ba..8d3f6c4b5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hourandminute.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hourandminute.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // HourAndMinute type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Schedule.ts#L110-L113 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Schedule.ts#L110-L113 type HourAndMinute struct { Hour []int `json:"hour"` Minute []int `json:"minute"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hourlyschedule.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hourlyschedule.go index 20a062d5e..c4729d12a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hourlyschedule.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hourlyschedule.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // HourlySchedule type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Schedule.ts#L47-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Schedule.ts#L47-L49 type HourlySchedule struct { Minute []int `json:"minute"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/htmlstripcharfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/htmlstripcharfilter.go index 17a5ab612..31ece212a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/htmlstripcharfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/htmlstripcharfilter.go @@ -16,23 +16,71 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // HtmlStripCharFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/char_filters.ts#L43-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/char_filters.ts#L43-L45 type HtmlStripCharFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *HtmlStripCharFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s HtmlStripCharFilter) MarshalJSON() ([]byte, error) { + type innerHtmlStripCharFilter HtmlStripCharFilter + tmp := innerHtmlStripCharFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "html_strip" + + return json.Marshal(tmp) +} + // NewHtmlStripCharFilter returns a HtmlStripCharFilter. 
func NewHtmlStripCharFilter() *HtmlStripCharFilter { r := &HtmlStripCharFilter{} - r.Type = "html_strip" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/http.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/http.go index 3fc0ce64c..4b69e2ff1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/http.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/http.go @@ -16,17 +16,87 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Http type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L266-L270 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L633-L647 type Http struct { - Clients []Client `json:"clients,omitempty"` - CurrentOpen *int `json:"current_open,omitempty"` - TotalOpened *int64 `json:"total_opened,omitempty"` + // Clients Information on current and recently-closed HTTP client connections. + // Clients that have been closed longer than the + // `http.client_stats.closed_channels.max_age` setting will not be represented + // here. + Clients []Client `json:"clients,omitempty"` + // CurrentOpen Current number of open HTTP connections for the node. + CurrentOpen *int `json:"current_open,omitempty"` + // TotalOpened Total number of HTTP connections opened for the node. 
+ TotalOpened *int64 `json:"total_opened,omitempty"` +} + +func (s *Http) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "clients": + if err := dec.Decode(&s.Clients); err != nil { + return err + } + + case "current_open": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CurrentOpen = &value + case float64: + f := int(v) + s.CurrentOpen = &f + } + + case "total_opened": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalOpened = &value + case float64: + f := int64(v) + s.TotalOpened = &f + } + + } + } + return nil } // NewHttp returns a Http. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpemailattachment.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpemailattachment.go index 62a61936d..da9c3abfd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpemailattachment.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpemailattachment.go @@ -16,19 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // HttpEmailAttachment type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L218-L222 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L218-L222 type HttpEmailAttachment struct { ContentType *string `json:"content_type,omitempty"` Inline *bool `json:"inline,omitempty"` Request *HttpInputRequestDefinition `json:"request,omitempty"` } +func (s *HttpEmailAttachment) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "content_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ContentType = &o + + case "inline": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Inline = &value + case bool: + s.Inline = &v + } + + case "request": + if err := dec.Decode(&s.Request); err != nil { + return err + } + + } + } + return nil +} + // NewHttpEmailAttachment returns a HttpEmailAttachment. func NewHttpEmailAttachment() *HttpEmailAttachment { r := &HttpEmailAttachment{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpheaders.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpheaders.go index 3fbebc479..59c2e2f5f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpheaders.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpheaders.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // HttpHeaders type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L138-L138 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L152-L152 type HttpHeaders map[string][]string diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinput.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinput.go index 277f2ecb2..e479e5ce4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinput.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinput.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // HttpInput type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Input.ts#L44-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Input.ts#L44-L48 type HttpInput struct { Extract []string `json:"extract,omitempty"` Request *HttpInputRequestDefinition `json:"request,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputauthentication.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputauthentication.go index 6e16e3794..378488a40 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputauthentication.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputauthentication.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // HttpInputAuthentication type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Input.ts#L50-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Input.ts#L50-L52 type HttpInputAuthentication struct { Basic HttpInputBasicAuthentication `json:"basic"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputbasicauthentication.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputbasicauthentication.go index d38355feb..5d842e7b5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputbasicauthentication.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputbasicauthentication.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // HttpInputBasicAuthentication type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Input.ts#L54-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Input.ts#L54-L57 type HttpInputBasicAuthentication struct { Password string `json:"password"` Username string `json:"username"` } +func (s *HttpInputBasicAuthentication) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "password": + if err := dec.Decode(&s.Password); err != nil { + return err + } + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return err + } + + } + } + return nil +} + // NewHttpInputBasicAuthentication returns a HttpInputBasicAuthentication. func NewHttpInputBasicAuthentication() *HttpInputBasicAuthentication { r := &HttpInputBasicAuthentication{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputproxy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputproxy.go index e4a497f42..7516a32cd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputproxy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputproxy.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // HttpInputProxy type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Input.ts#L67-L70 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Input.ts#L67-L70 type HttpInputProxy struct { Host string `json:"host"` Port uint `json:"port"` } +func (s *HttpInputProxy) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return err + } + + case "port": + if err := dec.Decode(&s.Port); err != nil { + return err + } + + } + } + return nil +} + // NewHttpInputProxy returns a HttpInputProxy. func NewHttpInputProxy() *HttpInputProxy { r := &HttpInputProxy{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputrequestdefinition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputrequestdefinition.go index 1cea4a269..fb1c9e18d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputrequestdefinition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputrequestdefinition.go @@ -16,18 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/connectionscheme" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/httpinputmethod" ) // HttpInputRequestDefinition type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Input.ts#L72-L86 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Input.ts#L72-L86 type HttpInputRequestDefinition struct { Auth *HttpInputAuthentication `json:"auth,omitempty"` Body *string `json:"body,omitempty"` @@ -44,6 +50,118 @@ type HttpInputRequestDefinition struct { Url *string `json:"url,omitempty"` } +func (s *HttpInputRequestDefinition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "auth": + if err := dec.Decode(&s.Auth); err != nil { + return err + } + + case "body": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Body = &o + + case "connection_timeout": + if err := dec.Decode(&s.ConnectionTimeout); err != nil { + return err + } + + case "headers": + if s.Headers == nil { + s.Headers = make(map[string]string, 0) + } + if err := dec.Decode(&s.Headers); err != nil { + return err + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return err + } + + case "method": + if err := dec.Decode(&s.Method); err != nil { + return err + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]string, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return err + } + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = &o + + case "port": + if err := dec.Decode(&s.Port); err != nil { + return err + } + + case "proxy": + if err := dec.Decode(&s.Proxy); 
err != nil { + return err + } + + case "read_timeout": + if err := dec.Decode(&s.ReadTimeout); err != nil { + return err + } + + case "scheme": + if err := dec.Decode(&s.Scheme); err != nil { + return err + } + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = &o + + } + } + return nil +} + // NewHttpInputRequestDefinition returns a HttpInputRequestDefinition. func NewHttpInputRequestDefinition() *HttpInputRequestDefinition { r := &HttpInputRequestDefinition{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputrequestresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputrequestresult.go index 074930353..374b30b99 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputrequestresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputrequestresult.go @@ -16,18 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/connectionscheme" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/httpinputmethod" ) // HttpInputRequestResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L300-L300 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L300-L300 type HttpInputRequestResult struct { Auth *HttpInputAuthentication `json:"auth,omitempty"` Body *string `json:"body,omitempty"` @@ -44,6 +50,118 @@ type HttpInputRequestResult struct { Url *string `json:"url,omitempty"` } +func (s *HttpInputRequestResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "auth": + if err := dec.Decode(&s.Auth); err != nil { + return err + } + + case "body": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Body = &o + + case "connection_timeout": + if err := dec.Decode(&s.ConnectionTimeout); err != nil { + return err + } + + case "headers": + if s.Headers == nil { + s.Headers = make(map[string]string, 0) + } + if err := dec.Decode(&s.Headers); err != nil { + return err + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return err + } + + case "method": + if err := dec.Decode(&s.Method); err != nil { + return err + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]string, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return err + } + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = &o + + case "port": + if err := dec.Decode(&s.Port); err != nil { + return err + } + + case "proxy": + if err := dec.Decode(&s.Proxy); err != 
nil { + return err + } + + case "read_timeout": + if err := dec.Decode(&s.ReadTimeout); err != nil { + return err + } + + case "scheme": + if err := dec.Decode(&s.Scheme); err != nil { + return err + } + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = &o + + } + } + return nil +} + // NewHttpInputRequestResult returns a HttpInputRequestResult. func NewHttpInputRequestResult() *HttpInputRequestResult { r := &HttpInputRequestResult{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputresponseresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputresponseresult.go index 22c012002..40dc52720 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputresponseresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/httpinputresponseresult.go @@ -16,17 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // HttpInputResponseResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L302-L306 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L302-L306 type HttpInputResponseResult struct { - Body string `json:"body"` - Headers map[string][]string `json:"headers"` - Status int `json:"status"` + Body string `json:"body"` + Headers HttpHeaders `json:"headers"` + Status int `json:"status"` +} + +func (s *HttpInputResponseResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "body": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Body = o + + case "headers": + if err := dec.Decode(&s.Headers); err != nil { + return err + } + + case "status": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Status = value + case float64: + f := int(v) + s.Status = f + } + + } + } + return nil } // NewHttpInputResponseResult returns a HttpInputResponseResult. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hunspelltokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hunspelltokenfilter.go index b257a69d6..63c6742be 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hunspelltokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hunspelltokenfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // HunspellTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L199-L205 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L200-L206 type HunspellTokenFilter struct { Dedup *bool `json:"dedup,omitempty"` Dictionary *string `json:"dictionary,omitempty"` @@ -32,11 +40,108 @@ type HunspellTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *HunspellTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dedup": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Dedup = &value + case bool: + s.Dedup = &v + } + + case "dictionary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Dictionary = &o + + case "locale": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Locale = o + + case "longest_only": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.LongestOnly = &value + case bool: + 
s.LongestOnly = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s HunspellTokenFilter) MarshalJSON() ([]byte, error) { + type innerHunspellTokenFilter HunspellTokenFilter + tmp := innerHunspellTokenFilter{ + Dedup: s.Dedup, + Dictionary: s.Dictionary, + Locale: s.Locale, + LongestOnly: s.LongestOnly, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "hunspell" + + return json.Marshal(tmp) +} + // NewHunspellTokenFilter returns a HunspellTokenFilter. func NewHunspellTokenFilter() *HunspellTokenFilter { r := &HunspellTokenFilter{} - r.Type = "hunspell" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hyperparameter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hyperparameter.go index 60619eb6d..bfa564bf7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hyperparameter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hyperparameter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Hyperparameter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L206-L220 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L216-L230 type Hyperparameter struct { // AbsoluteImportance A positive number showing how much the parameter influences the variation of // the loss function. For hyperparameters with values that are not specified by @@ -42,6 +50,93 @@ type Hyperparameter struct { Value Float64 `json:"value"` } +func (s *Hyperparameter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "absolute_importance": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.AbsoluteImportance = &f + case float64: + f := Float64(v) + s.AbsoluteImportance = &f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "relative_importance": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.RelativeImportance = &f + case float64: + f := Float64(v) + s.RelativeImportance = &f + } + + case "supplied": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Supplied = value + case bool: + s.Supplied = v + } + + case "value": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Value = f + case float64: + f 
:= Float64(v) + s.Value = f + } + + } + } + return nil +} + // NewHyperparameter returns a Hyperparameter. func NewHyperparameter() *Hyperparameter { r := &Hyperparameter{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hyperparameters.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hyperparameters.go index 9c869d733..6aaecefdb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hyperparameters.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hyperparameters.go @@ -16,28 +16,363 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Hyperparameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L395-L410 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L419-L525 type Hyperparameters struct { - Alpha *Float64 `json:"alpha,omitempty"` - DownsampleFactor *Float64 `json:"downsample_factor,omitempty"` - Eta *Float64 `json:"eta,omitempty"` - EtaGrowthRatePerTree *Float64 `json:"eta_growth_rate_per_tree,omitempty"` - FeatureBagFraction *Float64 `json:"feature_bag_fraction,omitempty"` - Gamma *Float64 `json:"gamma,omitempty"` - Lambda *Float64 `json:"lambda,omitempty"` - MaxAttemptsToAddTree *int `json:"max_attempts_to_add_tree,omitempty"` - MaxOptimizationRoundsPerHyperparameter *int `json:"max_optimization_rounds_per_hyperparameter,omitempty"` - MaxTrees *int `json:"max_trees,omitempty"` - NumFolds *int `json:"num_folds,omitempty"` - 
NumSplitsPerFeature *int `json:"num_splits_per_feature,omitempty"` - SoftTreeDepthLimit *int `json:"soft_tree_depth_limit,omitempty"` - SoftTreeDepthTolerance *Float64 `json:"soft_tree_depth_tolerance,omitempty"` + // Alpha Advanced configuration option. + // Machine learning uses loss guided tree growing, which means that the decision + // trees grow where the regularized loss decreases most quickly. + // This parameter affects loss calculations by acting as a multiplier of the + // tree depth. + // Higher alpha values result in shallower trees and faster training times. + // By default, this value is calculated during hyperparameter optimization. + // It must be greater than or equal to zero. + Alpha *Float64 `json:"alpha,omitempty"` + // DownsampleFactor Advanced configuration option. + // Controls the fraction of data that is used to compute the derivatives of the + // loss function for tree training. + // A small value results in the use of a small fraction of the data. + // If this value is set to be less than 1, accuracy typically improves. + // However, too small a value may result in poor convergence for the ensemble + // and so require more trees. + // By default, this value is calculated during hyperparameter optimization. + // It must be greater than zero and less than or equal to 1. + DownsampleFactor *Float64 `json:"downsample_factor,omitempty"` + // Eta Advanced configuration option. + // The shrinkage applied to the weights. + // Smaller values result in larger forests which have a better generalization + // error. + // However, larger forests cause slower training. + // By default, this value is calculated during hyperparameter optimization. + // It must be a value between `0.001` and `1`. + Eta *Float64 `json:"eta,omitempty"` + // EtaGrowthRatePerTree Advanced configuration option. + // Specifies the rate at which `eta` increases for each new tree that is added + // to the forest. 
+ // For example, a rate of 1.05 increases `eta` by 5% for each extra tree. + // By default, this value is calculated during hyperparameter optimization. + // It must be between `0.5` and `2`. + EtaGrowthRatePerTree *Float64 `json:"eta_growth_rate_per_tree,omitempty"` + // FeatureBagFraction Advanced configuration option. + // Defines the fraction of features that will be used when selecting a random + // bag for each candidate split. + // By default, this value is calculated during hyperparameter optimization. + FeatureBagFraction *Float64 `json:"feature_bag_fraction,omitempty"` + // Gamma Advanced configuration option. + // Regularization parameter to prevent overfitting on the training data set. + // Multiplies a linear penalty associated with the size of individual trees in + // the forest. + // A high gamma value causes training to prefer small trees. + // A small gamma value results in larger individual trees and slower training. + // By default, this value is calculated during hyperparameter optimization. + // It must be a nonnegative value. + Gamma *Float64 `json:"gamma,omitempty"` + // Lambda Advanced configuration option. + // Regularization parameter to prevent overfitting on the training data set. + // Multiplies an L2 regularization term which applies to leaf weights of the + // individual trees in the forest. + // A high lambda value causes training to favor small leaf weights. + // This behavior makes the prediction function smoother at the expense of + // potentially not being able to capture relevant relationships between the + // features and the dependent variable. + // A small lambda value results in large individual trees and slower training. + // By default, this value is calculated during hyperparameter optimization. + // It must be a nonnegative value. 
+ Lambda *Float64 `json:"lambda,omitempty"` + // MaxAttemptsToAddTree If the algorithm fails to determine a non-trivial tree (more than a single + // leaf), this parameter determines how many of such consecutive failures are + // tolerated. + // Once the number of attempts exceeds the threshold, the forest training stops. + MaxAttemptsToAddTree *int `json:"max_attempts_to_add_tree,omitempty"` + // MaxOptimizationRoundsPerHyperparameter Advanced configuration option. + // A multiplier responsible for determining the maximum number of hyperparameter + // optimization steps in the Bayesian optimization procedure. + // The maximum number of steps is determined based on the number of undefined + // hyperparameters times the maximum optimization rounds per hyperparameter. + // By default, this value is calculated during hyperparameter optimization. + MaxOptimizationRoundsPerHyperparameter *int `json:"max_optimization_rounds_per_hyperparameter,omitempty"` + // MaxTrees Advanced configuration option. + // Defines the maximum number of decision trees in the forest. + // The maximum value is 2000. + // By default, this value is calculated during hyperparameter optimization. + MaxTrees *int `json:"max_trees,omitempty"` + // NumFolds The maximum number of folds for the cross-validation procedure. + NumFolds *int `json:"num_folds,omitempty"` + // NumSplitsPerFeature Determines the maximum number of splits for every feature that can occur in a + // decision tree when the tree is trained. + NumSplitsPerFeature *int `json:"num_splits_per_feature,omitempty"` + // SoftTreeDepthLimit Advanced configuration option. + // Machine learning uses loss guided tree growing, which means that the decision + // trees grow where the regularized loss decreases most quickly. + // This soft limit combines with the `soft_tree_depth_tolerance` to penalize + // trees that exceed the specified depth; the regularized loss increases quickly + // beyond this depth. 
+ // By default, this value is calculated during hyperparameter optimization. + // It must be greater than or equal to 0. + SoftTreeDepthLimit *int `json:"soft_tree_depth_limit,omitempty"` + // SoftTreeDepthTolerance Advanced configuration option. + // This option controls how quickly the regularized loss increases when the tree + // depth exceeds `soft_tree_depth_limit`. + // By default, this value is calculated during hyperparameter optimization. + // It must be greater than or equal to 0.01. + SoftTreeDepthTolerance *Float64 `json:"soft_tree_depth_tolerance,omitempty"` +} + +func (s *Hyperparameters) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alpha": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Alpha = &f + case float64: + f := Float64(v) + s.Alpha = &f + } + + case "downsample_factor": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.DownsampleFactor = &f + case float64: + f := Float64(v) + s.DownsampleFactor = &f + } + + case "eta": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Eta = &f + case float64: + f := Float64(v) + s.Eta = &f + } + + case "eta_growth_rate_per_tree": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.EtaGrowthRatePerTree = &f + case float64: + f := Float64(v) + s.EtaGrowthRatePerTree = &f + } + + case 
"feature_bag_fraction": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.FeatureBagFraction = &f + case float64: + f := Float64(v) + s.FeatureBagFraction = &f + } + + case "gamma": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Gamma = &f + case float64: + f := Float64(v) + s.Gamma = &f + } + + case "lambda": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Lambda = &f + case float64: + f := Float64(v) + s.Lambda = &f + } + + case "max_attempts_to_add_tree": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxAttemptsToAddTree = &value + case float64: + f := int(v) + s.MaxAttemptsToAddTree = &f + } + + case "max_optimization_rounds_per_hyperparameter": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxOptimizationRoundsPerHyperparameter = &value + case float64: + f := int(v) + s.MaxOptimizationRoundsPerHyperparameter = &f + } + + case "max_trees": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxTrees = &value + case float64: + f := int(v) + s.MaxTrees = &f + } + + case "num_folds": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumFolds = &value + case float64: + f := int(v) + s.NumFolds = &f + } + + case "num_splits_per_feature": + 
+ var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumSplitsPerFeature = &value + case float64: + f := int(v) + s.NumSplitsPerFeature = &f + } + + case "soft_tree_depth_limit": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SoftTreeDepthLimit = &value + case float64: + f := int(v) + s.SoftTreeDepthLimit = &f + } + + case "soft_tree_depth_tolerance": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.SoftTreeDepthTolerance = &f + case float64: + f := Float64(v) + s.SoftTreeDepthTolerance = &f + } + + } + } + return nil } // NewHyperparameters returns a Hyperparameters. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hyphenationdecompoundertokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hyphenationdecompoundertokenfilter.go index dcb36efb5..ef249373b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hyphenationdecompoundertokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/hyphenationdecompoundertokenfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // HyphenationDecompounderTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L57-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L58-L60 type HyphenationDecompounderTokenFilter struct { HyphenationPatternsPath *string `json:"hyphenation_patterns_path,omitempty"` MaxSubwordSize *int `json:"max_subword_size,omitempty"` @@ -35,11 +43,150 @@ type HyphenationDecompounderTokenFilter struct { WordListPath *string `json:"word_list_path,omitempty"` } +func (s *HyphenationDecompounderTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "hyphenation_patterns_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.HyphenationPatternsPath = &o + + case "max_subword_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxSubwordSize = &value + case float64: + f := int(v) + s.MaxSubwordSize = &f + } + + case "min_subword_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinSubwordSize = &value + case float64: + f := int(v) + s.MinSubwordSize = &f + } + + case "min_word_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinWordSize = &value + case float64: + f := int(v) + s.MinWordSize = &f + } + + case "only_longest_match": + var tmp interface{} + 
dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.OnlyLongestMatch = &value + case bool: + s.OnlyLongestMatch = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + case "word_list": + if err := dec.Decode(&s.WordList); err != nil { + return err + } + + case "word_list_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WordListPath = &o + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s HyphenationDecompounderTokenFilter) MarshalJSON() ([]byte, error) { + type innerHyphenationDecompounderTokenFilter HyphenationDecompounderTokenFilter + tmp := innerHyphenationDecompounderTokenFilter{ + HyphenationPatternsPath: s.HyphenationPatternsPath, + MaxSubwordSize: s.MaxSubwordSize, + MinSubwordSize: s.MinSubwordSize, + MinWordSize: s.MinWordSize, + OnlyLongestMatch: s.OnlyLongestMatch, + Type: s.Type, + Version: s.Version, + WordList: s.WordList, + WordListPath: s.WordListPath, + } + + tmp.Type = "hyphenation_decompounder" + + return json.Marshal(tmp) +} + // NewHyphenationDecompounderTokenFilter returns a HyphenationDecompounderTokenFilter. 
func NewHyphenationDecompounderTokenFilter() *HyphenationDecompounderTokenFilter { r := &HyphenationDecompounderTokenFilter{} - r.Type = "hyphenation_decompounder" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icuanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icuanalyzer.go index 202375245..f66c979f6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icuanalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icuanalyzer.go @@ -16,29 +16,43 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationmode" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationtype" ) // IcuAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/icu-plugin.ts#L67-L71 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/icu-plugin.ts#L67-L71 type IcuAnalyzer struct { Method icunormalizationtype.IcuNormalizationType `json:"method"` Mode icunormalizationmode.IcuNormalizationMode `json:"mode"` Type string `json:"type,omitempty"` } +// MarshalJSON override marshalling to include literal value +func (s IcuAnalyzer) MarshalJSON() ([]byte, error) { + type innerIcuAnalyzer IcuAnalyzer + tmp := innerIcuAnalyzer{ + Method: s.Method, + Mode: s.Mode, + Type: s.Type, + } + + tmp.Type = "icu_analyzer" + + return json.Marshal(tmp) +} + // NewIcuAnalyzer returns a IcuAnalyzer. 
func NewIcuAnalyzer() *IcuAnalyzer { r := &IcuAnalyzer{} - r.Type = "icu_analyzer" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icucollationtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icucollationtokenfilter.go index 237507349..f1f786a50 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icucollationtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icucollationtokenfilter.go @@ -16,11 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationalternate" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationcasefirst" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationdecomposition" @@ -29,7 +35,7 @@ import ( // IcuCollationTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/icu-plugin.ts#L51-L65 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/icu-plugin.ts#L51-L65 type IcuCollationTokenFilter struct { Alternate *icucollationalternate.IcuCollationAlternate `json:"alternate,omitempty"` CaseFirst *icucollationcasefirst.IcuCollationCaseFirst `json:"caseFirst,omitempty"` @@ -47,11 +53,186 @@ type IcuCollationTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *IcuCollationTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alternate": + if err := dec.Decode(&s.Alternate); err != nil { + return err + } + + case "caseFirst": + if err := dec.Decode(&s.CaseFirst); err != nil { + return err + } + + case "caseLevel": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.CaseLevel = &value + case bool: + s.CaseLevel = &v + } + + case "country": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Country = &o + + case "decomposition": + if err := dec.Decode(&s.Decomposition); err != nil { + return err + } + + case "hiraganaQuaternaryMode": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.HiraganaQuaternaryMode = &value + case bool: + s.HiraganaQuaternaryMode = &v + } + + case "language": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := 
string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Language = &o + + case "numeric": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Numeric = &value + case bool: + s.Numeric = &v + } + + case "rules": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Rules = &o + + case "strength": + if err := dec.Decode(&s.Strength); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "variableTop": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VariableTop = &o + + case "variant": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Variant = &o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IcuCollationTokenFilter) MarshalJSON() ([]byte, error) { + type innerIcuCollationTokenFilter IcuCollationTokenFilter + tmp := innerIcuCollationTokenFilter{ + Alternate: s.Alternate, + CaseFirst: s.CaseFirst, + CaseLevel: s.CaseLevel, + Country: s.Country, + Decomposition: s.Decomposition, + HiraganaQuaternaryMode: s.HiraganaQuaternaryMode, + Language: s.Language, + Numeric: s.Numeric, + Rules: s.Rules, + Strength: s.Strength, + Type: s.Type, + VariableTop: s.VariableTop, + Variant: s.Variant, + Version: s.Version, + } + + tmp.Type = "icu_collation" + + return json.Marshal(tmp) +} + // NewIcuCollationTokenFilter returns a 
IcuCollationTokenFilter. func NewIcuCollationTokenFilter() *IcuCollationTokenFilter { r := &IcuCollationTokenFilter{} - r.Type = "icu_collation" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icufoldingtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icufoldingtokenfilter.go index f35ff338f..1d61c78ba 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icufoldingtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icufoldingtokenfilter.go @@ -16,24 +16,86 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IcuFoldingTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/icu-plugin.ts#L46-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/icu-plugin.ts#L46-L49 type IcuFoldingTokenFilter struct { Type string `json:"type,omitempty"` UnicodeSetFilter string `json:"unicode_set_filter"` Version *string `json:"version,omitempty"` } +func (s *IcuFoldingTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "unicode_set_filter": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + 
o = string(tmp[:]) + } + s.UnicodeSetFilter = o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IcuFoldingTokenFilter) MarshalJSON() ([]byte, error) { + type innerIcuFoldingTokenFilter IcuFoldingTokenFilter + tmp := innerIcuFoldingTokenFilter{ + Type: s.Type, + UnicodeSetFilter: s.UnicodeSetFilter, + Version: s.Version, + } + + tmp.Type = "icu_folding" + + return json.Marshal(tmp) +} + // NewIcuFoldingTokenFilter returns a IcuFoldingTokenFilter. func NewIcuFoldingTokenFilter() *IcuFoldingTokenFilter { r := &IcuFoldingTokenFilter{} - r.Type = "icu_folding" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icunormalizationcharfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icunormalizationcharfilter.go index 69061f9e7..edab4ff00 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icunormalizationcharfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icunormalizationcharfilter.go @@ -16,18 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationmode" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationtype" ) // IcuNormalizationCharFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/icu-plugin.ts#L40-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/icu-plugin.ts#L40-L44 type IcuNormalizationCharFilter struct { Mode *icunormalizationmode.IcuNormalizationMode `json:"mode,omitempty"` Name *icunormalizationtype.IcuNormalizationType `json:"name,omitempty"` @@ -35,11 +40,64 @@ type IcuNormalizationCharFilter struct { Version *string `json:"version,omitempty"` } +func (s *IcuNormalizationCharFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IcuNormalizationCharFilter) MarshalJSON() ([]byte, error) { + type innerIcuNormalizationCharFilter IcuNormalizationCharFilter + tmp := innerIcuNormalizationCharFilter{ + Mode: s.Mode, + Name: s.Name, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "icu_normalizer" + + return json.Marshal(tmp) +} + // NewIcuNormalizationCharFilter returns a IcuNormalizationCharFilter. 
func NewIcuNormalizationCharFilter() *IcuNormalizationCharFilter { r := &IcuNormalizationCharFilter{} - r.Type = "icu_normalizer" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icunormalizationtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icunormalizationtokenfilter.go index d4b0cb488..82aa2b923 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icunormalizationtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icunormalizationtokenfilter.go @@ -16,28 +16,80 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationtype" ) // IcuNormalizationTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/icu-plugin.ts#L35-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/icu-plugin.ts#L35-L38 type IcuNormalizationTokenFilter struct { Name icunormalizationtype.IcuNormalizationType `json:"name"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *IcuNormalizationTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IcuNormalizationTokenFilter) MarshalJSON() ([]byte, error) { + type innerIcuNormalizationTokenFilter IcuNormalizationTokenFilter + tmp := innerIcuNormalizationTokenFilter{ + Name: s.Name, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "icu_normalizer" + + return json.Marshal(tmp) +} + // NewIcuNormalizationTokenFilter returns a IcuNormalizationTokenFilter. 
func NewIcuNormalizationTokenFilter() *IcuNormalizationTokenFilter { r := &IcuNormalizationTokenFilter{} - r.Type = "icu_normalizer" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icutokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icutokenizer.go index d7bbf13a8..d0f11b5c0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icutokenizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icutokenizer.go @@ -16,24 +16,86 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IcuTokenizer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/icu-plugin.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/icu-plugin.ts#L30-L33 type IcuTokenizer struct { RuleFiles string `json:"rule_files"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *IcuTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "rule_files": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RuleFiles = o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); 
err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IcuTokenizer) MarshalJSON() ([]byte, error) { + type innerIcuTokenizer IcuTokenizer + tmp := innerIcuTokenizer{ + RuleFiles: s.RuleFiles, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "icu_tokenizer" + + return json.Marshal(tmp) +} + // NewIcuTokenizer returns a IcuTokenizer. func NewIcuTokenizer() *IcuTokenizer { r := &IcuTokenizer{} - r.Type = "icu_tokenizer" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icutransformtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icutransformtokenfilter.go index 1a1c01976..10be7b238 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icutransformtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/icutransformtokenfilter.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icutransformdirection" ) // IcuTransformTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/icu-plugin.ts#L24-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/icu-plugin.ts#L24-L28 type IcuTransformTokenFilter struct { Dir *icutransformdirection.IcuTransformDirection `json:"dir,omitempty"` Id string `json:"id"` @@ -34,11 +40,71 @@ type IcuTransformTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *IcuTransformTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dir": + if err := dec.Decode(&s.Dir); err != nil { + return err + } + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s IcuTransformTokenFilter) MarshalJSON() ([]byte, error) { + type innerIcuTransformTokenFilter IcuTransformTokenFilter + tmp := innerIcuTransformTokenFilter{ + Dir: s.Dir, + Id: s.Id, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "icu_transform" + + return json.Marshal(tmp) +} + // NewIcuTransformTokenFilter returns a IcuTransformTokenFilter. 
func NewIcuTransformTokenFilter() *IcuTransformTokenFilter { r := &IcuTransformTokenFilter{} - r.Type = "icu_transform" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ids.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ids.go index 01c7752bb..2ea9cfdd0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ids.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ids.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Ids type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L56-L56 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L56-L56 type Ids []string diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/idsquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/idsquery.go index 54e50c3e4..d6df13a36 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/idsquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/idsquery.go @@ -16,17 +16,95 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IdsQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/term.ts#L53-L55 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/term.ts#L80-L85 type IdsQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. Boost *float32 `json:"boost,omitempty"` QueryName_ *string `json:"_name,omitempty"` - Values []string `json:"values,omitempty"` + // Values An array of document IDs. + Values []string `json:"values,omitempty"` +} + +func (s *IdsQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "values": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Values = append(s.Values, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Values); err != nil { + return err + } + } + + } + } + return nil } // NewIdsQuery returns a IdsQuery. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilm.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilm.go index bc63fe50c..3c3123aa1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilm.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilm.go @@ -16,18 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Ilm type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L162-L165 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L162-L165 type Ilm struct { PolicyCount int `json:"policy_count"` PolicyStats []IlmPolicyStatistics `json:"policy_stats"` } +func (s *Ilm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "policy_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PolicyCount = value + case float64: + f := int(v) + s.PolicyCount = f + } + + case "policy_stats": + if err := dec.Decode(&s.PolicyStats); err != nil { + return err + } + + } + } + return nil +} + // NewIlm returns a Ilm. 
func NewIlm() *Ilm { r := &Ilm{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmindicator.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmindicator.go new file mode 100644 index 000000000..f2ff2a0e5 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmindicator.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indicatorhealthstatus" +) + +// IlmIndicator type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L145-L149 +type IlmIndicator struct { + Details *IlmIndicatorDetails `json:"details,omitempty"` + Diagnosis []Diagnosis `json:"diagnosis,omitempty"` + Impacts []Impact `json:"impacts,omitempty"` + Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` + Symptom string `json:"symptom"` +} + +func (s *IlmIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return err + } + + case "diagnosis": + if err := dec.Decode(&s.Diagnosis); err != nil { + return err + } + + case "impacts": + if err := dec.Decode(&s.Impacts); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "symptom": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Symptom = o + + } + } + return nil +} + +// NewIlmIndicator returns a IlmIndicator. +func NewIlmIndicator() *IlmIndicator { + r := &IlmIndicator{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmindicatordetails.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmindicatordetails.go new file mode 100644 index 000000000..c4427bd3e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmindicatordetails.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/lifecycleoperationmode" +) + +// IlmIndicatorDetails type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L150-L153 +type IlmIndicatorDetails struct { + IlmStatus lifecycleoperationmode.LifecycleOperationMode `json:"ilm_status"` + Policies int64 `json:"policies"` +} + +func (s *IlmIndicatorDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ilm_status": + if err := dec.Decode(&s.IlmStatus); err != nil { + return err + } + + case "policies": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Policies = value + case float64: + f := int64(v) + s.Policies = f + } + + } + } + return nil +} + +// NewIlmIndicatorDetails returns a IlmIndicatorDetails. 
+func NewIlmIndicatorDetails() *IlmIndicatorDetails { + r := &IlmIndicatorDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmpolicy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmpolicy.go index 9a9665262..8046e1e50 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmpolicy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmpolicy.go @@ -16,20 +16,53 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // IlmPolicy type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/_types/Policy.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/_types/Policy.ts#L23-L26 type IlmPolicy struct { - Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` - Phases Phases `json:"phases"` + Meta_ Metadata `json:"_meta,omitempty"` + Phases Phases `json:"phases"` +} + +func (s *IlmPolicy) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return err + } + + case "phases": + if err := dec.Decode(&s.Phases); err != nil { + return err + } + + } + } + return nil } // NewIlmPolicy returns a IlmPolicy. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmpolicystatistics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmpolicystatistics.go index 2ead2bfd9..803e06897 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmpolicystatistics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmpolicystatistics.go @@ -16,18 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IlmPolicyStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L157-L160 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L157-L160 type IlmPolicyStatistics struct { IndicesManaged int `json:"indices_managed"` Phases Phases `json:"phases"` } +func (s *IlmPolicyStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "indices_managed": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IndicesManaged = value + case float64: + f := int(v) + s.IndicesManaged = f + } + + case "phases": + if err := dec.Decode(&s.Phases); err != nil { + return err + } + + } + } + return nil +} + // NewIlmPolicyStatistics returns a IlmPolicyStatistics. 
func NewIlmPolicyStatistics() *IlmPolicyStatistics { r := &IlmPolicyStatistics{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/impact.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/impact.go new file mode 100644 index 000000000..7dd56ccb1 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/impact.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/impactarea" +) + +// Impact type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L65-L70 +type Impact struct { + Description string `json:"description"` + Id string `json:"id"` + ImpactAreas []impactarea.ImpactArea `json:"impact_areas"` + Severity int `json:"severity"` +} + +func (s *Impact) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = o + + case "impact_areas": + if err := dec.Decode(&s.ImpactAreas); err != nil { + return err + } + + case "severity": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Severity = value + case float64: + f := int(v) + s.Severity = f + } + + } + } + return nil +} + +// NewImpact returns a Impact. +func NewImpact() *Impact { + r := &Impact{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexaction.go index a551d9271..e9f71e34f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexaction.go @@ -16,18 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/optype" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh" ) // IndexAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L256-L265 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L256-L265 type IndexAction struct { DocId *string `json:"doc_id,omitempty"` ExecutionTimeField *string `json:"execution_time_field,omitempty"` @@ -37,6 +42,56 @@ type IndexAction struct { Timeout Duration `json:"timeout,omitempty"` } +func (s *IndexAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_id": + if err := dec.Decode(&s.DocId); err != nil { + return err + } + + case "execution_time_field": + if err := dec.Decode(&s.ExecutionTimeField); err != nil { + return err + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "op_type": + if err := dec.Decode(&s.OpType); err != nil { + return err + } + + case "refresh": + if err := dec.Decode(&s.Refresh); err != nil { + return err + } + + case "timeout": + if err := dec.Decode(&s.Timeout); err != nil { + return err + } + + } + } + return nil +} + // NewIndexAction returns a IndexAction. 
func NewIndexAction() *IndexAction { r := &IndexAction{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexaliases.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexaliases.go index 6bd177a13..2e0c98074 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexaliases.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexaliases.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // IndexAliases type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/get_alias/IndicesGetAliasResponse.ts#L36-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/get_alias/IndicesGetAliasResponse.ts#L36-L38 type IndexAliases struct { Aliases map[string]AliasDefinition `json:"aliases"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexanddatastreamaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexanddatastreamaction.go index 7a65e11f3..4c0ebfcab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexanddatastreamaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexanddatastreamaction.go @@ -16,16 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // IndexAndDataStreamAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/modify_data_stream/types.ts#L28-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/modify_data_stream/types.ts#L39-L44 type IndexAndDataStreamAction struct { + // DataStream Data stream targeted by the action. DataStream string `json:"data_stream"` - Index string `json:"index"` + // Index Index for the action. + Index string `json:"index"` +} + +func (s *IndexAndDataStreamAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data_stream": + if err := dec.Decode(&s.DataStream); err != nil { + return err + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + } + } + return nil } // NewIndexAndDataStreamAction returns a IndexAndDataStreamAction. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexcapabilities.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexcapabilities.go index c72c54213..1a87cec1b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexcapabilities.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexcapabilities.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // IndexCapabilities type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/get_rollup_index_caps/types.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/get_rollup_index_caps/types.ts#L24-L26 type IndexCapabilities struct { RollupJobs []RollupJobSummary `json:"rollup_jobs"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexdetails.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexdetails.go index 8eeb50f7e..c5e704d81 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexdetails.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexdetails.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IndexDetails type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/_types/SnapshotIndexDetails.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/_types/SnapshotIndexDetails.ts#L23-L28 type IndexDetails struct { MaxSegmentsPerShard int64 `json:"max_segments_per_shard"` ShardCount int `json:"shard_count"` @@ -30,6 +38,77 @@ type IndexDetails struct { SizeInBytes int64 `json:"size_in_bytes"` } +func (s *IndexDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_segments_per_shard": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MaxSegmentsPerShard = value + case float64: + f := int64(v) + s.MaxSegmentsPerShard = f + } + + case "shard_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShardCount = value + case float64: + f := int(v) + s.ShardCount = f + } + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return err + } + + case "size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + } + } + return nil +} + // NewIndexDetails returns a IndexDetails. 
func NewIndexDetails() *IndexDetails { r := &IndexDetails{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexfield.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexfield.go index 7e7ffe1ce..9eacc7d17 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexfield.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexfield.go @@ -16,17 +16,59 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IndexField type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/meta-fields.ts#L46-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/meta-fields.ts#L46-L48 type IndexField struct { Enabled bool `json:"enabled"` } +func (s *IndexField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + // NewIndexField returns a IndexField. 
func NewIndexField() *IndexField { r := &IndexField{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexhealthstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexhealthstats.go index 3225c8a38..ea242300b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexhealthstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexhealthstats.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/healthstatus" ) // IndexHealthStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/health/types.ts#L24-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/health/types.ts#L24-L34 type IndexHealthStats struct { ActivePrimaryShards int `json:"active_primary_shards"` ActiveShards int `json:"active_shards"` @@ -39,6 +45,151 @@ type IndexHealthStats struct { UnassignedShards int `json:"unassigned_shards"` } +func (s *IndexHealthStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active_primary_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ActivePrimaryShards = value + case float64: + f := int(v) + s.ActivePrimaryShards = f + } + + 
case "active_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ActiveShards = value + case float64: + f := int(v) + s.ActiveShards = f + } + + case "initializing_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.InitializingShards = value + case float64: + f := int(v) + s.InitializingShards = f + } + + case "number_of_replicas": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumberOfReplicas = value + case float64: + f := int(v) + s.NumberOfReplicas = f + } + + case "number_of_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumberOfShards = value + case float64: + f := int(v) + s.NumberOfShards = f + } + + case "relocating_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.RelocatingShards = value + case float64: + f := int(v) + s.RelocatingShards = f + } + + case "shards": + if s.Shards == nil { + s.Shards = make(map[string]ShardHealthStats, 0) + } + if err := dec.Decode(&s.Shards); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "unassigned_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.UnassignedShards = value + case float64: + f := int(v) + s.UnassignedShards = f + } + + } + } + return nil +} + // NewIndexHealthStats returns a IndexHealthStats. 
func NewIndexHealthStats() *IndexHealthStats { r := &IndexHealthStats{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexingpressurememorysummary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexingpressurememorysummary.go index 8bd9ce6bb..aef615fd1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexingpressurememorysummary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexingpressurememorysummary.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IndexingPressureMemorySummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L309-L318 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L580-L589 type IndexingPressureMemorySummary struct { AllInBytes int64 `json:"all_in_bytes"` CombinedCoordinatingAndPrimaryInBytes int64 `json:"combined_coordinating_and_primary_in_bytes"` @@ -34,6 +42,146 @@ type IndexingPressureMemorySummary struct { ReplicaRejections *int64 `json:"replica_rejections,omitempty"` } +func (s *IndexingPressureMemorySummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "all_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return 
err + } + s.AllInBytes = value + case float64: + f := int64(v) + s.AllInBytes = f + } + + case "combined_coordinating_and_primary_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CombinedCoordinatingAndPrimaryInBytes = value + case float64: + f := int64(v) + s.CombinedCoordinatingAndPrimaryInBytes = f + } + + case "coordinating_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CoordinatingInBytes = value + case float64: + f := int64(v) + s.CoordinatingInBytes = f + } + + case "coordinating_rejections": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CoordinatingRejections = &value + case float64: + f := int64(v) + s.CoordinatingRejections = &f + } + + case "primary_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PrimaryInBytes = value + case float64: + f := int64(v) + s.PrimaryInBytes = f + } + + case "primary_rejections": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PrimaryRejections = &value + case float64: + f := int64(v) + s.PrimaryRejections = &f + } + + case "replica_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ReplicaInBytes = value + case float64: + f := int64(v) + s.ReplicaInBytes = f + } + + case "replica_rejections": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ReplicaRejections = &value + case float64: + f := int64(v) + s.ReplicaRejections = &f + } + + } + } + return nil +} + // NewIndexingPressureMemorySummary returns a IndexingPressureMemorySummary. func NewIndexingPressureMemorySummary() *IndexingPressureMemorySummary { r := &IndexingPressureMemorySummary{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexingslowlogsettings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexingslowlogsettings.go new file mode 100644 index 000000000..e7f918938 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexingslowlogsettings.go @@ -0,0 +1,113 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// IndexingSlowlogSettings type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L547-L552 +type IndexingSlowlogSettings struct { + Level *string `json:"level,omitempty"` + Reformat *bool `json:"reformat,omitempty"` + Source *int `json:"source,omitempty"` + Threshold *IndexingSlowlogTresholds `json:"threshold,omitempty"` +} + +func (s *IndexingSlowlogSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "level": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Level = &o + + case "reformat": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Reformat = &value + case bool: + s.Reformat = &v + } + + case "source": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Source = &value + case float64: + f := int(v) + s.Source = &f + } + + case "threshold": + if err := dec.Decode(&s.Threshold); err != nil { + return err + } + + } + } + return nil +} + +// NewIndexingSlowlogSettings returns a IndexingSlowlogSettings. 
+func NewIndexingSlowlogSettings() *IndexingSlowlogSettings { + r := &IndexingSlowlogSettings{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexingslowlogtresholds.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexingslowlogtresholds.go new file mode 100644 index 000000000..47ebf71b0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexingslowlogtresholds.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +// IndexingSlowlogTresholds type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L554-L561 +type IndexingSlowlogTresholds struct { + // Index The indexing slow log, similar in functionality to the search slow log. The + // log file name ends with `_index_indexing_slowlog.json`. + // Log and the thresholds are configured in the same way as the search slowlog. 
+ Index *SlowlogTresholdLevels `json:"index,omitempty"` +} + +// NewIndexingSlowlogTresholds returns a IndexingSlowlogTresholds. +func NewIndexingSlowlogTresholds() *IndexingSlowlogTresholds { + r := &IndexingSlowlogTresholds{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexingstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexingstats.go index cc931a584..14dbbdf99 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexingstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexingstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IndexingStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L101-L117 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L143-L159 type IndexingStats struct { DeleteCurrent int64 `json:"delete_current"` DeleteTime Duration `json:"delete_time,omitempty"` @@ -41,6 +49,184 @@ type IndexingStats struct { WriteLoad *Float64 `json:"write_load,omitempty"` } +func (s *IndexingStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "delete_current": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DeleteCurrent = value + case float64: + f := int64(v) + s.DeleteCurrent = f + } + + case "delete_time": + if err := dec.Decode(&s.DeleteTime); err != nil { + return err + } + + case "delete_time_in_millis": + if err := dec.Decode(&s.DeleteTimeInMillis); err != nil { + return err + } + + case "delete_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DeleteTotal = value + case float64: + f := int64(v) + s.DeleteTotal = f + } + + case "index_current": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndexCurrent = value + case float64: + f := int64(v) + s.IndexCurrent = f + } + + case "index_failed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndexFailed = value + case float64: + f := 
int64(v) + s.IndexFailed = f + } + + case "index_time": + if err := dec.Decode(&s.IndexTime); err != nil { + return err + } + + case "index_time_in_millis": + if err := dec.Decode(&s.IndexTimeInMillis); err != nil { + return err + } + + case "index_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndexTotal = value + case float64: + f := int64(v) + s.IndexTotal = f + } + + case "is_throttled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsThrottled = value + case bool: + s.IsThrottled = v + } + + case "noop_update_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NoopUpdateTotal = value + case float64: + f := int64(v) + s.NoopUpdateTotal = f + } + + case "throttle_time": + if err := dec.Decode(&s.ThrottleTime); err != nil { + return err + } + + case "throttle_time_in_millis": + if err := dec.Decode(&s.ThrottleTimeInMillis); err != nil { + return err + } + + case "types": + if s.Types == nil { + s.Types = make(map[string]IndexingStats, 0) + } + if err := dec.Decode(&s.Types); err != nil { + return err + } + + case "write_load": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.WriteLoad = &f + case float64: + f := Float64(v) + s.WriteLoad = &f + } + + } + } + return nil +} + // NewIndexingStats returns a IndexingStats. 
func NewIndexingStats() *IndexingStats { r := &IndexingStats{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexmappingrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexmappingrecord.go index 1167212c3..b8d57088a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexmappingrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexmappingrecord.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // IndexMappingRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/get_mapping/IndicesGetMappingResponse.ts#L28-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/get_mapping/IndicesGetMappingResponse.ts#L28-L31 type IndexMappingRecord struct { Item *TypeMapping `json:"item,omitempty"` Mappings TypeMapping `json:"mappings"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexoperation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexoperation.go new file mode 100644 index 000000000..eb066f424 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexoperation.go @@ -0,0 +1,170 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" +) + +// IndexOperation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/bulk/types.ts#L132-L132 +type IndexOperation struct { + // DynamicTemplates A map from the full name of fields to the name of dynamic templates. + // Defaults to an empty map. + // If a name matches a dynamic template, then that template will be applied + // regardless of other match predicates defined in the template. + // If a field is already defined in the mapping, then this parameter won’t be + // used. + DynamicTemplates map[string]string `json:"dynamic_templates,omitempty"` + // Id_ The document ID. + Id_ *string `json:"_id,omitempty"` + IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` + IfSeqNo *int64 `json:"if_seq_no,omitempty"` + // Index_ Name of the index or index alias to perform the action on. + Index_ *string `json:"_index,omitempty"` + // Pipeline ID of the pipeline to use to preprocess incoming documents. + // If the index has a default ingest pipeline specified, then setting the value + // to `_none` disables the default ingest pipeline for this request. 
+ // If a final pipeline is configured it will always run, regardless of the value + // of this parameter. + Pipeline *string `json:"pipeline,omitempty"` + // RequireAlias If `true`, the request’s actions must target an index alias. + RequireAlias *bool `json:"require_alias,omitempty"` + // Routing Custom value used to route operations to a specific shard. + Routing *string `json:"routing,omitempty"` + Version *int64 `json:"version,omitempty"` + VersionType *versiontype.VersionType `json:"version_type,omitempty"` +} + +func (s *IndexOperation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dynamic_templates": + if s.DynamicTemplates == nil { + s.DynamicTemplates = make(map[string]string, 0) + } + if err := dec.Decode(&s.DynamicTemplates); err != nil { + return err + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "if_primary_term": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IfPrimaryTerm = &value + case float64: + f := int64(v) + s.IfPrimaryTerm = &f + } + + case "if_seq_no": + if err := dec.Decode(&s.IfSeqNo); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "pipeline": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pipeline = &o + + case "require_alias": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.RequireAlias = &value + case bool: + s.RequireAlias = &v + } + + case "routing": + if err := 
dec.Decode(&s.Routing); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return err + } + + } + } + return nil +} + +// NewIndexOperation returns a IndexOperation. +func NewIndexOperation() *IndexOperation { + r := &IndexOperation{ + DynamicTemplates: make(map[string]string, 0), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexprivilegescheck.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexprivilegescheck.go index 0950b7cfd..85930cf27 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexprivilegescheck.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexprivilegescheck.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexprivilege" ) // IndexPrivilegesCheck type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/has_privileges/types.ts#L33-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/has_privileges/types.ts#L33-L44 type IndexPrivilegesCheck struct { // AllowRestrictedIndices This needs to be set to true (default is false) if using wildcards or regexps // for patterns that cover restricted indices. 
@@ -43,6 +49,61 @@ type IndexPrivilegesCheck struct { Privileges []indexprivilege.IndexPrivilege `json:"privileges"` } +func (s *IndexPrivilegesCheck) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_restricted_indices": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowRestrictedIndices = &value + case bool: + s.AllowRestrictedIndices = &v + } + + case "names": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return err + } + } + + case "privileges": + if err := dec.Decode(&s.Privileges); err != nil { + return err + } + + } + } + return nil +} + // NewIndexPrivilegesCheck returns a IndexPrivilegesCheck. func NewIndexPrivilegesCheck() *IndexPrivilegesCheck { r := &IndexPrivilegesCheck{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexresult.go index 076e617ea..30ab2630f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexresult.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // IndexResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L267-L269 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L267-L269 type IndexResult struct { Response IndexResultSummary `json:"response"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexresultsummary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexresultsummary.go index 3f0a2f820..22892079e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexresultsummary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexresultsummary.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/result" ) // IndexResultSummary type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L271-L277 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L271-L277 type IndexResultSummary struct { Created bool `json:"created"` Id string `json:"id"` @@ -35,6 +41,60 @@ type IndexResultSummary struct { Version int64 `json:"version"` } +func (s *IndexResultSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "created": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Created = value + case bool: + s.Created = v + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "result": + if err := dec.Decode(&s.Result); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewIndexResultSummary returns a IndexResultSummary. func NewIndexResultSummary() *IndexResultSummary { r := &IndexResultSummary{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexrouting.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexrouting.go index 3c8108f8a..447fac8d0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexrouting.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexrouting.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // IndexRouting type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexRouting.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexRouting.ts#L22-L25 type IndexRouting struct { Allocation *IndexRoutingAllocation `json:"allocation,omitempty"` Rebalance *IndexRoutingRebalance `json:"rebalance,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingallocation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingallocation.go index b49c248e8..2edcedf8b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingallocation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingallocation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // IndexRoutingAllocation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexRouting.ts#L27-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexRouting.ts#L27-L32 type IndexRoutingAllocation struct { Disk *IndexRoutingAllocationDisk `json:"disk,omitempty"` Enable *indexroutingallocationoptions.IndexRoutingAllocationOptions `json:"enable,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingallocationdisk.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingallocationdisk.go index 4caa4ec26..3ae9c1e3f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingallocationdisk.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingallocationdisk.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IndexRoutingAllocationDisk type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexRouting.ts#L62-L64 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexRouting.ts#L62-L64 type IndexRoutingAllocationDisk struct { ThresholdEnabled string `json:"threshold_enabled,omitempty"` } +func (s *IndexRoutingAllocationDisk) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "threshold_enabled": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ThresholdEnabled = o + + } + } + return nil +} + // NewIndexRoutingAllocationDisk returns a IndexRoutingAllocationDisk. func NewIndexRoutingAllocationDisk() *IndexRoutingAllocationDisk { r := &IndexRoutingAllocationDisk{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingallocationinclude.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingallocationinclude.go index 41db3c974..6efba47e1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingallocationinclude.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingallocationinclude.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IndexRoutingAllocationInclude type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexRouting.ts#L52-L55 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexRouting.ts#L52-L55 type IndexRoutingAllocationInclude struct { Id_ *string `json:"_id,omitempty"` TierPreference_ *string `json:"_tier_preference,omitempty"` } +func (s *IndexRoutingAllocationInclude) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "_tier_preference": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TierPreference_ = &o + + } + } + return nil +} + // NewIndexRoutingAllocationInclude returns a IndexRoutingAllocationInclude. func NewIndexRoutingAllocationInclude() *IndexRoutingAllocationInclude { r := &IndexRoutingAllocationInclude{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingallocationinitialrecovery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingallocationinitialrecovery.go index 173b857ce..14fc1692d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingallocationinitialrecovery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingallocationinitialrecovery.go @@ -16,17 +16,49 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // IndexRoutingAllocationInitialRecovery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexRouting.ts#L57-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexRouting.ts#L57-L59 type IndexRoutingAllocationInitialRecovery struct { Id_ *string `json:"_id,omitempty"` } +func (s *IndexRoutingAllocationInitialRecovery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + } + } + return nil +} + // NewIndexRoutingAllocationInitialRecovery returns a IndexRoutingAllocationInitialRecovery. func NewIndexRoutingAllocationInitialRecovery() *IndexRoutingAllocationInitialRecovery { r := &IndexRoutingAllocationInitialRecovery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingrebalance.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingrebalance.go index 120acc8a9..6243a6c24 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingrebalance.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexroutingrebalance.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // IndexRoutingRebalance type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexRouting.ts#L34-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexRouting.ts#L34-L36 type IndexRoutingRebalance struct { Enable indexroutingrebalanceoptions.IndexRoutingRebalanceOptions `json:"enable"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsegment.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsegment.go index bcfc2ab4d..c49e410d8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsegment.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsegment.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // IndexSegment type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/segments/types.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/segments/types.ts#L24-L26 type IndexSegment struct { Shards map[string][]ShardsSegment `json:"shards"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsegmentsort.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsegmentsort.go index 2a456b066..7e60a4783 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsegmentsort.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsegmentsort.go @@ -16,11 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortmissing" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortmode" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/segmentsortorder" @@ -28,7 +33,7 @@ import ( // IndexSegmentSort type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSegmentSort.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSegmentSort.ts#L22-L27 type IndexSegmentSort struct { Field []string `json:"field,omitempty"` Missing []segmentsortmissing.SegmentSortMissing `json:"missing,omitempty"` @@ -36,6 +41,90 @@ type IndexSegmentSort struct { Order []segmentsortorder.SegmentSortOrder `json:"order,omitempty"` } +func (s *IndexSegmentSort) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Field = append(s.Field, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Field); err != nil { + return err + } + } + + case "missing": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := &segmentsortmissing.SegmentSortMissing{} + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Missing = append(s.Missing, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Missing); err != nil { + return err + } + } + + case "mode": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := &segmentsortmode.SegmentSortMode{} + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Mode = append(s.Mode, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Mode); err != 
nil { + return err + } + } + + case "order": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := &segmentsortorder.SegmentSortOrder{} + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Order = append(s.Order, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Order); err != nil { + return err + } + } + + } + } + return nil +} + // NewIndexSegmentSort returns a IndexSegmentSort. func NewIndexSegmentSort() *IndexSegmentSort { r := &IndexSegmentSort{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingblocks.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingblocks.go index 08ac1f4c5..8eb4e7ad3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingblocks.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingblocks.go @@ -16,19 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // IndexSettingBlocks type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L245-L251 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L245-L251 type IndexSettingBlocks struct { - Metadata *bool `json:"metadata,omitempty"` - Read *bool `json:"read,omitempty"` - ReadOnly *bool `json:"read_only,omitempty"` - ReadOnlyAllowDelete *bool `json:"read_only_allow_delete,omitempty"` - Write string `json:"write,omitempty"` + Metadata Stringifiedboolean `json:"metadata,omitempty"` + Read Stringifiedboolean `json:"read,omitempty"` + ReadOnly Stringifiedboolean `json:"read_only,omitempty"` + ReadOnlyAllowDelete Stringifiedboolean `json:"read_only_allow_delete,omitempty"` + Write Stringifiedboolean `json:"write,omitempty"` +} + +func (s *IndexSettingBlocks) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return err + } + + case "read": + if err := dec.Decode(&s.Read); err != nil { + return err + } + + case "read_only": + if err := dec.Decode(&s.ReadOnly); err != nil { + return err + } + + case "read_only_allow_delete": + if err := dec.Decode(&s.ReadOnlyAllowDelete); err != nil { + return err + } + + case "write": + if err := dec.Decode(&s.Write); err != nil { + return err + } + + } + } + return nil } // NewIndexSettingBlocks returns a IndexSettingBlocks. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettings.go index 4ca13999c..ade329d92 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettings.go @@ -16,20 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexcheckonstartup" - + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexcheckonstartup" ) // IndexSettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L69-L168 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L69-L168 type IndexSettings struct { Analysis *IndexSettingsAnalysis `json:"analysis,omitempty"` // Analyze Settings to define analyzers, tokenizers, token filters and character @@ -51,7 +55,7 @@ type IndexSettings struct { IndexSettings map[string]json.RawMessage `json:"-"` // IndexingPressure Configure indexing back pressure limits. 
IndexingPressure *IndicesIndexingPressure `json:"indexing_pressure,omitempty"` - IndexingSlowlog *SlowlogSettings `json:"indexing.slowlog,omitempty"` + IndexingSlowlog *IndexingSlowlogSettings `json:"indexing.slowlog,omitempty"` Lifecycle *IndexSettingsLifecycle `json:"lifecycle,omitempty"` LoadFixedBitsetFiltersEagerly *bool `json:"load_fixed_bitset_filters_eagerly,omitempty"` // Mapping Enable or disable dynamic mapping for an index. @@ -78,7 +82,7 @@ type IndexSettings struct { QueryString *SettingsQueryString `json:"query_string,omitempty"` RefreshInterval Duration `json:"refresh_interval,omitempty"` Routing *IndexRouting `json:"routing,omitempty"` - RoutingPartitionSize *int `json:"routing_partition_size,omitempty"` + RoutingPartitionSize Stringifiedinteger `json:"routing_partition_size,omitempty"` RoutingPath []string `json:"routing_path,omitempty"` Search *SettingsSearch `json:"search,omitempty"` Settings *IndexSettings `json:"settings,omitempty"` @@ -99,6 +103,561 @@ type IndexSettings struct { Version *IndexVersioning `json:"version,omitempty"` } +func (s *IndexSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analysis": + if err := dec.Decode(&s.Analysis); err != nil { + return err + } + + case "analyze": + if err := dec.Decode(&s.Analyze); err != nil { + return err + } + + case "auto_expand_replicas": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AutoExpandReplicas = &o + + case "blocks": + if err := dec.Decode(&s.Blocks); err != nil { + return err + } + + case "check_on_startup": + if err := dec.Decode(&s.CheckOnStartup); err != nil { + return err + } + + case "codec": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + 
return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Codec = &o + + case "creation_date": + if err := dec.Decode(&s.CreationDate); err != nil { + return err + } + + case "creation_date_string": + if err := dec.Decode(&s.CreationDateString); err != nil { + return err + } + + case "default_pipeline": + if err := dec.Decode(&s.DefaultPipeline); err != nil { + return err + } + + case "final_pipeline": + if err := dec.Decode(&s.FinalPipeline); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = o + + case "gc_deletes": + if err := dec.Decode(&s.GcDeletes); err != nil { + return err + } + + case "hidden": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Hidden = o + + case "highlight": + if err := dec.Decode(&s.Highlight); err != nil { + return err + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "indexing_pressure": + if err := dec.Decode(&s.IndexingPressure); err != nil { + return err + } + + case "indexing.slowlog": + if err := dec.Decode(&s.IndexingSlowlog); err != nil { + return err + } + + case "lifecycle": + if err := dec.Decode(&s.Lifecycle); err != nil { + return err + } + + case "load_fixed_bitset_filters_eagerly": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.LoadFixedBitsetFiltersEagerly = &value + case bool: + s.LoadFixedBitsetFiltersEagerly = &v + } + + case "mapping": + if err := dec.Decode(&s.Mapping); err != nil { + return err + } + + case "max_docvalue_fields_search": + + var tmp interface{} + dec.Decode(&tmp) + 
switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxDocvalueFieldsSearch = &value + case float64: + f := int(v) + s.MaxDocvalueFieldsSearch = &f + } + + case "max_inner_result_window": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxInnerResultWindow = &value + case float64: + f := int(v) + s.MaxInnerResultWindow = &f + } + + case "max_ngram_diff": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxNgramDiff = &value + case float64: + f := int(v) + s.MaxNgramDiff = &f + } + + case "max_refresh_listeners": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxRefreshListeners = &value + case float64: + f := int(v) + s.MaxRefreshListeners = &f + } + + case "max_regex_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxRegexLength = &value + case float64: + f := int(v) + s.MaxRegexLength = &f + } + + case "max_rescore_window": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxRescoreWindow = &value + case float64: + f := int(v) + s.MaxRescoreWindow = &f + } + + case "max_result_window": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxResultWindow = &value + case float64: + f := int(v) + s.MaxResultWindow = &f + } + + case "max_script_fields": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + 
if err != nil { + return err + } + s.MaxScriptFields = &value + case float64: + f := int(v) + s.MaxScriptFields = &f + } + + case "max_shingle_diff": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxShingleDiff = &value + case float64: + f := int(v) + s.MaxShingleDiff = &f + } + + case "max_slices_per_scroll": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxSlicesPerScroll = &value + case float64: + f := int(v) + s.MaxSlicesPerScroll = &f + } + + case "max_terms_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxTermsCount = &value + case float64: + f := int(v) + s.MaxTermsCount = &f + } + + case "merge": + if err := dec.Decode(&s.Merge); err != nil { + return err + } + + case "mode": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Mode = &o + + case "number_of_replicas": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NumberOfReplicas = o + + case "number_of_routing_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumberOfRoutingShards = &value + case float64: + f := int(v) + s.NumberOfRoutingShards = &f + } + + case "number_of_shards": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NumberOfShards = o + + case 
"priority": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Priority = o + + case "provided_name": + if err := dec.Decode(&s.ProvidedName); err != nil { + return err + } + + case "queries": + if err := dec.Decode(&s.Queries); err != nil { + return err + } + + case "query_string": + if err := dec.Decode(&s.QueryString); err != nil { + return err + } + + case "refresh_interval": + if err := dec.Decode(&s.RefreshInterval); err != nil { + return err + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + case "routing_partition_size": + if err := dec.Decode(&s.RoutingPartitionSize); err != nil { + return err + } + + case "routing_path": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.RoutingPath = append(s.RoutingPath, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.RoutingPath); err != nil { + return err + } + } + + case "search": + if err := dec.Decode(&s.Search); err != nil { + return err + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return err + } + + case "shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Shards = &value + case float64: + f := int(v) + s.Shards = &f + } + + case "similarity": + if err := dec.Decode(&s.Similarity); err != nil { + return err + } + + case "soft_deletes": + if err := dec.Decode(&s.SoftDeletes); err != nil { + return err + } + + case "sort": + if err := dec.Decode(&s.Sort); err != nil { + return err + } + + case "store": + if err := dec.Decode(&s.Store); err != nil { + return err + } + + case "time_series": + if err := 
dec.Decode(&s.TimeSeries); err != nil { + return err + } + + case "top_metrics_max_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TopMetricsMaxSize = &value + case float64: + f := int(v) + s.TopMetricsMaxSize = &f + } + + case "translog": + if err := dec.Decode(&s.Translog); err != nil { + return err + } + + case "uuid": + if err := dec.Decode(&s.Uuid); err != nil { + return err + } + + case "verified_before_close": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VerifiedBeforeClose = o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + default: + + if key, ok := t.(string); ok { + if s.IndexSettings == nil { + s.IndexSettings = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return err + } + s.IndexSettings[key] = *raw + } + + } + } + return nil +} + // MarhsalJSON overrides marshalling for types with additional properties func (s IndexSettings) MarshalJSON() ([]byte, error) { type opt IndexSettings @@ -118,6 +677,7 @@ func (s IndexSettings) MarshalJSON() ([]byte, error) { for key, value := range s.IndexSettings { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "IndexSettings") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingsanalysis.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingsanalysis.go index 202b48f62..033b8c792 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingsanalysis.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingsanalysis.go @@ -16,21 +16,20 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // IndexSettingsAnalysis type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L310-L316 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L310-L316 type IndexSettingsAnalysis struct { Analyzer map[string]Analyzer `json:"analyzer,omitempty"` CharFilter map[string]CharFilter `json:"char_filter,omitempty"` @@ -40,6 +39,7 @@ type IndexSettingsAnalysis struct { } func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -54,6 +54,9 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { switch t { case "analyzer": + if s.Analyzer == nil { + s.Analyzer = make(map[string]Analyzer, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -62,7 +65,9 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "custom" + } switch kind["type"] { case "custom": oo := NewCustomAnalyzer() @@ -149,13 +154,18 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { } s.Analyzer[key] = oo default: - if err := dec.Decode(&s.Analyzer); err != nil { + oo := new(Analyzer) + if err := localDec.Decode(&oo); err != nil { return err } + s.Analyzer[key] = oo } } case "char_filter": + if s.CharFilter == nil { + s.CharFilter = 
make(map[string]CharFilter, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -197,13 +207,18 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { } s.CharFilter[key] = oo default: - if err := dec.Decode(&s.CharFilter); err != nil { + oo := new(CharFilter) + if err := localDec.Decode(&oo); err != nil { return err } + s.CharFilter[key] = oo } } case "filter": + if s.Filter == nil { + s.Filter = make(map[string]TokenFilter, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -503,13 +518,18 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { } s.Filter[key] = oo default: - if err := dec.Decode(&s.Filter); err != nil { + oo := new(TokenFilter) + if err := localDec.Decode(&oo); err != nil { return err } + s.Filter[key] = oo } } case "normalizer": + if s.Normalizer == nil { + s.Normalizer = make(map[string]Normalizer, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -533,13 +553,18 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { } s.Normalizer[key] = oo default: - if err := dec.Decode(&s.Normalizer); err != nil { + oo := new(Normalizer) + if err := localDec.Decode(&oo); err != nil { return err } + s.Normalizer[key] = oo } } case "tokenizer": + if s.Tokenizer == nil { + s.Tokenizer = make(map[string]Tokenizer, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -635,9 +660,11 @@ func (s *IndexSettingsAnalysis) UnmarshalJSON(data []byte) error { } s.Tokenizer[key] = oo default: - if err := dec.Decode(&s.Tokenizer); err != nil { + oo := new(Tokenizer) + if err := localDec.Decode(&oo); err != nil { return err } + s.Tokenizer[key] = oo } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingslifecycle.go 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingslifecycle.go index 10e0416c7..ceec2e72f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingslifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingslifecycle.go @@ -16,18 +16,26 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IndexSettingsLifecycle type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L267-L300 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L267-L300 type IndexSettingsLifecycle struct { // IndexingComplete Indicates whether or not the index has been rolled over. Automatically set to // true when ILM completes the rollover action. // You can explicitly set it to skip rollover. - IndexingComplete *bool `json:"indexing_complete,omitempty"` + IndexingComplete Stringifiedboolean `json:"indexing_complete,omitempty"` // Name The name of the policy to use to manage the index. For information about how // Elasticsearch applies policy changes, see Policy updates. 
Name string `json:"name"` @@ -55,6 +63,82 @@ type IndexSettingsLifecycle struct { Step *IndexSettingsLifecycleStep `json:"step,omitempty"` } +func (s *IndexSettingsLifecycle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "indexing_complete": + if err := dec.Decode(&s.IndexingComplete); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "origination_date": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.OriginationDate = &value + case float64: + f := int64(v) + s.OriginationDate = &f + } + + case "parse_origination_date": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.ParseOriginationDate = &value + case bool: + s.ParseOriginationDate = &v + } + + case "rollover_alias": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RolloverAlias = &o + + case "step": + if err := dec.Decode(&s.Step); err != nil { + return err + } + + } + } + return nil +} + // NewIndexSettingsLifecycle returns a IndexSettingsLifecycle. 
func NewIndexSettingsLifecycle() *IndexSettingsLifecycle { r := &IndexSettingsLifecycle{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingslifecyclestep.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingslifecyclestep.go index e74241c3d..87e8aaf0a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingslifecyclestep.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingslifecyclestep.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // IndexSettingsLifecycleStep type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L302-L308 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L302-L308 type IndexSettingsLifecycleStep struct { // WaitTimeThreshold Time to wait for the cluster to resolve allocation issues during an ILM // shrink action. Must be greater than 1h (1 hour). 
@@ -30,6 +37,31 @@ type IndexSettingsLifecycleStep struct { WaitTimeThreshold Duration `json:"wait_time_threshold,omitempty"` } +func (s *IndexSettingsLifecycleStep) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "wait_time_threshold": + if err := dec.Decode(&s.WaitTimeThreshold); err != nil { + return err + } + + } + } + return nil +} + // NewIndexSettingsLifecycleStep returns a IndexSettingsLifecycleStep. func NewIndexSettingsLifecycleStep() *IndexSettingsLifecycleStep { r := &IndexSettingsLifecycleStep{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingstimeseries.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingstimeseries.go index d855ae62b..4a1498cd1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingstimeseries.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexsettingstimeseries.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // IndexSettingsTimeSeries type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L318-L321 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L318-L321 type IndexSettingsTimeSeries struct { EndTime DateTime `json:"end_time,omitempty"` StartTime DateTime `json:"start_time,omitempty"` } +func (s *IndexSettingsTimeSeries) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "end_time": + if err := dec.Decode(&s.EndTime); err != nil { + return err + } + + case "start_time": + if err := dec.Decode(&s.StartTime); err != nil { + return err + } + + } + } + return nil +} + // NewIndexSettingsTimeSeries returns a IndexSettingsTimeSeries. func NewIndexSettingsTimeSeries() *IndexSettingsTimeSeries { r := &IndexSettingsTimeSeries{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexstate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexstate.go index 46356906f..733b93860 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexstate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexstate.go @@ -16,20 +16,82 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // IndexState type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexState.ts#L26-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexState.ts#L27-L40 type IndexState struct { Aliases map[string]Alias `json:"aliases,omitempty"` DataStream *string `json:"data_stream,omitempty"` // Defaults Default settings, included when the request's `include_default` is `true`. Defaults *IndexSettings `json:"defaults,omitempty"` - Mappings *TypeMapping `json:"mappings,omitempty"` - Settings *IndexSettings `json:"settings,omitempty"` + // Lifecycle Data lifecycle applicable if this is a data stream. + Lifecycle *DataStreamLifecycle `json:"lifecycle,omitempty"` + Mappings *TypeMapping `json:"mappings,omitempty"` + Settings *IndexSettings `json:"settings,omitempty"` +} + +func (s *IndexState) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aliases": + if s.Aliases == nil { + s.Aliases = make(map[string]Alias, 0) + } + if err := dec.Decode(&s.Aliases); err != nil { + return err + } + + case "data_stream": + if err := dec.Decode(&s.DataStream); err != nil { + return err + } + + case "defaults": + if err := dec.Decode(&s.Defaults); err != nil { + return err + } + + case "lifecycle": + if err := dec.Decode(&s.Lifecycle); err != nil { + return err + } + + case "mappings": + if err := dec.Decode(&s.Mappings); err != nil { + return err + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return err + } + + } + } + return nil } // NewIndexState returns a IndexState. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexstats.go index 49ff82bd7..7eda0c259 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexstats.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // IndexStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/stats/types.ts#L52-L90 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/stats/types.ts#L52-L93 type IndexStats struct { Bulk *BulkStats `json:"bulk,omitempty"` // Completion Contains statistics about completions across all shards assigned to the node. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplate.go index 0530cdab6..af5f28f22 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplate.go @@ -16,26 +16,141 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // IndexTemplate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexTemplate.ts#L27-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexTemplate.ts#L31-L70 type IndexTemplate struct { - AllowAutoCreate *bool `json:"allow_auto_create,omitempty"` - ComposedOf []string `json:"composed_of"` - DataStream *IndexTemplateDataStreamConfiguration `json:"data_stream,omitempty"` - IndexPatterns []string `json:"index_patterns"` - Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` - Priority *int64 `json:"priority,omitempty"` - Template *IndexTemplateSummary `json:"template,omitempty"` - Version *int64 `json:"version,omitempty"` + AllowAutoCreate *bool `json:"allow_auto_create,omitempty"` + // ComposedOf An ordered list of component template names. + // Component templates are merged in the order specified, meaning that the last + // component template specified has the highest precedence. + ComposedOf []string `json:"composed_of"` + // DataStream If this object is included, the template is used to create data streams and + // their backing indices. + // Supports an empty object. + // Data streams require a matching index template with a `data_stream` object. + DataStream *IndexTemplateDataStreamConfiguration `json:"data_stream,omitempty"` + // IndexPatterns Name of the index template. + IndexPatterns []string `json:"index_patterns"` + // Meta_ Optional user metadata about the index template. May have any contents. + // This map is not automatically generated by Elasticsearch. + Meta_ Metadata `json:"_meta,omitempty"` + // Priority Priority to determine index template precedence when a new data stream or + // index is created. + // The index template with the highest priority is chosen. + // If no priority is specified the template is treated as though it is of + // priority 0 (lowest priority). 
+ // This number is not automatically generated by Elasticsearch. + Priority *int64 `json:"priority,omitempty"` + // Template Template to be applied. + // It may optionally include an `aliases`, `mappings`, or `settings` + // configuration. + Template *IndexTemplateSummary `json:"template,omitempty"` + // Version Version number used to manage index templates externally. + // This number is not automatically generated by Elasticsearch. + Version *int64 `json:"version,omitempty"` +} + +func (s *IndexTemplate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_auto_create": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowAutoCreate = &value + case bool: + s.AllowAutoCreate = &v + } + + case "composed_of": + if err := dec.Decode(&s.ComposedOf); err != nil { + return err + } + + case "data_stream": + if err := dec.Decode(&s.DataStream); err != nil { + return err + } + + case "index_patterns": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.IndexPatterns = append(s.IndexPatterns, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.IndexPatterns); err != nil { + return err + } + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return err + } + + case "priority": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Priority = &value + case float64: + f := int64(v) + s.Priority = &f + } + + case "template": + if err := dec.Decode(&s.Template); err != nil { + return 
err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil } // NewIndexTemplate returns a IndexTemplate. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplatedatastreamconfiguration.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplatedatastreamconfiguration.go index 15fc05930..bf4770eee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplatedatastreamconfiguration.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplatedatastreamconfiguration.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IndexTemplateDataStreamConfiguration type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexTemplate.ts#L39-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexTemplate.ts#L72-L83 type IndexTemplateDataStreamConfiguration struct { // AllowCustomRouting If true, the data stream supports custom routing. 
AllowCustomRouting *bool `json:"allow_custom_routing,omitempty"` @@ -30,6 +38,54 @@ type IndexTemplateDataStreamConfiguration struct { Hidden *bool `json:"hidden,omitempty"` } +func (s *IndexTemplateDataStreamConfiguration) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_custom_routing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowCustomRouting = &value + case bool: + s.AllowCustomRouting = &v + } + + case "hidden": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Hidden = &value + case bool: + s.Hidden = &v + } + + } + } + return nil +} + // NewIndexTemplateDataStreamConfiguration returns a IndexTemplateDataStreamConfiguration. func NewIndexTemplateDataStreamConfiguration() *IndexTemplateDataStreamConfiguration { r := &IndexTemplateDataStreamConfiguration{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplateitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplateitem.go index a8f5cc1b6..7933ebfa7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplateitem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplateitem.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // IndexTemplateItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L29-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/get_index_template/IndicesGetIndexTemplateResponse.ts#L29-L32 type IndexTemplateItem struct { IndexTemplate IndexTemplate `json:"index_template"` Name string `json:"name"` } +func (s *IndexTemplateItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index_template": + if err := dec.Decode(&s.IndexTemplate); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + // NewIndexTemplateItem returns a IndexTemplateItem. func NewIndexTemplateItem() *IndexTemplateItem { r := &IndexTemplateItem{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplatemapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplatemapping.go index c394ef7e5..4b7e63ed2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplatemapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplatemapping.go @@ -16,17 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // IndexTemplateMapping type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L60-L64 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L97-L119 type IndexTemplateMapping struct { - Aliases map[string]Alias `json:"aliases,omitempty"` - Mappings *TypeMapping `json:"mappings,omitempty"` - Settings *IndexSettings `json:"settings,omitempty"` + // Aliases Aliases to add. + // If the index template includes a `data_stream` object, these are data stream + // aliases. + // Otherwise, these are index aliases. + // Data stream aliases ignore the `index_routing`, `routing`, and + // `search_routing` options. + Aliases map[string]Alias `json:"aliases,omitempty"` + Lifecycle *DataStreamLifecycle `json:"lifecycle,omitempty"` + // Mappings Mapping for fields in the index. + // If specified, this mapping can include field names, field data types, and + // mapping parameters. + Mappings *TypeMapping `json:"mappings,omitempty"` + // Settings Configuration options for the index. + Settings *IndexSettings `json:"settings,omitempty"` } // NewIndexTemplateMapping returns a IndexTemplateMapping. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplatesummary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplatesummary.go index bd7401481..5835c4797 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplatesummary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indextemplatesummary.go @@ -16,17 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // IndexTemplateSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexTemplate.ts#L52-L56 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexTemplate.ts#L85-L107 type IndexTemplateSummary struct { - Aliases map[string]Alias `json:"aliases,omitempty"` - Mappings *TypeMapping `json:"mappings,omitempty"` - Settings *IndexSettings `json:"settings,omitempty"` + // Aliases Aliases to add. + // If the index template includes a `data_stream` object, these are data stream + // aliases. + // Otherwise, these are index aliases. + // Data stream aliases ignore the `index_routing`, `routing`, and + // `search_routing` options. + Aliases map[string]Alias `json:"aliases,omitempty"` + Lifecycle *DataStreamLifecycleWithRollover `json:"lifecycle,omitempty"` + // Mappings Mapping for fields in the index. + // If specified, this mapping can include field names, field data types, and + // mapping parameters. + Mappings *TypeMapping `json:"mappings,omitempty"` + // Settings Configuration options for the index. + Settings *IndexSettings `json:"settings,omitempty"` } // NewIndexTemplateSummary returns a IndexTemplateSummary. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexversioning.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexversioning.go index 52be73d73..f037ac083 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexversioning.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexversioning.go @@ -16,18 +16,63 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IndexVersioning type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L262-L265 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L262-L265 type IndexVersioning struct { Created *string `json:"created,omitempty"` CreatedString *string `json:"created_string,omitempty"` } +func (s *IndexVersioning) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "created": + if err := dec.Decode(&s.Created); err != nil { + return err + } + + case "created_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CreatedString = &o + + } + } + return nil +} + // NewIndexVersioning returns a IndexVersioning. func NewIndexVersioning() *IndexVersioning { r := &IndexVersioning{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicatornode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicatornode.go new file mode 100644 index 000000000..20b716833 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicatornode.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// IndicatorNode type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L90-L93 +type IndicatorNode struct { + Name string `json:"name,omitempty"` + NodeId string `json:"node_id,omitempty"` +} + +func (s *IndicatorNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "node_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeId = o + + } + } + return nil +} + +// NewIndicatorNode returns a IndicatorNode. +func NewIndicatorNode() *IndicatorNode { + r := &IndicatorNode{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicators.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicators.go new file mode 100644 index 000000000..473ce8970 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicators.go @@ -0,0 +1,41 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +// Indicators type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L32-L40 +type Indicators struct { + Disk *DiskIndicator `json:"disk,omitempty"` + Ilm *IlmIndicator `json:"ilm,omitempty"` + MasterIsStable *MasterIsStableIndicator `json:"master_is_stable,omitempty"` + RepositoryIntegrity *RepositoryIntegrityIndicator `json:"repository_integrity,omitempty"` + ShardsAvailability *ShardsAvailabilityIndicator `json:"shards_availability,omitempty"` + ShardsCapacity *ShardsCapacityIndicator `json:"shards_capacity,omitempty"` + Slm *SlmIndicator `json:"slm,omitempty"` +} + +// NewIndicators returns a Indicators. +func NewIndicators() *Indicators { + r := &Indicators{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indices.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indices.go index 85db181ce..f8103495b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indices.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indices.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Indices type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L61-L61 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L61-L61 type Indices []string diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesaction.go index fb918705c..e76587c2e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesaction.go @@ -16,16 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // IndicesAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/update_aliases/types.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/update_aliases/types.ts#L23-L39 type IndicesAction struct { - Add *AddAction `json:"add,omitempty"` - Remove *RemoveAction `json:"remove,omitempty"` + // Add Adds a data stream or index to an alias. + // If the alias doesn’t exist, the `add` action creates it. + Add *AddAction `json:"add,omitempty"` + // Remove Removes a data stream or index from an alias. 
+ Remove *RemoveAction `json:"remove,omitempty"` + // RemoveIndex Deletes an index. + // You cannot use this action on aliases or data streams. RemoveIndex *RemoveIndexAction `json:"remove_index,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesblockstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesblockstatus.go index b96aecdff..49a75b46b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesblockstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesblockstatus.go @@ -16,18 +16,65 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IndicesBlockStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/add_block/IndicesAddBlockResponse.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/add_block/IndicesAddBlockResponse.ts#L30-L33 type IndicesBlockStatus struct { Blocked bool `json:"blocked"` Name string `json:"name"` } +func (s *IndicesBlockStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "blocked": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Blocked = value + case bool: + s.Blocked = v + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + // NewIndicesBlockStatus returns a IndicesBlockStatus. func NewIndicesBlockStatus() *IndicesBlockStatus { r := &IndicesBlockStatus{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesindexingpressure.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesindexingpressure.go index 8a439411c..b968c70a5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesindexingpressure.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesindexingpressure.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // IndicesIndexingPressure type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L540-L542 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L534-L536 type IndicesIndexingPressure struct { Memory IndicesIndexingPressureMemory `json:"memory"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesindexingpressurememory.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesindexingpressurememory.go index d616f2e38..9e24a809f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesindexingpressurememory.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesindexingpressurememory.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IndicesIndexingPressureMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L544-L551 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L538-L545 type IndicesIndexingPressureMemory struct { // Limit Number of outstanding bytes that may be consumed by indexing requests. 
When // this limit is reached or exceeded, @@ -32,6 +40,42 @@ type IndicesIndexingPressureMemory struct { Limit *int `json:"limit,omitempty"` } +func (s *IndicesIndexingPressureMemory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "limit": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Limit = &value + case float64: + f := int(v) + s.Limit = &f + } + + } + } + return nil +} + // NewIndicesIndexingPressureMemory returns a IndicesIndexingPressureMemory. func NewIndicesIndexingPressureMemory() *IndicesIndexingPressureMemory { r := &IndicesIndexingPressureMemory{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesmodifyaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesmodifyaction.go index 246931f80..de615ff01 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesmodifyaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesmodifyaction.go @@ -16,15 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // IndicesModifyAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/modify_data_stream/types.ts#L22-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/modify_data_stream/types.ts#L22-L37 type IndicesModifyAction struct { - AddBackingIndex *IndexAndDataStreamAction `json:"add_backing_index,omitempty"` + // AddBackingIndex Adds an existing index as a backing index for a data stream. + // The index is hidden as part of this operation. + // WARNING: Adding indices with the `add_backing_index` action can potentially + // result in improper data stream behavior. + // This should be considered an expert level API. + AddBackingIndex *IndexAndDataStreamAction `json:"add_backing_index,omitempty"` + // RemoveBackingIndex Removes a backing index from a data stream. + // The index is unhidden as part of this operation. + // A data stream’s write index cannot be removed. RemoveBackingIndex *IndexAndDataStreamAction `json:"remove_backing_index,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesoptions.go index b7de16ff5..e6634dcba 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesoptions.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" ) // IndicesOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L297-L324 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L332-L359 type IndicesOptions struct { // AllowNoIndices If false, the request returns an error if any wildcard expression, index // alias, or `_all` value targets only @@ -47,6 +53,84 @@ type IndicesOptions struct { IgnoreUnavailable *bool `json:"ignore_unavailable,omitempty"` } +func (s *IndicesOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_no_indices": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowNoIndices = &value + case bool: + s.AllowNoIndices = &v + } + + case "expand_wildcards": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := &expandwildcard.ExpandWildcard{} + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.ExpandWildcards = append(s.ExpandWildcards, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.ExpandWildcards); err != nil { + return err + } + } + + case "ignore_throttled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreThrottled = &value + case bool: + s.IgnoreThrottled = &v + } + + case "ignore_unavailable": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreUnavailable = &value + case bool: + 
s.IgnoreUnavailable = &v + } + + } + } + return nil +} + // NewIndicesOptions returns a IndicesOptions. func NewIndicesOptions() *IndicesOptions { r := &IndicesOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesprivileges.go index 8308f0ad3..9b7518eeb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesprivileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesprivileges.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexprivilege" ) // IndicesPrivileges type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/Privileges.ts#L81-L104 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/Privileges.ts#L82-L105 type IndicesPrivileges struct { // AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that // cover restricted indices. Implicitly, restricted indices have limited @@ -36,7 +42,7 @@ type IndicesPrivileges struct { // `allow_restricted_indices`. AllowRestrictedIndices *bool `json:"allow_restricted_indices,omitempty"` // FieldSecurity The document fields that the owners of the role have read access to. 
- FieldSecurity []FieldSecurity `json:"field_security,omitempty"` + FieldSecurity *FieldSecurity `json:"field_security,omitempty"` // Names A list of indices (or index name patterns) to which the permissions in this // entry apply. Names []string `json:"names"` @@ -49,6 +55,71 @@ type IndicesPrivileges struct { Query IndicesPrivilegesQuery `json:"query,omitempty"` } +func (s *IndicesPrivileges) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_restricted_indices": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowRestrictedIndices = &value + case bool: + s.AllowRestrictedIndices = &v + } + + case "field_security": + if err := dec.Decode(&s.FieldSecurity); err != nil { + return err + } + + case "names": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return err + } + } + + case "privileges": + if err := dec.Decode(&s.Privileges); err != nil { + return err + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + } + } + return nil +} + // NewIndicesPrivileges returns a IndicesPrivileges. 
func NewIndicesPrivileges() *IndicesPrivileges { r := &IndicesPrivileges{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesprivilegesquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesprivilegesquery.go index 5d0f1ba29..d95debc10 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesprivilegesquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesprivilegesquery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,5 +26,5 @@ package types // Query // RoleTemplateQuery // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/Privileges.ts#L130-L138 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/Privileges.ts#L131-L139 type IndicesPrivilegesQuery interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesrecord.go index 8b88a27c7..e8139c535 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesrecord.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IndicesRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/indices/types.ts#L20-L801 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/indices/types.ts#L20-L801 type IndicesRecord struct { // BulkAvgSizeInBytes average size in bytes of shard bulk BulkAvgSizeInBytes *string `json:"bulk.avg_size_in_bytes,omitempty"` @@ -310,6 +318,1718 @@ type IndicesRecord struct { WarmerTotalTime *string `json:"warmer.total_time,omitempty"` } +func (s *IndicesRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bulk.avg_size_in_bytes", "basi", "bulkAvgSizeInBytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkAvgSizeInBytes = &o + + case "bulk.avg_time", "bati", "bulkAvgTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkAvgTime = &o + + case "bulk.total_operations", "bto", "bulkTotalOperation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalOperations = &o + + case "bulk.total_size_in_bytes", "btsi", "bulkTotalSizeInBytes": + var 
tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalSizeInBytes = &o + + case "bulk.total_time", "btti", "bulkTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalTime = &o + + case "completion.size", "cs", "completionSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CompletionSize = &o + + case "creation.date", "cd": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CreationDate = &o + + case "creation.date.string", "cds": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CreationDateString = &o + + case "docs.count", "dc", "docsCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DocsCount = o + + case "docs.deleted", "dd", "docsDeleted": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DocsDeleted = o + + case "fielddata.evictions", "fe", "fielddataEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FielddataEvictions = &o + + case "fielddata.memory_size", "fm", 
"fielddataMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FielddataMemorySize = &o + + case "flush.total", "ft", "flushTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FlushTotal = &o + + case "flush.total_time", "ftt", "flushTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FlushTotalTime = &o + + case "get.current", "gc", "getCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetCurrent = &o + + case "get.exists_time", "geti", "getExistsTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetExistsTime = &o + + case "get.exists_total", "geto", "getExistsTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetExistsTotal = &o + + case "get.missing_time", "gmti", "getMissingTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetMissingTime = &o + + case "get.missing_total", "gmto", "getMissingTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + 
s.GetMissingTotal = &o + + case "get.time", "gti", "getTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetTime = &o + + case "get.total", "gto", "getTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetTotal = &o + + case "health", "h": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Health = &o + + case "index", "i", "idx": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Index = &o + + case "indexing.delete_current", "idc", "indexingDeleteCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteCurrent = &o + + case "indexing.delete_time", "idti", "indexingDeleteTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteTime = &o + + case "indexing.delete_total", "idto", "indexingDeleteTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteTotal = &o + + case "indexing.index_current", "iic", "indexingIndexCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o 
= string(tmp[:]) + } + s.IndexingIndexCurrent = &o + + case "indexing.index_failed", "iif", "indexingIndexFailed": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexFailed = &o + + case "indexing.index_time", "iiti", "indexingIndexTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexTime = &o + + case "indexing.index_total", "iito", "indexingIndexTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexTotal = &o + + case "memory.total", "tm", "memoryTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MemoryTotal = &o + + case "merges.current", "mc", "mergesCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrent = &o + + case "merges.current_docs", "mcd", "mergesCurrentDocs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrentDocs = &o + + case "merges.current_size", "mcs", "mergesCurrentSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrentSize = &o + + case "merges.total", "mt", "mergesTotal": + var tmp json.RawMessage + if err := 
dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotal = &o + + case "merges.total_docs", "mtd", "mergesTotalDocs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotalDocs = &o + + case "merges.total_size", "mts", "mergesTotalSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotalSize = &o + + case "merges.total_time", "mtt", "mergesTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotalTime = &o + + case "pri", "p", "shards.primary", "shardsPrimary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pri = &o + + case "pri.bulk.avg_size_in_bytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriBulkAvgSizeInBytes = &o + + case "pri.bulk.avg_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriBulkAvgTime = &o + + case "pri.bulk.total_operations": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriBulkTotalOperations = &o + + case "pri.bulk.total_size_in_bytes": + var tmp 
json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriBulkTotalSizeInBytes = &o + + case "pri.bulk.total_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriBulkTotalTime = &o + + case "pri.completion.size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriCompletionSize = &o + + case "pri.fielddata.evictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriFielddataEvictions = &o + + case "pri.fielddata.memory_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriFielddataMemorySize = &o + + case "pri.flush.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriFlushTotal = &o + + case "pri.flush.total_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriFlushTotalTime = &o + + case "pri.get.current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriGetCurrent = &o + + case "pri.get.exists_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return 
err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriGetExistsTime = &o + + case "pri.get.exists_total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriGetExistsTotal = &o + + case "pri.get.missing_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriGetMissingTime = &o + + case "pri.get.missing_total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriGetMissingTotal = &o + + case "pri.get.time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriGetTime = &o + + case "pri.get.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriGetTotal = &o + + case "pri.indexing.delete_current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriIndexingDeleteCurrent = &o + + case "pri.indexing.delete_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriIndexingDeleteTime = &o + + case "pri.indexing.delete_total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != 
nil { + o = string(tmp[:]) + } + s.PriIndexingDeleteTotal = &o + + case "pri.indexing.index_current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriIndexingIndexCurrent = &o + + case "pri.indexing.index_failed": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriIndexingIndexFailed = &o + + case "pri.indexing.index_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriIndexingIndexTime = &o + + case "pri.indexing.index_total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriIndexingIndexTotal = &o + + case "pri.memory.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriMemoryTotal = &o + + case "pri.merges.current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriMergesCurrent = &o + + case "pri.merges.current_docs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriMergesCurrentDocs = &o + + case "pri.merges.current_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } 
+ s.PriMergesCurrentSize = &o + + case "pri.merges.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriMergesTotal = &o + + case "pri.merges.total_docs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriMergesTotalDocs = &o + + case "pri.merges.total_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriMergesTotalSize = &o + + case "pri.merges.total_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriMergesTotalTime = &o + + case "pri.query_cache.evictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriQueryCacheEvictions = &o + + case "pri.query_cache.memory_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriQueryCacheMemorySize = &o + + case "pri.refresh.external_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRefreshExternalTime = &o + + case "pri.refresh.external_total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRefreshExternalTotal = 
&o + + case "pri.refresh.listeners": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRefreshListeners = &o + + case "pri.refresh.time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRefreshTime = &o + + case "pri.refresh.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRefreshTotal = &o + + case "pri.request_cache.evictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRequestCacheEvictions = &o + + case "pri.request_cache.hit_count": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRequestCacheHitCount = &o + + case "pri.request_cache.memory_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRequestCacheMemorySize = &o + + case "pri.request_cache.miss_count": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriRequestCacheMissCount = &o + + case "pri.search.fetch_current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchFetchCurrent = &o + + case 
"pri.search.fetch_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchFetchTime = &o + + case "pri.search.fetch_total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchFetchTotal = &o + + case "pri.search.open_contexts": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchOpenContexts = &o + + case "pri.search.query_current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchQueryCurrent = &o + + case "pri.search.query_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchQueryTime = &o + + case "pri.search.query_total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchQueryTotal = &o + + case "pri.search.scroll_current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchScrollCurrent = &o + + case "pri.search.scroll_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchScrollTime = &o + + case "pri.search.scroll_total": + 
var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSearchScrollTotal = &o + + case "pri.segments.count": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSegmentsCount = &o + + case "pri.segments.fixed_bitset_memory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSegmentsFixedBitsetMemory = &o + + case "pri.segments.index_writer_memory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSegmentsIndexWriterMemory = &o + + case "pri.segments.memory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSegmentsMemory = &o + + case "pri.segments.version_map_memory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSegmentsVersionMapMemory = &o + + case "pri.store.size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriStoreSize = o + + case "pri.suggest.current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSuggestCurrent = &o + + case "pri.suggest.time": + var tmp 
json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSuggestTime = &o + + case "pri.suggest.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriSuggestTotal = &o + + case "pri.warmer.current": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriWarmerCurrent = &o + + case "pri.warmer.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriWarmerTotal = &o + + case "pri.warmer.total_time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PriWarmerTotalTime = &o + + case "query_cache.evictions", "qce", "queryCacheEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryCacheEvictions = &o + + case "query_cache.memory_size", "qcm", "queryCacheMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryCacheMemorySize = &o + + case "refresh.external_time", "reti": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshExternalTime = &o + + case "refresh.external_total", "reto": + var tmp 
json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshExternalTotal = &o + + case "refresh.listeners", "rli", "refreshListeners": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshListeners = &o + + case "refresh.time", "rti", "refreshTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshTime = &o + + case "refresh.total", "rto", "refreshTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshTotal = &o + + case "rep", "r", "shards.replica", "shardsReplica": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Rep = &o + + case "request_cache.evictions", "rce", "requestCacheEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RequestCacheEvictions = &o + + case "request_cache.hit_count", "rchc", "requestCacheHitCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RequestCacheHitCount = &o + + case "request_cache.memory_size", "rcm", "requestCacheMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + 
o = string(tmp[:]) + } + s.RequestCacheMemorySize = &o + + case "request_cache.miss_count", "rcmc", "requestCacheMissCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RequestCacheMissCount = &o + + case "search.fetch_current", "sfc", "searchFetchCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchCurrent = &o + + case "search.fetch_time", "sfti", "searchFetchTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchTime = &o + + case "search.fetch_total", "sfto", "searchFetchTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchTotal = &o + + case "search.open_contexts", "so", "searchOpenContexts": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchOpenContexts = &o + + case "search.query_current", "sqc", "searchQueryCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQueryCurrent = &o + + case "search.query_time", "sqti", "searchQueryTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQueryTime = &o + + case "search.query_total", "sqto", "searchQueryTotal": 
+ var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQueryTotal = &o + + case "search.scroll_current", "scc", "searchScrollCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollCurrent = &o + + case "search.scroll_time", "scti", "searchScrollTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollTime = &o + + case "search.scroll_total", "scto", "searchScrollTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollTotal = &o + + case "search.throttled", "sth": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchThrottled = &o + + case "segments.count", "sc", "segmentsCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsCount = &o + + case "segments.fixed_bitset_memory", "sfbm", "fixedBitsetMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsFixedBitsetMemory = &o + + case "segments.index_writer_memory", "siwm", "segmentsIndexWriterMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsIndexWriterMemory = &o + + case "segments.memory", "sm", "segmentsMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsMemory = &o + + case "segments.version_map_memory", "svmm", "segmentsVersionMapMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsVersionMapMemory = &o + + case "status", "s": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Status = &o + + case "store.size", "ss", "storeSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StoreSize = o + + case "suggest.current", "suc", "suggestCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SuggestCurrent = &o + + case "suggest.time", "suti", "suggestTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SuggestTime = &o + + case "suggest.total", "suto", "suggestTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SuggestTotal = &o + + case "uuid", "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) 
+ o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Uuid = &o + + case "warmer.current", "wc", "warmerCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WarmerCurrent = &o + + case "warmer.total", "wto", "warmerTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WarmerTotal = &o + + case "warmer.total_time", "wtt", "warmerTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WarmerTotalTime = &o + + } + } + return nil +} + // NewIndicesRecord returns a IndicesRecord. func NewIndicesRecord() *IndicesRecord { r := &IndicesRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesshardsstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesshardsstats.go index 30c159aba..e03e41cb3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesshardsstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesshardsstats.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // IndicesShardsStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L49-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L49-L52 type IndicesShardsStats struct { AllFields FieldSummary `json:"all_fields"` Fields map[string]FieldSummary `json:"fields"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesshardstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesshardstats.go index c802ccb29..705624840 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesshardstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesshardstats.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // IndicesShardStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/stats/types.ts#L183-L211 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/stats/types.ts#L192-L223 type IndicesShardStats struct { Bulk *BulkStats `json:"bulk,omitempty"` Commit *ShardCommit `json:"commit,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesshardstores.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesshardstores.go index d9e24d649..4200821a1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesshardstores.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesshardstores.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // IndicesShardStores type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/shard_stores/types.ts#L26-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/shard_stores/types.ts#L26-L28 type IndicesShardStores struct { Shards map[string]ShardStoreWrapper `json:"shards"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesstats.go index 630785c82..bfb4b6079 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesstats.go @@ -16,18 +16,23 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/healthstatus" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexmetadatastate" ) // IndicesStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/stats/types.ts#L92-L101 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/stats/types.ts#L95-L110 type IndicesStats struct { Health *healthstatus.HealthStatus `json:"health,omitempty"` Primaries *IndexStats `json:"primaries,omitempty"` @@ -37,6 +42,59 @@ type IndicesStats struct { Uuid *string `json:"uuid,omitempty"` } +func (s *IndicesStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "health": + if err := dec.Decode(&s.Health); err != nil { + return err + } + + case "primaries": + if err := dec.Decode(&s.Primaries); err != nil { + return err + } + + case "shards": + if s.Shards == nil { + s.Shards = make(map[string][]IndicesShardStats, 0) + } + if err := dec.Decode(&s.Shards); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return err + } + + case "uuid": + if err := dec.Decode(&s.Uuid); err != nil { + return err + } + + } + } + return nil +} + // NewIndicesStats returns a IndicesStats. 
func NewIndicesStats() *IndicesStats { r := &IndicesStats{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesvalidationexplanation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesvalidationexplanation.go index 5b67ecc90..db12098d1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesvalidationexplanation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesvalidationexplanation.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IndicesValidationExplanation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/validate_query/IndicesValidateQueryResponse.ts#L32-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/validate_query/IndicesValidateQueryResponse.ts#L32-L37 type IndicesValidationExplanation struct { Error *string `json:"error,omitempty"` Explanation *string `json:"explanation,omitempty"` @@ -30,6 +38,69 @@ type IndicesValidationExplanation struct { Valid bool `json:"valid"` } +func (s *IndicesValidationExplanation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Error = &o + + 
case "explanation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Explanation = &o + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "valid": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Valid = value + case bool: + s.Valid = v + } + + } + } + return nil +} + // NewIndicesValidationExplanation returns a IndicesValidationExplanation. func NewIndicesValidationExplanation() *IndicesValidationExplanation { r := &IndicesValidationExplanation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesversions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesversions.go index cefe6605f..aeb39a0ef 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesversions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indicesversions.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IndicesVersions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L144-L149 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L263-L268 type IndicesVersions struct { IndexCount int `json:"index_count"` PrimaryShardCount int `json:"primary_shard_count"` @@ -30,6 +38,78 @@ type IndicesVersions struct { Version string `json:"version"` } +func (s *IndicesVersions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IndexCount = value + case float64: + f := int(v) + s.IndexCount = f + } + + case "primary_shard_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PrimaryShardCount = value + case float64: + f := int(v) + s.PrimaryShardCount = f + } + + case "total_primary_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalPrimaryBytes = value + case float64: + f := int64(v) + s.TotalPrimaryBytes = f + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewIndicesVersions returns a IndicesVersions. 
func NewIndicesVersions() *IndicesVersions { r := &IndicesVersions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceaggregate.go index ad9cadb4b..96604c047 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceaggregate.go @@ -16,27 +16,96 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" ) // InferenceAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L650-L661 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L659-L670 type InferenceAggregate struct { Data map[string]json.RawMessage `json:"-"` FeatureImportance []InferenceFeatureImportance `json:"feature_importance,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` TopClasses []InferenceTopClassEntry `json:"top_classes,omitempty"` Value FieldValue `json:"value,omitempty"` Warning *string `json:"warning,omitempty"` } +func (s *InferenceAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_importance": + if err := dec.Decode(&s.FeatureImportance); err != nil { + return err + } + + case 
"meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "top_classes": + if err := dec.Decode(&s.TopClasses); err != nil { + return err + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + case "warning": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Warning = &o + + default: + + if key, ok := t.(string); ok { + if s.Data == nil { + s.Data = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return err + } + s.Data[key] = *raw + } + + } + } + return nil +} + // MarhsalJSON overrides marshalling for types with additional properties func (s InferenceAggregate) MarshalJSON() ([]byte, error) { type opt InferenceAggregate @@ -56,6 +125,7 @@ func (s InferenceAggregate) MarshalJSON() ([]byte, error) { for key, value := range s.Data { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Data") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceaggregation.go index c528bd143..a158e0074 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceaggregation.go @@ -16,35 +16,42 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // InferenceAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L171-L174 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L205-L214 type InferenceAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - InferenceConfig *InferenceConfigContainer `json:"inference_config,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - ModelId string `json:"model_id"` - Name *string `json:"name,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + // InferenceConfig Contains the inference type and its options. + InferenceConfig *InferenceConfigContainer `json:"inference_config,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // ModelId The ID or alias for the trained model. 
+ ModelId string `json:"model_id"` + Name *string `json:"name,omitempty"` } func (s *InferenceAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -64,9 +71,16 @@ func (s *InferenceAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -89,9 +103,16 @@ func (s *InferenceAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceclassimportance.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceclassimportance.go index d5b5bed39..bb938d8a2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceclassimportance.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceclassimportance.go @@ -16,18 +16,74 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // InferenceClassImportance type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L675-L678 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L684-L687 type InferenceClassImportance struct { ClassName string `json:"class_name"` Importance Float64 `json:"importance"` } +func (s *InferenceClassImportance) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "class_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClassName = o + + case "importance": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Importance = f + case float64: + f := Float64(v) + s.Importance = f + } + + } + } + return nil +} + // NewInferenceClassImportance returns a InferenceClassImportance. func NewInferenceClassImportance() *InferenceClassImportance { r := &InferenceClassImportance{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfig.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfig.go index 2dcdd8fa9..c523ee6ce 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfig.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfig.go @@ -16,16 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // InferenceConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L244-L250 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L735-L747 type InferenceConfig struct { + // Classification Classification configuration for inference. Classification *InferenceConfigClassification `json:"classification,omitempty"` - Regression *InferenceConfigRegression `json:"regression,omitempty"` + // Regression Regression configuration for inference. + Regression *InferenceConfigRegression `json:"regression,omitempty"` } // NewInferenceConfig returns a InferenceConfig. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigclassification.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigclassification.go index b41210b62..329f445e6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigclassification.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigclassification.go @@ -16,19 +16,108 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // InferenceConfigClassification type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L257-L263 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L762-L788 type InferenceConfigClassification struct { - NumTopClasses *int `json:"num_top_classes,omitempty"` - NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` - PredictionFieldType *string `json:"prediction_field_type,omitempty"` - ResultsField *string `json:"results_field,omitempty"` - TopClassesResultsField *string `json:"top_classes_results_field,omitempty"` + // NumTopClasses Specifies the number of top class predictions to return. + NumTopClasses *int `json:"num_top_classes,omitempty"` + // NumTopFeatureImportanceValues Specifies the maximum number of feature importance values per document. + NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` + // PredictionFieldType Specifies the type of the predicted field to write. + // Valid values are: `string`, `number`, `boolean`. + PredictionFieldType *string `json:"prediction_field_type,omitempty"` + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. + ResultsField *string `json:"results_field,omitempty"` + // TopClassesResultsField Specifies the field to which the top classes are written. 
+ TopClassesResultsField *string `json:"top_classes_results_field,omitempty"` +} + +func (s *InferenceConfigClassification) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "num_top_classes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumTopClasses = &value + case float64: + f := int(v) + s.NumTopClasses = &f + } + + case "num_top_feature_importance_values": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumTopFeatureImportanceValues = &value + case float64: + f := int(v) + s.NumTopFeatureImportanceValues = &f + } + + case "prediction_field_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PredictionFieldType = &o + + case "results_field": + if err := dec.Decode(&s.ResultsField); err != nil { + return err + } + + case "top_classes_results_field": + if err := dec.Decode(&s.TopClassesResultsField); err != nil { + return err + } + + } + } + return nil } // NewInferenceConfigClassification returns a InferenceConfigClassification. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigcontainer.go index e9e0cdea5..f97d998ee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigcontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigcontainer.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // InferenceConfigContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L176-L182 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L216-L222 type InferenceConfigContainer struct { // Classification Classification configuration for inference. Classification *ClassificationInferenceOptions `json:"classification,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigcreatecontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigcreatecontainer.go index b020a79ae..84e63781e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigcreatecontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigcreatecontainer.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // InferenceConfigCreateContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L23-L67 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L23-L80 type InferenceConfigCreateContainer struct { // Classification Classification configuration for inference. Classification *ClassificationInferenceOptions `json:"classification,omitempty"` @@ -40,6 +40,8 @@ type InferenceConfigCreateContainer struct { TextClassification *TextClassificationInferenceOptions `json:"text_classification,omitempty"` // TextEmbedding Text embedding configuration for inference. TextEmbedding *TextEmbeddingInferenceOptions `json:"text_embedding,omitempty"` + // TextExpansion Text expansion configuration for inference. + TextExpansion *TextExpansionInferenceOptions `json:"text_expansion,omitempty"` // ZeroShotClassification Zeroshot classification configuration for inference. ZeroShotClassification *ZeroShotClassificationInferenceOptions `json:"zero_shot_classification,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigregression.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigregression.go index fb0e1acef..1ebd1df54 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigregression.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigregression.go @@ -16,16 +16,68 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // InferenceConfigRegression type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L252-L255 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L749-L760 type InferenceConfigRegression struct { - NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` - ResultsField *string `json:"results_field,omitempty"` + // NumTopFeatureImportanceValues Specifies the maximum number of feature importance values per document. + NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. + ResultsField *string `json:"results_field,omitempty"` +} + +func (s *InferenceConfigRegression) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "num_top_feature_importance_values": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumTopFeatureImportanceValues = &value + case float64: + f := int(v) + s.NumTopFeatureImportanceValues = &f + } + + case "results_field": + if err := dec.Decode(&s.ResultsField); err != nil { + return err + } + + } + } + return nil } // NewInferenceConfigRegression returns a InferenceConfigRegression. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigupdatecontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigupdatecontainer.go index 1042c97e9..439c86bf9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigupdatecontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceconfigupdatecontainer.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // InferenceConfigUpdateContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L265-L285 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L296-L318 type InferenceConfigUpdateContainer struct { // Classification Classification configuration for inference. Classification *ClassificationInferenceOptions `json:"classification,omitempty"` @@ -40,6 +40,8 @@ type InferenceConfigUpdateContainer struct { TextClassification *TextClassificationInferenceUpdateOptions `json:"text_classification,omitempty"` // TextEmbedding Text embedding configuration for inference. TextEmbedding *TextEmbeddingInferenceUpdateOptions `json:"text_embedding,omitempty"` + // TextExpansion Text expansion configuration for inference. + TextExpansion *TextExpansionInferenceUpdateOptions `json:"text_expansion,omitempty"` // ZeroShotClassification Zeroshot classification configuration for inference. 
ZeroShotClassification *ZeroShotClassificationInferenceUpdateOptions `json:"zero_shot_classification,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferencefeatureimportance.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferencefeatureimportance.go index 6fb34289a..e0bf44648 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferencefeatureimportance.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferencefeatureimportance.go @@ -16,19 +16,80 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // InferenceFeatureImportance type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L669-L673 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L678-L682 type InferenceFeatureImportance struct { Classes []InferenceClassImportance `json:"classes,omitempty"` FeatureName string `json:"feature_name"` Importance *Float64 `json:"importance,omitempty"` } +func (s *InferenceFeatureImportance) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classes": + if err := dec.Decode(&s.Classes); err != nil { + return err + } + + case "feature_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeatureName = o + + case "importance": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Importance = &f + case float64: + f := Float64(v) + s.Importance = &f + } + + } + } + return nil +} + // NewInferenceFeatureImportance returns a InferenceFeatureImportance. func NewInferenceFeatureImportance() *InferenceFeatureImportance { r := &InferenceFeatureImportance{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceprocessor.go index 31a155357..01ee09ea0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceprocessor.go @@ -16,27 +16,142 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // InferenceProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L237-L242 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L714-L733 type InferenceProcessor struct { - Description *string `json:"description,omitempty"` - FieldMap map[string]json.RawMessage `json:"field_map,omitempty"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - InferenceConfig *InferenceConfig `json:"inference_config,omitempty"` - ModelId string `json:"model_id"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // FieldMap Maps the document field names to the known field names of the model. + // This mapping takes precedence over any default mappings provided in the model + // configuration. + FieldMap map[string]json.RawMessage `json:"field_map,omitempty"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // InferenceConfig Contains the inference type and its options. + InferenceConfig *InferenceConfig `json:"inference_config,omitempty"` + // ModelId The ID or alias for the trained model, or the ID of the deployment. + ModelId string `json:"model_id"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. 
+ Tag *string `json:"tag,omitempty"` + // TargetField Field added to incoming documents to contain results objects. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *InferenceProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field_map": + if s.FieldMap == nil { + s.FieldMap = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.FieldMap); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "inference_config": + if err := dec.Decode(&s.InferenceConfig); err != nil { + return err + } + + case "model_id": + if err := dec.Decode(&s.ModelId); err != nil { + return err + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewInferenceProcessor returns a InferenceProcessor. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceresponseresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceresponseresult.go index 8ef1d8136..8c5f8b88b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceresponseresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferenceresponseresult.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // InferenceResponseResult type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L418-L465 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L459-L506 type InferenceResponseResult struct { // Entities If the model is trained for named entity recognition (NER) tasks, the // response contains the recognized entities. 
@@ -64,6 +72,116 @@ type InferenceResponseResult struct { Warning *string `json:"warning,omitempty"` } +func (s *InferenceResponseResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "entities": + if err := dec.Decode(&s.Entities); err != nil { + return err + } + + case "feature_importance": + if err := dec.Decode(&s.FeatureImportance); err != nil { + return err + } + + case "is_truncated": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsTruncated = &value + case bool: + s.IsTruncated = &v + } + + case "predicted_value": + if err := dec.Decode(&s.PredictedValue); err != nil { + return err + } + + case "predicted_value_sequence": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PredictedValueSequence = &o + + case "prediction_probability": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.PredictionProbability = &f + case float64: + f := Float64(v) + s.PredictionProbability = &f + } + + case "prediction_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.PredictionScore = &f + case float64: + f := Float64(v) + s.PredictionScore = &f + } + + case "top_classes": + if err := dec.Decode(&s.TopClasses); err != nil { + return err + } + + case "warning": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Warning = &o + + } + } + return nil +} + // NewInferenceResponseResult returns a InferenceResponseResult. func NewInferenceResponseResult() *InferenceResponseResult { r := &InferenceResponseResult{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferencetopclassentry.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferencetopclassentry.go index ab49f8912..f604a7925 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferencetopclassentry.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inferencetopclassentry.go @@ -16,19 +16,84 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // InferenceTopClassEntry type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L663-L667 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L672-L676 type InferenceTopClassEntry struct { ClassName FieldValue `json:"class_name"` ClassProbability Float64 `json:"class_probability"` ClassScore Float64 `json:"class_score"` } +func (s *InferenceTopClassEntry) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "class_name": + if err := dec.Decode(&s.ClassName); err != nil { + return err + } + + case "class_probability": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.ClassProbability = f + case float64: + f := Float64(v) + s.ClassProbability = f + } + + case "class_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.ClassScore = f + case float64: + f := Float64(v) + s.ClassScore = f + } + + } + } + return nil +} + // NewInferenceTopClassEntry returns a InferenceTopClassEntry. func NewInferenceTopClassEntry() *InferenceTopClassEntry { r := &InferenceTopClassEntry{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/influence.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/influence.go index 0a53ac974..54d69142e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/influence.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/influence.go @@ -16,18 +16,63 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Influence type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Anomaly.ts#L140-L143 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Anomaly.ts#L140-L143 type Influence struct { InfluencerFieldName string `json:"influencer_field_name"` InfluencerFieldValues []string `json:"influencer_field_values"` } +func (s *Influence) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "influencer_field_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InfluencerFieldName = o + + case "influencer_field_values": + if err := dec.Decode(&s.InfluencerFieldValues); err != nil { + return err + } + + } + } + return nil +} + // NewInfluence returns a Influence. func NewInfluence() *Influence { r := &Influence{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/influencer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/influencer.go index 3ca9d6895..1fbabe8cd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/influencer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/influencer.go @@ -16,13 +16,21 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Influencer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Influencer.ts#L31-L83 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Influencer.ts#L31-L83 type Influencer struct { // BucketSpan The length of the bucket in seconds. This value matches the bucket span that // is specified in the job. @@ -65,6 +73,144 @@ type Influencer struct { Timestamp int64 `json:"timestamp"` } +func (s *Influencer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bucket_span": + if err := dec.Decode(&s.BucketSpan); err != nil { + return err + } + + case "foo": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Foo = &o + + case "influencer_field_name": + if err := dec.Decode(&s.InfluencerFieldName); err != nil { + return err + } + + case "influencer_field_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InfluencerFieldValue = o + + case "influencer_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + 
f := Float64(value) + s.InfluencerScore = f + case float64: + f := Float64(v) + s.InfluencerScore = f + } + + case "initial_influencer_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.InitialInfluencerScore = f + case float64: + f := Float64(v) + s.InitialInfluencerScore = f + } + + case "is_interim": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsInterim = value + case bool: + s.IsInterim = v + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return err + } + + case "probability": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Probability = f + case float64: + f := Float64(v) + s.Probability = f + } + + case "result_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultType = o + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + } + } + return nil +} + // NewInfluencer returns a Influencer. func NewInfluencer() *Influencer { r := &Influencer{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/infofeaturestate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/infofeaturestate.go index ed9e5c1cf..38e80f3a4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/infofeaturestate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/infofeaturestate.go @@ -16,18 +16,74 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // InfoFeatureState type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/_types/SnapshotInfoFeatureState.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/_types/SnapshotInfoFeatureState.ts#L22-L25 type InfoFeatureState struct { FeatureName string `json:"feature_name"` Indices []string `json:"indices"` } +func (s *InfoFeatureState) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeatureName = o + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return err + } + } + + } + } + return nil +} + // NewInfoFeatureState returns a InfoFeatureState. 
func NewInfoFeatureState() *InfoFeatureState { r := &InfoFeatureState{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ingestpipeline.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ingestpipeline.go index 3f374eb8d..e9a772a7e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ingestpipeline.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ingestpipeline.go @@ -16,18 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IngestPipeline type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Pipeline.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Pipeline.ts#L23-L41 type IngestPipeline struct { - Description *string `json:"description,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Processors []ProcessorContainer `json:"processors,omitempty"` - Version *int64 `json:"version,omitempty"` + // Description Description of the ingest pipeline. + Description *string `json:"description,omitempty"` + // OnFailure Processors to run immediately after a processor failure. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Processors Processors used to perform transformations on documents before indexing. + // Processors run sequentially in the order specified. + Processors []ProcessorContainer `json:"processors,omitempty"` + // Version Version number used by external systems to track ingest pipelines. 
+ Version *int64 `json:"version,omitempty"` +} + +func (s *IngestPipeline) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "processors": + if err := dec.Decode(&s.Processors); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil } // NewIngestPipeline returns a IngestPipeline. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ingesttotal.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ingesttotal.go index dee501228..4339dc138 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ingesttotal.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ingesttotal.go @@ -16,19 +16,108 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IngestTotal type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L149-L155 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L356-L377 type IngestTotal struct { - Count *int64 `json:"count,omitempty"` - Current *int64 `json:"current,omitempty"` - Failed *int64 `json:"failed,omitempty"` - Processors []map[string]KeyedProcessor `json:"processors,omitempty"` - TimeInMillis *int64 `json:"time_in_millis,omitempty"` + // Count Total number of documents ingested during the lifetime of this node. + Count *int64 `json:"count,omitempty"` + // Current Total number of documents currently being ingested. + Current *int64 `json:"current,omitempty"` + // Failed Total number of failed ingest operations during the lifetime of this node. + Failed *int64 `json:"failed,omitempty"` + // Processors Total number of ingest processors. + Processors []map[string]KeyedProcessor `json:"processors,omitempty"` + // TimeInMillis Total time, in milliseconds, spent preprocessing ingest documents during the + // lifetime of this node. 
+ TimeInMillis *int64 `json:"time_in_millis,omitempty"` +} + +func (s *IngestTotal) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = &value + case float64: + f := int64(v) + s.Count = &f + } + + case "current": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Current = &value + case float64: + f := int64(v) + s.Current = &f + } + + case "failed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Failed = &value + case float64: + f := int64(v) + s.Failed = &f + } + + case "processors": + if err := dec.Decode(&s.Processors); err != nil { + return err + } + + case "time_in_millis": + if err := dec.Decode(&s.TimeInMillis); err != nil { + return err + } + + } + } + return nil } // NewIngestTotal returns a IngestTotal. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inlineget.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inlineget.go index 1ade91ca0..cedbacbb7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inlineget.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inlineget.go @@ -16,18 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" ) // InlineGet type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L286-L295 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L321-L330 type InlineGet struct { Fields map[string]json.RawMessage `json:"fields,omitempty"` Found bool `json:"found"` @@ -38,6 +42,91 @@ type InlineGet struct { Source_ json.RawMessage `json:"_source,omitempty"` } +func (s *InlineGet) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "found": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Found = value + case bool: + s.Found = v + } + + case "_primary_term": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PrimaryTerm_ = &value + case float64: + f := int64(v) + s.PrimaryTerm_ = &f + } + + case "_routing": + if err := dec.Decode(&s.Routing_); err != nil { + return err + } + + case "_seq_no": + if err := dec.Decode(&s.SeqNo_); err != nil { + return err + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return err + } + + default: + + if key, ok := 
t.(string); ok { + if s.Metadata == nil { + s.Metadata = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != nil { + return err + } + s.Metadata[key] = *raw + } + + } + } + return nil +} + // MarhsalJSON overrides marshalling for types with additional properties func (s InlineGet) MarshalJSON() ([]byte, error) { type opt InlineGet @@ -57,6 +146,7 @@ func (s InlineGet) MarshalJSON() ([]byte, error) { for key, value := range s.Metadata { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Metadata") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inlinegetdictuserdefined.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inlinegetdictuserdefined.go index 86a2a4db5..f581808c0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inlinegetdictuserdefined.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inlinegetdictuserdefined.go @@ -16,18 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" ) // InlineGetDictUserDefined type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L286-L295 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L321-L330 type InlineGetDictUserDefined struct { Fields map[string]json.RawMessage `json:"fields,omitempty"` Found bool `json:"found"` @@ -38,6 +42,94 @@ type InlineGetDictUserDefined struct { Source_ map[string]json.RawMessage `json:"_source"` } +func (s *InlineGetDictUserDefined) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "found": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Found = value + case bool: + s.Found = v + } + + case "_primary_term": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PrimaryTerm_ = &value + case float64: + f := int64(v) + s.PrimaryTerm_ = &f + } + + case "_routing": + if err := dec.Decode(&s.Routing_); err != nil { + return err + } + + case "_seq_no": + if err := dec.Decode(&s.SeqNo_); err != nil { + return err + } + + case "_source": + if s.Source_ == nil { + s.Source_ = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Source_); err != nil { + return err + } + + default: + + if key, ok := t.(string); ok { + if s.InlineGetDictUserDefined == nil { + s.InlineGetDictUserDefined = make(map[string]json.RawMessage, 0) + } + raw := new(json.RawMessage) + if err := dec.Decode(&raw); err != 
nil { + return err + } + s.InlineGetDictUserDefined[key] = *raw + } + + } + } + return nil +} + // MarhsalJSON overrides marshalling for types with additional properties func (s InlineGetDictUserDefined) MarshalJSON() ([]byte, error) { type opt InlineGetDictUserDefined @@ -57,6 +149,7 @@ func (s InlineGetDictUserDefined) MarshalJSON() ([]byte, error) { for key, value := range s.InlineGetDictUserDefined { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "InlineGetDictUserDefined") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inlinescript.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inlinescript.go index ab5cb279c..20233291d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inlinescript.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inlinescript.go @@ -16,24 +16,90 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptlanguage" ) // InlineScript type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Scripting.ts#L45-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Scripting.ts#L67-L79 type InlineScript struct { + // Lang Specifies the language the script is written in. 
Lang *scriptlanguage.ScriptLanguage `json:"lang,omitempty"` Options map[string]string `json:"options,omitempty"` - Params map[string]json.RawMessage `json:"params,omitempty"` - Source string `json:"source"` + // Params Specifies any named parameters that are passed into the script as variables. + // Use parameters instead of hard-coded values to decrease compile time. + Params map[string]json.RawMessage `json:"params,omitempty"` + // Source The script source. + Source string `json:"source"` +} + +func (s *InlineScript) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Source) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lang": + if err := dec.Decode(&s.Lang); err != nil { + return err + } + + case "options": + if s.Options == nil { + s.Options = make(map[string]string, 0) + } + if err := dec.Decode(&s.Options); err != nil { + return err + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return err + } + + case "source": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Source = o + + } + } + return nil } // NewInlineScript returns a InlineScript. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/innerhits.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/innerhits.go index 25b2bf386..b020a1f25 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/innerhits.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/innerhits.go @@ -16,30 +16,247 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // InnerHits type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/hits.ts#L106-L124 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/hits.ts#L106-L140 type InnerHits struct { - Collapse *FieldCollapse `json:"collapse,omitempty"` - DocvalueFields []FieldAndFormat `json:"docvalue_fields,omitempty"` - Explain *bool `json:"explain,omitempty"` - Fields []string `json:"fields,omitempty"` - From *int `json:"from,omitempty"` - Highlight *Highlight `json:"highlight,omitempty"` - IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + Collapse *FieldCollapse `json:"collapse,omitempty"` + DocvalueFields []FieldAndFormat `json:"docvalue_fields,omitempty"` + Explain *bool `json:"explain,omitempty"` + Fields []string `json:"fields,omitempty"` + // From Inner hit starting document offset. + From *int `json:"from,omitempty"` + Highlight *Highlight `json:"highlight,omitempty"` + IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + // Name The name for the particular inner hit definition in the response. + // Useful when a search request contains multiple inner hits. 
Name *string `json:"name,omitempty"` ScriptFields map[string]ScriptField `json:"script_fields,omitempty"` SeqNoPrimaryTerm *bool `json:"seq_no_primary_term,omitempty"` - Size *int `json:"size,omitempty"` - Sort []SortCombinations `json:"sort,omitempty"` - Source_ SourceConfig `json:"_source,omitempty"` - StoredField []string `json:"stored_field,omitempty"` - TrackScores *bool `json:"track_scores,omitempty"` - Version *bool `json:"version,omitempty"` + // Size The maximum number of hits to return per `inner_hits`. + Size *int `json:"size,omitempty"` + // Sort How the inner hits should be sorted per `inner_hits`. + // By default, inner hits are sorted by score. + Sort []SortCombinations `json:"sort,omitempty"` + Source_ SourceConfig `json:"_source,omitempty"` + StoredField []string `json:"stored_field,omitempty"` + TrackScores *bool `json:"track_scores,omitempty"` + Version *bool `json:"version,omitempty"` +} + +func (s *InnerHits) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collapse": + if err := dec.Decode(&s.Collapse); err != nil { + return err + } + + case "docvalue_fields": + if err := dec.Decode(&s.DocvalueFields); err != nil { + return err + } + + case "explain": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Explain = &value + case bool: + s.Explain = &v + } + + case "fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Fields = append(s.Fields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { + return err + } + } + + case "from": + + var tmp interface{} + 
dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.From = &value + case float64: + f := int(v) + s.From = &f + } + + case "highlight": + if err := dec.Decode(&s.Highlight); err != nil { + return err + } + + case "ignore_unmapped": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "script_fields": + if s.ScriptFields == nil { + s.ScriptFields = make(map[string]ScriptField, 0) + } + if err := dec.Decode(&s.ScriptFields); err != nil { + return err + } + + case "seq_no_primary_term": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.SeqNoPrimaryTerm = &value + case bool: + s.SeqNoPrimaryTerm = &v + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return err + } + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return err + } + + case "stored_field": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + 
+ s.StoredField = append(s.StoredField, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.StoredField); err != nil { + return err + } + } + + case "track_scores": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TrackScores = &value + case bool: + s.TrackScores = &v + } + + case "version": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Version = &value + case bool: + s.Version = &v + } + + } + } + return nil } // NewInnerHits returns a InnerHits. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/innerhitsresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/innerhitsresult.go index d45a1bef0..81a0f9b3c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/innerhitsresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/innerhitsresult.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // InnerHitsResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/hits.ts#L84-L86 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/hits.ts#L84-L86 type InnerHitsResult struct { Hits *HitsMetadata `json:"hits,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inprogress.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inprogress.go index e083f3a22..3715ca03b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inprogress.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/inprogress.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // InProgress type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/_types/SnapshotLifecycle.ts#L131-L136 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/_types/SnapshotLifecycle.ts#L131-L136 type InProgress struct { Name string `json:"name"` StartTimeMillis int64 `json:"start_time_millis"` @@ -30,6 +38,53 @@ type InProgress struct { Uuid string `json:"uuid"` } +func (s *InProgress) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "start_time_millis": + if err := dec.Decode(&s.StartTimeMillis); err != nil { + return err + } + + case "state": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.State = o + + case "uuid": + if err := dec.Decode(&s.Uuid); err != nil { + return err + } + + } + } + return nil +} + // NewInProgress returns a InProgress. func NewInProgress() *InProgress { r := &InProgress{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/input.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/input.go index 9db5ffdb5..7a3ed5517 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/input.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/input.go @@ -16,17 +16,60 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // Input type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model/types.ts#L56-L58 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model/types.ts#L56-L58 type Input struct { FieldNames []string `json:"field_names"` } +func (s *Input) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field_names": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.FieldNames = append(s.FieldNames, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.FieldNames); err != nil { + return err + } + } + + } + } + return nil +} + // NewInput returns a Input. func NewInput() *Input { r := &Input{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/integernumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/integernumberproperty.go index c1ef5f152..4e7120143 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/integernumberproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/integernumberproperty.go @@ -16,25 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // IntegerNumberProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L146-L149 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L149-L152 type IntegerNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -63,6 +63,7 @@ type IntegerNumberProperty struct { } func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -77,23 +78,63 @@ func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: 
+ value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -102,6 +143,9 @@ func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -110,7 +154,9 @@ func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -389,35 +435,80 @@ func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := 
int(v) + s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "null_value": - if err := dec.Decode(&s.NullValue); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NullValue = &value + case float64: + f := int(v) + s.NullValue = &f } case "on_script_error": @@ -426,6 +517,9 @@ func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error { } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -434,7 +528,9 @@ func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -713,9 +809,11 @@ func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -725,18 +823,43 @@ func (s 
*IntegerNumberProperty) UnmarshalJSON(data []byte) error { } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "time_series_dimension": - if err := dec.Decode(&s.TimeSeriesDimension); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v } case "time_series_metric": @@ -754,6 +877,36 @@ func (s *IntegerNumberProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s IntegerNumberProperty) MarshalJSON() ([]byte, error) { + type innerIntegerNumberProperty IntegerNumberProperty + tmp := innerIntegerNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Similarity: s.Similarity, + Store: s.Store, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "integer" + + return json.Marshal(tmp) +} + // NewIntegerNumberProperty returns a IntegerNumberProperty. 
func NewIntegerNumberProperty() *IntegerNumberProperty { r := &IntegerNumberProperty{ @@ -762,7 +915,5 @@ func NewIntegerNumberProperty() *IntegerNumberProperty { Properties: make(map[string]Property, 0), } - r.Type = "integer" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/integerrangeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/integerrangeproperty.go index 3f5e1a042..c21e8d1b6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/integerrangeproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/integerrangeproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // IntegerRangeProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/range.ts#L42-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/range.ts#L42-L44 type IntegerRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -51,6 +51,7 @@ type IntegerRangeProperty struct { } func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -65,23 +66,63 @@ func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + 
case bool: + s.DocValues = &v } case "dynamic": @@ -90,6 +131,9 @@ func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -98,7 +142,9 @@ func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -377,28 +423,56 @@ func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -407,7 +481,9 @@ func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; 
!ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -686,20 +762,38 @@ func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -712,6 +806,30 @@ func (s *IntegerRangeProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s IntegerRangeProperty) MarshalJSON() ([]byte, error) { + type innerIntegerRangeProperty IntegerRangeProperty + tmp := innerIntegerRangeProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + tmp.Type = "integer_range" + + return json.Marshal(tmp) +} + // NewIntegerRangeProperty returns a IntegerRangeProperty. 
func NewIntegerRangeProperty() *IntegerRangeProperty { r := &IntegerRangeProperty{ @@ -720,7 +838,5 @@ func NewIntegerRangeProperty() *IntegerRangeProperty { Properties: make(map[string]Property, 0), } - r.Type = "integer_range" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervals.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervals.go index 913d46157..b057f1450 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervals.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervals.go @@ -16,19 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Intervals type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L63-L72 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L83-L110 type Intervals struct { - AllOf *IntervalsAllOf `json:"all_of,omitempty"` - AnyOf *IntervalsAnyOf `json:"any_of,omitempty"` - Fuzzy *IntervalsFuzzy `json:"fuzzy,omitempty"` - Match *IntervalsMatch `json:"match,omitempty"` - Prefix *IntervalsPrefix `json:"prefix,omitempty"` + // AllOf Returns matches that span a combination of other rules. + AllOf *IntervalsAllOf `json:"all_of,omitempty"` + // AnyOf Returns intervals produced by any of its sub-rules. + AnyOf *IntervalsAnyOf `json:"any_of,omitempty"` + // Fuzzy Matches analyzed text. + Fuzzy *IntervalsFuzzy `json:"fuzzy,omitempty"` + // Match Matches analyzed text. 
+ Match *IntervalsMatch `json:"match,omitempty"` + // Prefix Matches terms that start with a specified set of characters. + Prefix *IntervalsPrefix `json:"prefix,omitempty"` + // Wildcard Matches terms using a wildcard pattern. Wildcard *IntervalsWildcard `json:"wildcard,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsallof.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsallof.go index a99099f57..32351aaee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsallof.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsallof.go @@ -16,18 +16,94 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IntervalsAllOf type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L49-L56 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L50-L70 type IntervalsAllOf struct { - Filter *IntervalsFilter `json:"filter,omitempty"` - Intervals []Intervals `json:"intervals"` - MaxGaps *int `json:"max_gaps,omitempty"` - Ordered *bool `json:"ordered,omitempty"` + // Filter Rule used to filter returned intervals. + Filter *IntervalsFilter `json:"filter,omitempty"` + // Intervals An array of rules to combine. All rules must produce a match in a document + // for the overall source to match. + Intervals []Intervals `json:"intervals"` + // MaxGaps Maximum number of positions between the matching terms. 
+ // Intervals produced by the rules further apart than this are not considered + // matches. + MaxGaps *int `json:"max_gaps,omitempty"` + // Ordered If `true`, intervals produced by the rules should appear in the order in + // which they are specified. + Ordered *bool `json:"ordered,omitempty"` +} + +func (s *IntervalsAllOf) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return err + } + + case "intervals": + if err := dec.Decode(&s.Intervals); err != nil { + return err + } + + case "max_gaps": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxGaps = &value + case float64: + f := int(v) + s.MaxGaps = &f + } + + case "ordered": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Ordered = &value + case bool: + s.Ordered = &v + } + + } + } + return nil } // NewIntervalsAllOf returns a IntervalsAllOf. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsanyof.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsanyof.go index bc8c28fd2..3b1fbc918 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsanyof.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsanyof.go @@ -16,16 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // IntervalsAnyOf type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L58-L61 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L72-L81 type IntervalsAnyOf struct { - Filter *IntervalsFilter `json:"filter,omitempty"` - Intervals []Intervals `json:"intervals"` + // Filter Rule used to filter returned intervals. + Filter *IntervalsFilter `json:"filter,omitempty"` + // Intervals An array of rules to match. + Intervals []Intervals `json:"intervals"` } // NewIntervalsAnyOf returns a IntervalsAnyOf. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsfilter.go index 56f6fbfd6..4d6291b25 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsfilter.go @@ -16,23 +16,113 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // IntervalsFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L74-L86 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L112-L152 type IntervalsFilter struct { - After *Intervals `json:"after,omitempty"` - Before *Intervals `json:"before,omitempty"` - ContainedBy *Intervals `json:"contained_by,omitempty"` - Containing *Intervals `json:"containing,omitempty"` + // After Query used to return intervals that follow an interval from the `filter` + // rule. + After *Intervals `json:"after,omitempty"` + // Before Query used to return intervals that occur before an interval from the + // `filter` rule. + Before *Intervals `json:"before,omitempty"` + // ContainedBy Query used to return intervals contained by an interval from the `filter` + // rule. + ContainedBy *Intervals `json:"contained_by,omitempty"` + // Containing Query used to return intervals that contain an interval from the `filter` + // rule. + Containing *Intervals `json:"containing,omitempty"` + // NotContainedBy Query used to return intervals that are **not** contained by an interval from + // the `filter` rule. NotContainedBy *Intervals `json:"not_contained_by,omitempty"` - NotContaining *Intervals `json:"not_containing,omitempty"` + // NotContaining Query used to return intervals that do **not** contain an interval from the + // `filter` rule. + NotContaining *Intervals `json:"not_containing,omitempty"` + // NotOverlapping Query used to return intervals that do **not** overlap with an interval from + // the `filter` rule. NotOverlapping *Intervals `json:"not_overlapping,omitempty"` - Overlapping *Intervals `json:"overlapping,omitempty"` - Script Script `json:"script,omitempty"` + // Overlapping Query used to return intervals that overlap with an interval from the + // `filter` rule. 
+ Overlapping *Intervals `json:"overlapping,omitempty"` + // Script Script used to return matching documents. + // This script must return a boolean value: `true` or `false`. + Script Script `json:"script,omitempty"` +} + +func (s *IntervalsFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "after": + if err := dec.Decode(&s.After); err != nil { + return err + } + + case "before": + if err := dec.Decode(&s.Before); err != nil { + return err + } + + case "contained_by": + if err := dec.Decode(&s.ContainedBy); err != nil { + return err + } + + case "containing": + if err := dec.Decode(&s.Containing); err != nil { + return err + } + + case "not_contained_by": + if err := dec.Decode(&s.NotContainedBy); err != nil { + return err + } + + case "not_containing": + if err := dec.Decode(&s.NotContaining); err != nil { + return err + } + + case "not_overlapping": + if err := dec.Decode(&s.NotOverlapping); err != nil { + return err + } + + case "overlapping": + if err := dec.Decode(&s.Overlapping); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil } // NewIntervalsFilter returns a IntervalsFilter. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsfuzzy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsfuzzy.go index 6607acf2b..a09e93db3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsfuzzy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsfuzzy.go @@ -16,20 +16,122 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IntervalsFuzzy type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L88-L97 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L154-L184 type IntervalsFuzzy struct { - Analyzer *string `json:"analyzer,omitempty"` - Fuzziness Fuzziness `json:"fuzziness,omitempty"` - PrefixLength *int `json:"prefix_length,omitempty"` - Term string `json:"term"` - Transpositions *bool `json:"transpositions,omitempty"` - UseField *string `json:"use_field,omitempty"` + // Analyzer Analyzer used to normalize the term. + Analyzer *string `json:"analyzer,omitempty"` + // Fuzziness Maximum edit distance allowed for matching. + Fuzziness Fuzziness `json:"fuzziness,omitempty"` + // PrefixLength Number of beginning characters left unchanged when creating expansions. + PrefixLength *int `json:"prefix_length,omitempty"` + // Term The term to match. + Term string `json:"term"` + // Transpositions Indicates whether edits include transpositions of two adjacent characters + // (for example, `ab` to `ba`). + Transpositions *bool `json:"transpositions,omitempty"` + // UseField If specified, match intervals from this field rather than the top-level + // field. + // The `term` is normalized using the search analyzer from this field, unless + // `analyzer` is specified separately. 
+ UseField *string `json:"use_field,omitempty"` +} + +func (s *IntervalsFuzzy) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "fuzziness": + if err := dec.Decode(&s.Fuzziness); err != nil { + return err + } + + case "prefix_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PrefixLength = &value + case float64: + f := int(v) + s.PrefixLength = &f + } + + case "term": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Term = o + + case "transpositions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Transpositions = &value + case bool: + s.Transpositions = &v + } + + case "use_field": + if err := dec.Decode(&s.UseField); err != nil { + return err + } + + } + } + return nil } // NewIntervalsFuzzy returns a IntervalsFuzzy. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsmatch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsmatch.go index 7af911698..0a4656f77 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsmatch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsmatch.go @@ -16,20 +16,122 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IntervalsMatch type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L99-L108 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L186-L216 type IntervalsMatch struct { - Analyzer *string `json:"analyzer,omitempty"` - Filter *IntervalsFilter `json:"filter,omitempty"` - MaxGaps *int `json:"max_gaps,omitempty"` - Ordered *bool `json:"ordered,omitempty"` - Query string `json:"query"` - UseField *string `json:"use_field,omitempty"` + // Analyzer Analyzer used to analyze terms in the query. + Analyzer *string `json:"analyzer,omitempty"` + // Filter An optional interval filter. + Filter *IntervalsFilter `json:"filter,omitempty"` + // MaxGaps Maximum number of positions between the matching terms. + // Terms further apart than this are not considered matches. + MaxGaps *int `json:"max_gaps,omitempty"` + // Ordered If `true`, matching terms must appear in their specified order. + Ordered *bool `json:"ordered,omitempty"` + // Query Text you wish to find in the provided field. + Query string `json:"query"` + // UseField If specified, match intervals from this field rather than the top-level + // field. + // The `term` is normalized using the search analyzer from this field, unless + // `analyzer` is specified separately. 
+ UseField *string `json:"use_field,omitempty"` +} + +func (s *IntervalsMatch) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return err + } + + case "max_gaps": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxGaps = &value + case float64: + f := int(v) + s.MaxGaps = &f + } + + case "ordered": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Ordered = &value + case bool: + s.Ordered = &v + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "use_field": + if err := dec.Decode(&s.UseField); err != nil { + return err + } + + } + } + return nil } // NewIntervalsMatch returns a IntervalsMatch. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsprefix.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsprefix.go index fe335b074..d913df593 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsprefix.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsprefix.go @@ -16,19 +16,82 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IntervalsPrefix type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L110-L114 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L218-L233 type IntervalsPrefix struct { + // Analyzer Analyzer used to analyze the `prefix`. Analyzer *string `json:"analyzer,omitempty"` - Prefix string `json:"prefix"` + // Prefix Beginning characters of terms you wish to find in the top-level field. + Prefix string `json:"prefix"` + // UseField If specified, match intervals from this field rather than the top-level + // field. + // The `prefix` is normalized using the search analyzer from this field, unless + // `analyzer` is specified separately. 
UseField *string `json:"use_field,omitempty"` } +func (s *IntervalsPrefix) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "prefix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Prefix = o + + case "use_field": + if err := dec.Decode(&s.UseField); err != nil { + return err + } + + } + } + return nil +} + // NewIntervalsPrefix returns a IntervalsPrefix. func NewIntervalsPrefix() *IntervalsPrefix { r := &IntervalsPrefix{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsquery.go index 49a742c16..64c4c5b9e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalsquery.go @@ -16,22 +16,120 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IntervalsQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L116-L125 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L235-L263 type IntervalsQuery struct { - AllOf *IntervalsAllOf `json:"all_of,omitempty"` - AnyOf *IntervalsAnyOf `json:"any_of,omitempty"` - Boost *float32 `json:"boost,omitempty"` - Fuzzy *IntervalsFuzzy `json:"fuzzy,omitempty"` - Match *IntervalsMatch `json:"match,omitempty"` - Prefix *IntervalsPrefix `json:"prefix,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Wildcard *IntervalsWildcard `json:"wildcard,omitempty"` + // AllOf Returns matches that span a combination of other rules. + AllOf *IntervalsAllOf `json:"all_of,omitempty"` + // AnyOf Returns intervals produced by any of its sub-rules. + AnyOf *IntervalsAnyOf `json:"any_of,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Fuzzy Matches terms that are similar to the provided term, within an edit distance + // defined by `fuzziness`. + Fuzzy *IntervalsFuzzy `json:"fuzzy,omitempty"` + // Match Matches analyzed text. + Match *IntervalsMatch `json:"match,omitempty"` + // Prefix Matches terms that start with a specified set of characters. + Prefix *IntervalsPrefix `json:"prefix,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Wildcard Matches terms using a wildcard pattern. 
+ Wildcard *IntervalsWildcard `json:"wildcard,omitempty"` +} + +func (s *IntervalsQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "all_of": + if err := dec.Decode(&s.AllOf); err != nil { + return err + } + + case "any_of": + if err := dec.Decode(&s.AnyOf); err != nil { + return err + } + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "fuzzy": + if err := dec.Decode(&s.Fuzzy); err != nil { + return err + } + + case "match": + if err := dec.Decode(&s.Match); err != nil { + return err + } + + case "prefix": + if err := dec.Decode(&s.Prefix); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "wildcard": + if err := dec.Decode(&s.Wildcard); err != nil { + return err + } + + } + } + return nil } // NewIntervalsQuery returns a IntervalsQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalswildcard.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalswildcard.go index b2653ffc2..97dcb8306 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalswildcard.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/intervalswildcard.go @@ -16,19 +16,83 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IntervalsWildcard type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L127-L131 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L265-L280 type IntervalsWildcard struct { + // Analyzer Analyzer used to analyze the `pattern`. + // Defaults to the top-level field's analyzer. Analyzer *string `json:"analyzer,omitempty"` - Pattern string `json:"pattern"` + // Pattern Wildcard pattern used to find matching terms. + Pattern string `json:"pattern"` + // UseField If specified, match intervals from this field rather than the top-level + // field. + // The `pattern` is normalized using the search analyzer from this field, unless + // `analyzer` is specified separately. 
UseField *string `json:"use_field,omitempty"` } +func (s *IntervalsWildcard) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = o + + case "use_field": + if err := dec.Decode(&s.UseField); err != nil { + return err + } + + } + } + return nil +} + // NewIntervalsWildcard returns a IntervalsWildcard. func NewIntervalsWildcard() *IntervalsWildcard { r := &IntervalsWildcard{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/invertedindex.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/invertedindex.go index 002e17449..718c655ee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/invertedindex.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/invertedindex.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // InvertedIndex type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L65-L73 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L65-L73 type InvertedIndex struct { Offsets uint `json:"offsets"` Payloads uint `json:"payloads"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/invocation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/invocation.go index 01a212461..b2ff0b9a2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/invocation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/invocation.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // Invocation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/_types/SnapshotLifecycle.ts#L138-L141 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/_types/SnapshotLifecycle.ts#L138-L141 type Invocation struct { SnapshotName string `json:"snapshot_name"` Time DateTime `json:"time"` } +func (s *Invocation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "snapshot_name": + if err := dec.Decode(&s.SnapshotName); err != nil { + return err + } + + case "time": + if err := dec.Decode(&s.Time); err != nil { + return err + } + + } + } + return nil +} + // NewInvocation returns a Invocation. func NewInvocation() *Invocation { r := &Invocation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/invocations.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/invocations.go index 2e8483d5b..82cca2992 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/invocations.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/invocations.go @@ -16,17 +16,60 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Invocations type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L44-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L44-L46 type Invocations struct { Total int64 `json:"total"` } +func (s *Invocations) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + // NewInvocations returns a Invocations. func NewInvocations() *Invocations { r := &Invocations{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iostatdevice.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iostatdevice.go index 22d6aad49..17ed5fd17 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iostatdevice.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iostatdevice.go @@ -16,20 +16,146 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IoStatDevice type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L298-L305 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L730-L755 type IoStatDevice struct { - DeviceName *string `json:"device_name,omitempty"` - Operations *int64 `json:"operations,omitempty"` - ReadKilobytes *int64 `json:"read_kilobytes,omitempty"` - ReadOperations *int64 `json:"read_operations,omitempty"` - WriteKilobytes *int64 `json:"write_kilobytes,omitempty"` - WriteOperations *int64 `json:"write_operations,omitempty"` + // DeviceName The Linux device name. + DeviceName *string `json:"device_name,omitempty"` + // Operations The total number of read and write operations for the device completed since + // starting Elasticsearch. + Operations *int64 `json:"operations,omitempty"` + // ReadKilobytes The total number of kilobytes read for the device since starting + // Elasticsearch. + ReadKilobytes *int64 `json:"read_kilobytes,omitempty"` + // ReadOperations The total number of read operations for the device completed since starting + // Elasticsearch. + ReadOperations *int64 `json:"read_operations,omitempty"` + // WriteKilobytes The total number of kilobytes written for the device since starting + // Elasticsearch. + WriteKilobytes *int64 `json:"write_kilobytes,omitempty"` + // WriteOperations The total number of write operations for the device completed since starting + // Elasticsearch. 
+ WriteOperations *int64 `json:"write_operations,omitempty"` +} + +func (s *IoStatDevice) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "device_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DeviceName = &o + + case "operations": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Operations = &value + case float64: + f := int64(v) + s.Operations = &f + } + + case "read_kilobytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ReadKilobytes = &value + case float64: + f := int64(v) + s.ReadKilobytes = &f + } + + case "read_operations": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ReadOperations = &value + case float64: + f := int64(v) + s.ReadOperations = &f + } + + case "write_kilobytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.WriteKilobytes = &value + case float64: + f := int64(v) + s.WriteKilobytes = &f + } + + case "write_operations": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.WriteOperations = &value + case float64: + f := int64(v) + s.WriteOperations = &f + } + + } + } + return nil } // NewIoStatDevice returns a IoStatDevice. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iostats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iostats.go index 25213fca1..d5591905f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iostats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iostats.go @@ -16,16 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // IoStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L293-L296 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L718-L728 type IoStats struct { + // Devices Array of disk metrics for each device that is backing an Elasticsearch data + // path. + // These disk metrics are probed periodically and averages between the last + // probe and the current probe are computed. Devices []IoStatDevice `json:"devices,omitempty"` - Total *IoStatDevice `json:"total,omitempty"` + // Total The sum of the disk metrics for all devices that back an Elasticsearch data + // path. + Total *IoStatDevice `json:"total,omitempty"` } // NewIoStats returns a IoStats. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipfilter.go index 2dd9583b7..e00208e20 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipfilter.go @@ -16,18 +16,74 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IpFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L167-L170 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L167-L170 type IpFilter struct { Http bool `json:"http"` Transport bool `json:"transport"` } +func (s *IpFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "http": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Http = value + case bool: + s.Http = v + } + + case "transport": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Transport = value + case bool: + s.Transport = v + } + + } + } + return nil +} + // NewIpFilter returns a IpFilter. func NewIpFilter() *IpFilter { r := &IpFilter{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipprefixaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipprefixaggregate.go index acbedc448..d412f5512 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipprefixaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipprefixaggregate.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // IpPrefixAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L628-L629 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L629-L630 type IpPrefixAggregate struct { - Buckets BucketsIpPrefixBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsIpPrefixBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *IpPrefixAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *IpPrefixAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]IpPrefixBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []IpPrefixBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipprefixaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipprefixaggregation.go index 46c816cb5..e0cf95ec7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipprefixaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipprefixaggregation.go @@ -16,31 +16,35 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // IpPrefixAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L514-L543 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L1114-L1143 type IpPrefixAggregation struct { // AppendPrefixLength Defines whether the prefix length is appended to IP address keys in the // response. AppendPrefixLength *bool `json:"append_prefix_length,omitempty"` - // Field The document IP address field to aggregation on. The field mapping type must - // be `ip` + // Field The IP address field to aggregation on. The field mapping type must be `ip`. Field string `json:"field"` // IsIpv6 Defines whether the prefix applies to IPv6 addresses. IsIpv6 *bool `json:"is_ipv6,omitempty"` // Keyed Defines whether buckets are returned as a hash rather than an array in the // response. - Keyed *bool `json:"keyed,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - // MinDocCount Minimum number of documents for buckets to be included in the response. + Keyed *bool `json:"keyed,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // MinDocCount Minimum number of documents in a bucket for it to be included in the + // response. MinDocCount *int64 `json:"min_doc_count,omitempty"` Name *string `json:"name,omitempty"` // PrefixLength Length of the network prefix. 
For IPv4 addresses the accepted range is [0, @@ -49,6 +53,121 @@ type IpPrefixAggregation struct { PrefixLength int `json:"prefix_length"` } +func (s *IpPrefixAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "append_prefix_length": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AppendPrefixLength = &value + case bool: + s.AppendPrefixLength = &v + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "is_ipv6": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsIpv6 = &value + case bool: + s.IsIpv6 = &v + } + + case "keyed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "min_doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MinDocCount = &value + case float64: + f := int64(v) + s.MinDocCount = &f + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "prefix_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PrefixLength = value + case float64: + f := int(v) + s.PrefixLength = f + 
} + + } + } + return nil +} + // NewIpPrefixAggregation returns a IpPrefixAggregation. func NewIpPrefixAggregation() *IpPrefixAggregation { r := &IpPrefixAggregation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipprefixbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipprefixbucket.go index 1e4303570..8524c12cd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipprefixbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipprefixbucket.go @@ -16,25 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // IpPrefixBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L631-L636 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L632-L637 type IpPrefixBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -45,6 +43,7 @@ type IpPrefixBucket struct { } func (s *IpPrefixBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -58,471 +57,586 @@ func (s *IpPrefixBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); 
err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil 
{ - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err 
:= dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err 
- } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "is_ipv6": - if err := dec.Decode(&s.IsIpv6); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsIpv6 = value + case bool: + s.IsIpv6 = v } case "key": - if err := dec.Decode(&s.Key); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = o case "netmask": - if err := dec.Decode(&s.Netmask); err != nil { + 
var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Netmask = &o case "prefix_length": - if err := dec.Decode(&s.PrefixLength); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PrefixLength = value + case float64: + f := int(v) + s.PrefixLength = f + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { 
+ return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := 
NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := 
NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } 
+ s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } } } @@ -549,6 +663,7 @@ func (s IpPrefixBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipproperty.go index 3e46aaaf9..244a9d9bc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ipproperty.go @@ -16,24 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" ) // IpProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/specialized.ts#L59-L72 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/specialized.ts#L59-L73 type IpProperty struct { Boost *Float64 `json:"boost,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -58,6 +58,7 @@ type IpProperty struct { } func (s *IpProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -72,18 +73,49 @@ func (s *IpProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -92,6 +124,9 @@ func (s *IpProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -100,7 +135,9 @@ func (s *IpProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -379,36 +416,77 @@ func (s *IpProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case 
string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "null_value": - if err := dec.Decode(&s.NullValue); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NullValue = &o case "on_script_error": if err := dec.Decode(&s.OnScriptError); err != nil { @@ -416,6 +494,9 @@ func (s *IpProperty) UnmarshalJSON(data []byte) error { } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -424,7 +505,9 @@ func (s *IpProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -703,9 +786,11 @@ func (s *IpProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -715,18 +800,43 @@ func (s *IpProperty) UnmarshalJSON(data []byte) error { } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + 
return err + } + s.Store = &value + case bool: + s.Store = &v } case "time_series_dimension": - if err := dec.Decode(&s.TimeSeriesDimension); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v } case "type": @@ -739,6 +849,34 @@ func (s *IpProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s IpProperty) MarshalJSON() ([]byte, error) { + type innerIpProperty IpProperty + tmp := innerIpProperty{ + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Similarity: s.Similarity, + Store: s.Store, + TimeSeriesDimension: s.TimeSeriesDimension, + Type: s.Type, + } + + tmp.Type = "ip" + + return json.Marshal(tmp) +} + // NewIpProperty returns a IpProperty. func NewIpProperty() *IpProperty { r := &IpProperty{ @@ -747,7 +885,5 @@ func NewIpProperty() *IpProperty { Properties: make(map[string]Property, 0), } - r.Type = "ip" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangeaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangeaggregate.go index 2b319ab85..bf376c477 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangeaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangeaggregate.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // IpRangeAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L555-L557 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L556-L558 type IpRangeAggregate struct { - Buckets BucketsIpRangeBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsIpRangeBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *IpRangeAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *IpRangeAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]IpRangeBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []IpRangeBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangeaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangeaggregation.go index 21e3fef4f..cdb22ed8e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangeaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangeaggregation.go @@ -16,22 +16,75 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // IpRangeAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L249-L252 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L548-L557 type IpRangeAggregation struct { - Field *string `json:"field,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Ranges []IpRangeAggregationRange `json:"ranges,omitempty"` + // Field The date field whose values are used to build ranges. + Field *string `json:"field,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Ranges Array of IP ranges. + Ranges []IpRangeAggregationRange `json:"ranges,omitempty"` +} + +func (s *IpRangeAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "ranges": + if err := dec.Decode(&s.Ranges); err != nil { + return err + } + + } + } + return nil } // NewIpRangeAggregation returns a IpRangeAggregation. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangeaggregationrange.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangeaggregationrange.go index edf94f24e..8941eb5a1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangeaggregationrange.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangeaggregationrange.go @@ -16,17 +16,84 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // IpRangeAggregationRange type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L254-L258 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L559-L572 type IpRangeAggregationRange struct { - From string `json:"from,omitempty"` + // From Start of the range. + From string `json:"from,omitempty"` + // Mask IP range defined as a CIDR mask. Mask *string `json:"mask,omitempty"` - To string `json:"to,omitempty"` + // To End of the range. 
+ To string `json:"to,omitempty"` +} + +func (s *IpRangeAggregationRange) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "from": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.From = o + + case "mask": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Mask = &o + + case "to": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.To = o + + } + } + return nil } // NewIpRangeAggregationRange returns a IpRangeAggregationRange. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangebucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangebucket.go index 767bfad20..8a461eb2a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangebucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangebucket.go @@ -16,25 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // IpRangeBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L559-L563 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L560-L564 type IpRangeBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -44,6 +42,7 @@ type IpRangeBucket struct { } func (s *IpRangeBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,467 +56,569 @@ func (s *IpRangeBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err 
!= nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - 
return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err 
- } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "from": - if err := dec.Decode(&s.From); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.From = &o case "key": - if err := dec.Decode(&s.Key); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = &o case "to": - if err := dec.Decode(&s.To); err != nil { + var tmp json.RawMessage + if err := 
dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.To = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + 
return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": 
+ o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if 
err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } } } @@ -543,6 +644,7 @@ func (s IpRangeBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangeproperty.go index ed94ac8c8..3e7a70e85 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangeproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/iprangeproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // IpRangeProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/range.ts#L46-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/range.ts#L46-L48 type IpRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -51,6 +51,7 @@ type IpRangeProperty struct { } func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -65,23 +66,63 @@ func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -90,6 +131,9 @@ func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -98,7 +142,9 @@ func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -377,28 +423,56 @@ func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + 
s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -407,7 +481,9 @@ func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -686,20 +762,38 @@ func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -712,6 +806,30 @@ func (s *IpRangeProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s IpRangeProperty) MarshalJSON() ([]byte, error) { + type innerIpRangeProperty IpRangeProperty + tmp := innerIpRangeProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + 
tmp.Type = "ip_range" + + return json.Marshal(tmp) +} + // NewIpRangeProperty returns a IpRangeProperty. func NewIpRangeProperty() *IpRangeProperty { r := &IpRangeProperty{ @@ -720,7 +838,5 @@ func NewIpRangeProperty() *IpRangeProperty { Properties: make(map[string]Property, 0), } - r.Type = "ip_range" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/job.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/job.go index 64a350ef0..e9d0837f1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/job.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/job.go @@ -16,41 +16,332 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // Job type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Job.ts#L51-L75 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Job.ts#L61-L180 type Job struct { - AllowLazyOpen bool `json:"allow_lazy_open"` - AnalysisConfig AnalysisConfig `json:"analysis_config"` - AnalysisLimits *AnalysisLimits `json:"analysis_limits,omitempty"` - BackgroundPersistInterval Duration `json:"background_persist_interval,omitempty"` - Blocked *JobBlocked `json:"blocked,omitempty"` - CreateTime DateTime `json:"create_time,omitempty"` - CustomSettings json.RawMessage `json:"custom_settings,omitempty"` - DailyModelSnapshotRetentionAfterDays *int64 `json:"daily_model_snapshot_retention_after_days,omitempty"` - DataDescription DataDescription `json:"data_description"` - DatafeedConfig *MLDatafeed `json:"datafeed_config,omitempty"` - Deleting *bool `json:"deleting,omitempty"` - Description *string `json:"description,omitempty"` - FinishedTime DateTime `json:"finished_time,omitempty"` - Groups []string `json:"groups,omitempty"` - JobId string `json:"job_id"` - JobType *string `json:"job_type,omitempty"` - JobVersion *string `json:"job_version,omitempty"` - ModelPlotConfig *ModelPlotConfig `json:"model_plot_config,omitempty"` - ModelSnapshotId *string `json:"model_snapshot_id,omitempty"` - ModelSnapshotRetentionDays int64 `json:"model_snapshot_retention_days"` - RenormalizationWindowDays *int64 `json:"renormalization_window_days,omitempty"` - ResultsIndexName string `json:"results_index_name"` - ResultsRetentionDays *int64 `json:"results_retention_days,omitempty"` + // AllowLazyOpen Advanced configuration option. + // Specifies whether this job can open when there is insufficient machine + // learning node capacity for it to be immediately assigned to a node. 
+ AllowLazyOpen bool `json:"allow_lazy_open"` + // AnalysisConfig The analysis configuration, which specifies how to analyze the data. + // After you create a job, you cannot change the analysis configuration; all the + // properties are informational. + AnalysisConfig AnalysisConfig `json:"analysis_config"` + // AnalysisLimits Limits can be applied for the resources required to hold the mathematical + // models in memory. + // These limits are approximate and can be set per job. + // They do not control the memory used by other processes, for example the + // Elasticsearch Java processes. + AnalysisLimits *AnalysisLimits `json:"analysis_limits,omitempty"` + // BackgroundPersistInterval Advanced configuration option. + // The time between each periodic persistence of the model. + // The default value is a randomized value between 3 to 4 hours, which avoids + // all jobs persisting at exactly the same time. + // The smallest allowed value is 1 hour. + BackgroundPersistInterval Duration `json:"background_persist_interval,omitempty"` + Blocked *JobBlocked `json:"blocked,omitempty"` + CreateTime DateTime `json:"create_time,omitempty"` + // CustomSettings Advanced configuration option. + // Contains custom metadata about the job. + CustomSettings json.RawMessage `json:"custom_settings,omitempty"` + // DailyModelSnapshotRetentionAfterDays Advanced configuration option, which affects the automatic removal of old + // model snapshots for this job. + // It specifies a period of time (in days) after which only the first snapshot + // per day is retained. + // This period is relative to the timestamp of the most recent snapshot for this + // job. + // Valid values range from 0 to `model_snapshot_retention_days`. + DailyModelSnapshotRetentionAfterDays *int64 `json:"daily_model_snapshot_retention_after_days,omitempty"` + // DataDescription The data description defines the format of the input data when you send data + // to the job by using the post data API. 
+ // Note that when configuring a datafeed, these properties are automatically + // set. + // When data is received via the post data API, it is not stored in + // Elasticsearch. + // Only the results for anomaly detection are retained. + DataDescription DataDescription `json:"data_description"` + // DatafeedConfig The datafeed, which retrieves data from Elasticsearch for analysis by the + // job. + // You can associate only one datafeed with each anomaly detection job. + DatafeedConfig *MLDatafeed `json:"datafeed_config,omitempty"` + // Deleting Indicates that the process of deleting the job is in progress but not yet + // completed. + // It is only reported when `true`. + Deleting *bool `json:"deleting,omitempty"` + // Description A description of the job. + Description *string `json:"description,omitempty"` + // FinishedTime If the job closed or failed, this is the time the job finished, otherwise it + // is `null`. + // This property is informational; you cannot change its value. + FinishedTime DateTime `json:"finished_time,omitempty"` + // Groups A list of job groups. + // A job can belong to no groups or many. + Groups []string `json:"groups,omitempty"` + // JobId Identifier for the anomaly detection job. + // This identifier can contain lowercase alphanumeric characters (a-z and 0-9), + // hyphens, and underscores. + // It must start and end with alphanumeric characters. + JobId string `json:"job_id"` + // JobType Reserved for future use, currently set to `anomaly_detector`. + JobType *string `json:"job_type,omitempty"` + // JobVersion The machine learning configuration version number at which the the job was + // created. + JobVersion *string `json:"job_version,omitempty"` + // ModelPlotConfig This advanced configuration option stores model information along with the + // results. + // It provides a more detailed view into anomaly detection. + // Model plot provides a simplified and indicative view of the model and its + // bounds. 
+ ModelPlotConfig *ModelPlotConfig `json:"model_plot_config,omitempty"` + ModelSnapshotId *string `json:"model_snapshot_id,omitempty"` + // ModelSnapshotRetentionDays Advanced configuration option, which affects the automatic removal of old + // model snapshots for this job. + // It specifies the maximum period of time (in days) that snapshots are + // retained. + // This period is relative to the timestamp of the most recent snapshot for this + // job. + // By default, snapshots ten days older than the newest snapshot are deleted. + ModelSnapshotRetentionDays int64 `json:"model_snapshot_retention_days"` + // RenormalizationWindowDays Advanced configuration option. + // The period over which adjustments to the score are applied, as new data is + // seen. + // The default value is the longer of 30 days or 100 `bucket_spans`. + RenormalizationWindowDays *int64 `json:"renormalization_window_days,omitempty"` + // ResultsIndexName A text string that affects the name of the machine learning results index. + // The default value is `shared`, which generates an index named + // `.ml-anomalies-shared`. + ResultsIndexName string `json:"results_index_name"` + // ResultsRetentionDays Advanced configuration option. + // The period of time (in days) that results are retained. + // Age is calculated relative to the timestamp of the latest bucket result. + // If this property has a non-null value, once per day at 00:30 (server time), + // results that are the specified number of days older than the latest bucket + // result are deleted from Elasticsearch. + // The default value is null, which means all results are retained. + // Annotations generated by the system also count as results for retention + // purposes; they are deleted after the same number of days as results. + // Annotations added by users are retained forever. 
+ ResultsRetentionDays *int64 `json:"results_retention_days,omitempty"` +} + +func (s *Job) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_lazy_open": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowLazyOpen = value + case bool: + s.AllowLazyOpen = v + } + + case "analysis_config": + if err := dec.Decode(&s.AnalysisConfig); err != nil { + return err + } + + case "analysis_limits": + if err := dec.Decode(&s.AnalysisLimits); err != nil { + return err + } + + case "background_persist_interval": + if err := dec.Decode(&s.BackgroundPersistInterval); err != nil { + return err + } + + case "blocked": + if err := dec.Decode(&s.Blocked); err != nil { + return err + } + + case "create_time": + if err := dec.Decode(&s.CreateTime); err != nil { + return err + } + + case "custom_settings": + if err := dec.Decode(&s.CustomSettings); err != nil { + return err + } + + case "daily_model_snapshot_retention_after_days": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DailyModelSnapshotRetentionAfterDays = &value + case float64: + f := int64(v) + s.DailyModelSnapshotRetentionAfterDays = &f + } + + case "data_description": + if err := dec.Decode(&s.DataDescription); err != nil { + return err + } + + case "datafeed_config": + if err := dec.Decode(&s.DatafeedConfig); err != nil { + return err + } + + case "deleting": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Deleting = &value + case bool: + s.Deleting = &v + } + + case "description": + var tmp json.RawMessage + if 
err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "finished_time": + if err := dec.Decode(&s.FinishedTime); err != nil { + return err + } + + case "groups": + if err := dec.Decode(&s.Groups); err != nil { + return err + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return err + } + + case "job_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.JobType = &o + + case "job_version": + if err := dec.Decode(&s.JobVersion); err != nil { + return err + } + + case "model_plot_config": + if err := dec.Decode(&s.ModelPlotConfig); err != nil { + return err + } + + case "model_snapshot_id": + if err := dec.Decode(&s.ModelSnapshotId); err != nil { + return err + } + + case "model_snapshot_retention_days": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ModelSnapshotRetentionDays = value + case float64: + f := int64(v) + s.ModelSnapshotRetentionDays = f + } + + case "renormalization_window_days": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.RenormalizationWindowDays = &value + case float64: + f := int64(v) + s.RenormalizationWindowDays = &f + } + + case "results_index_name": + if err := dec.Decode(&s.ResultsIndexName); err != nil { + return err + } + + case "results_retention_days": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ResultsRetentionDays = &value + case float64: + f := int64(v) + s.ResultsRetentionDays = &f + } + + } + } + 
return nil } // NewJob returns a Job. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobblocked.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobblocked.go index 3a2cf8687..43e7a0852 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobblocked.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobblocked.go @@ -16,22 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jobblockedreason" ) // JobBlocked type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Job.ts#L169-L172 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Job.ts#L392-L395 type JobBlocked struct { Reason jobblockedreason.JobBlockedReason `json:"reason"` TaskId TaskId `json:"task_id,omitempty"` } +func (s *JobBlocked) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "reason": + if err := dec.Decode(&s.Reason); err != nil { + return err + } + + case "task_id": + if err := dec.Decode(&s.TaskId); err != nil { + return err + } + + } + } + return nil +} + // NewJobBlocked returns a JobBlocked. 
func NewJobBlocked() *JobBlocked { r := &JobBlocked{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobconfig.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobconfig.go index 5c5d25739..738cf4e85 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobconfig.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobconfig.go @@ -16,35 +16,274 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // JobConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Job.ts#L77-L95 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Job.ts#L182-L283 type JobConfig struct { - AllowLazyOpen *bool `json:"allow_lazy_open,omitempty"` - AnalysisConfig AnalysisConfig `json:"analysis_config"` - AnalysisLimits *AnalysisLimits `json:"analysis_limits,omitempty"` - BackgroundPersistInterval Duration `json:"background_persist_interval,omitempty"` - CustomSettings json.RawMessage `json:"custom_settings,omitempty"` - DailyModelSnapshotRetentionAfterDays *int64 `json:"daily_model_snapshot_retention_after_days,omitempty"` - DataDescription DataDescription `json:"data_description"` - DatafeedConfig *DatafeedConfig `json:"datafeed_config,omitempty"` - Description *string `json:"description,omitempty"` - Groups []string `json:"groups,omitempty"` - JobId *string `json:"job_id,omitempty"` - JobType *string `json:"job_type,omitempty"` - ModelPlotConfig *ModelPlotConfig `json:"model_plot_config,omitempty"` - 
ModelSnapshotRetentionDays *int64 `json:"model_snapshot_retention_days,omitempty"` - RenormalizationWindowDays *int64 `json:"renormalization_window_days,omitempty"` - ResultsIndexName *string `json:"results_index_name,omitempty"` - ResultsRetentionDays *int64 `json:"results_retention_days,omitempty"` + // AllowLazyOpen Advanced configuration option. Specifies whether this job can open when there + // is insufficient machine learning node capacity for it to be immediately + // assigned to a node. + AllowLazyOpen *bool `json:"allow_lazy_open,omitempty"` + // AnalysisConfig The analysis configuration, which specifies how to analyze the data. + // After you create a job, you cannot change the analysis configuration; all the + // properties are informational. + AnalysisConfig AnalysisConfig `json:"analysis_config"` + // AnalysisLimits Limits can be applied for the resources required to hold the mathematical + // models in memory. + // These limits are approximate and can be set per job. + // They do not control the memory used by other processes, for example the + // Elasticsearch Java processes. + AnalysisLimits *AnalysisLimits `json:"analysis_limits,omitempty"` + // BackgroundPersistInterval Advanced configuration option. + // The time between each periodic persistence of the model. + // The default value is a randomized value between 3 to 4 hours, which avoids + // all jobs persisting at exactly the same time. + // The smallest allowed value is 1 hour. + BackgroundPersistInterval Duration `json:"background_persist_interval,omitempty"` + // CustomSettings Advanced configuration option. + // Contains custom metadata about the job. + CustomSettings json.RawMessage `json:"custom_settings,omitempty"` + // DailyModelSnapshotRetentionAfterDays Advanced configuration option, which affects the automatic removal of old + // model snapshots for this job. + // It specifies a period of time (in days) after which only the first snapshot + // per day is retained. 
+ // This period is relative to the timestamp of the most recent snapshot for this + // job. + DailyModelSnapshotRetentionAfterDays *int64 `json:"daily_model_snapshot_retention_after_days,omitempty"` + // DataDescription The data description defines the format of the input data when you send data + // to the job by using the post data API. + // Note that when configure a datafeed, these properties are automatically set. + DataDescription DataDescription `json:"data_description"` + // DatafeedConfig The datafeed, which retrieves data from Elasticsearch for analysis by the + // job. + // You can associate only one datafeed with each anomaly detection job. + DatafeedConfig *DatafeedConfig `json:"datafeed_config,omitempty"` + // Description A description of the job. + Description *string `json:"description,omitempty"` + // Groups A list of job groups. A job can belong to no groups or many. + Groups []string `json:"groups,omitempty"` + // JobId Identifier for the anomaly detection job. + // This identifier can contain lowercase alphanumeric characters (a-z and 0-9), + // hyphens, and underscores. + // It must start and end with alphanumeric characters. + JobId *string `json:"job_id,omitempty"` + // JobType Reserved for future use, currently set to `anomaly_detector`. + JobType *string `json:"job_type,omitempty"` + // ModelPlotConfig This advanced configuration option stores model information along with the + // results. + // It provides a more detailed view into anomaly detection. + // Model plot provides a simplified and indicative view of the model and its + // bounds. + ModelPlotConfig *ModelPlotConfig `json:"model_plot_config,omitempty"` + // ModelSnapshotRetentionDays Advanced configuration option, which affects the automatic removal of old + // model snapshots for this job. + // It specifies the maximum period of time (in days) that snapshots are + // retained. + // This period is relative to the timestamp of the most recent snapshot for this + // job. 
+ // The default value is `10`, which means snapshots ten days older than the + // newest snapshot are deleted. + ModelSnapshotRetentionDays *int64 `json:"model_snapshot_retention_days,omitempty"` + // RenormalizationWindowDays Advanced configuration option. + // The period over which adjustments to the score are applied, as new data is + // seen. + // The default value is the longer of 30 days or 100 `bucket_spans`. + RenormalizationWindowDays *int64 `json:"renormalization_window_days,omitempty"` + // ResultsIndexName A text string that affects the name of the machine learning results index. + // The default value is `shared`, which generates an index named + // `.ml-anomalies-shared`. + ResultsIndexName *string `json:"results_index_name,omitempty"` + // ResultsRetentionDays Advanced configuration option. + // The period of time (in days) that results are retained. + // Age is calculated relative to the timestamp of the latest bucket result. + // If this property has a non-null value, once per day at 00:30 (server time), + // results that are the specified number of days older than the latest bucket + // result are deleted from Elasticsearch. + // The default value is null, which means all results are retained. + // Annotations generated by the system also count as results for retention + // purposes; they are deleted after the same number of days as results. + // Annotations added by users are retained forever. 
+ ResultsRetentionDays *int64 `json:"results_retention_days,omitempty"` +} + +func (s *JobConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_lazy_open": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowLazyOpen = &value + case bool: + s.AllowLazyOpen = &v + } + + case "analysis_config": + if err := dec.Decode(&s.AnalysisConfig); err != nil { + return err + } + + case "analysis_limits": + if err := dec.Decode(&s.AnalysisLimits); err != nil { + return err + } + + case "background_persist_interval": + if err := dec.Decode(&s.BackgroundPersistInterval); err != nil { + return err + } + + case "custom_settings": + if err := dec.Decode(&s.CustomSettings); err != nil { + return err + } + + case "daily_model_snapshot_retention_after_days": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DailyModelSnapshotRetentionAfterDays = &value + case float64: + f := int64(v) + s.DailyModelSnapshotRetentionAfterDays = &f + } + + case "data_description": + if err := dec.Decode(&s.DataDescription); err != nil { + return err + } + + case "datafeed_config": + if err := dec.Decode(&s.DatafeedConfig); err != nil { + return err + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "groups": + if err := dec.Decode(&s.Groups); err != nil { + return err + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return err + } + + case "job_type": + var tmp json.RawMessage + if err := 
dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.JobType = &o + + case "model_plot_config": + if err := dec.Decode(&s.ModelPlotConfig); err != nil { + return err + } + + case "model_snapshot_retention_days": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ModelSnapshotRetentionDays = &value + case float64: + f := int64(v) + s.ModelSnapshotRetentionDays = &f + } + + case "renormalization_window_days": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.RenormalizationWindowDays = &value + case float64: + f := int64(v) + s.RenormalizationWindowDays = &f + } + + case "results_index_name": + if err := dec.Decode(&s.ResultsIndexName); err != nil { + return err + } + + case "results_retention_days": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ResultsRetentionDays = &value + case float64: + f := int64(v) + s.ResultsRetentionDays = &f + } + + } + } + return nil } // NewJobConfig returns a JobConfig. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobforecaststatistics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobforecaststatistics.go index a01c60983..bf9eb140b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobforecaststatistics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobforecaststatistics.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // JobForecastStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Job.ts#L120-L127 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Job.ts#L343-L350 type JobForecastStatistics struct { ForecastedJobs int `json:"forecasted_jobs"` MemoryBytes *JobStatistics `json:"memory_bytes,omitempty"` @@ -32,6 +40,80 @@ type JobForecastStatistics struct { Total int64 `json:"total"` } +func (s *JobForecastStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "forecasted_jobs": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ForecastedJobs = value + case float64: + f := int(v) + s.ForecastedJobs = f + } + + case "memory_bytes": + if err := dec.Decode(&s.MemoryBytes); err != nil { + return err + } + + case "processing_time_ms": + if err := dec.Decode(&s.ProcessingTimeMs); err != nil { + return err + } + + case "records": + if err := dec.Decode(&s.Records); err != nil { + return err + } + + case "status": + if s.Status == nil { + s.Status = make(map[string]int64, 0) + } + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = value + case 
float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + // NewJobForecastStatistics returns a JobForecastStatistics. func NewJobForecastStatistics() *JobForecastStatistics { r := &JobForecastStatistics{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobsrecord.go index 05685be83..8413455b0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobsrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobsrecord.go @@ -16,11 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/categorizationstatus" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jobstate" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/memorystatus" @@ -28,130 +34,874 @@ import ( // JobsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/ml_jobs/types.ts#L24-L325 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/ml_jobs/types.ts#L24-L347 type JobsRecord struct { - // AssignmentExplanation why the job is or is not assigned to a node + // AssignmentExplanation For open anomaly detection jobs only, contains messages relating to the + // selection of a node to run the job. AssignmentExplanation *string `json:"assignment_explanation,omitempty"` - // BucketsCount bucket count + // BucketsCount The number of bucket results produced by the job. 
BucketsCount *string `json:"buckets.count,omitempty"` - // BucketsTimeExpAvg exponential average bucket processing time (milliseconds) + // BucketsTimeExpAvg The exponential moving average of all bucket processing times, in + // milliseconds. BucketsTimeExpAvg *string `json:"buckets.time.exp_avg,omitempty"` - // BucketsTimeExpAvgHour exponential average bucket processing time by hour (milliseconds) + // BucketsTimeExpAvgHour The exponential moving average of bucket processing times calculated in a one + // hour time window, in milliseconds. BucketsTimeExpAvgHour *string `json:"buckets.time.exp_avg_hour,omitempty"` - // BucketsTimeMax maximum bucket processing time + // BucketsTimeMax The maximum of all bucket processing times, in milliseconds. BucketsTimeMax *string `json:"buckets.time.max,omitempty"` - // BucketsTimeMin minimum bucket processing time + // BucketsTimeMin The minimum of all bucket processing times, in milliseconds. BucketsTimeMin *string `json:"buckets.time.min,omitempty"` - // BucketsTimeTotal total bucket processing time + // BucketsTimeTotal The sum of all bucket processing times, in milliseconds. BucketsTimeTotal *string `json:"buckets.time.total,omitempty"` - // DataBuckets total bucket count + // DataBuckets The total number of buckets processed. DataBuckets *string `json:"data.buckets,omitempty"` - // DataEarliestRecord earliest record time + // DataEarliestRecord The timestamp of the earliest chronologically input document. DataEarliestRecord *string `json:"data.earliest_record,omitempty"` - // DataEmptyBuckets number of empty buckets + // DataEmptyBuckets The number of buckets which did not contain any data. + // If your data contains many empty buckets, consider increasing your + // `bucket_span` or using functions that are tolerant to gaps in data such as + // mean, `non_null_sum` or `non_zero_count`. 
DataEmptyBuckets *string `json:"data.empty_buckets,omitempty"` - // DataInputBytes total input bytes + // DataInputBytes The number of bytes of input data posted to the anomaly detection job. DataInputBytes ByteSize `json:"data.input_bytes,omitempty"` - // DataInputFields total field count + // DataInputFields The total number of fields in input documents posted to the anomaly detection + // job. + // This count includes fields that are not used in the analysis. + // However, be aware that if you are using a datafeed, it extracts only the + // required fields from the documents it retrieves before posting them to the + // job. DataInputFields *string `json:"data.input_fields,omitempty"` - // DataInputRecords total record count + // DataInputRecords The number of input documents posted to the anomaly detection job. DataInputRecords *string `json:"data.input_records,omitempty"` - // DataInvalidDates number of records with invalid dates + // DataInvalidDates The number of input documents with either a missing date field or a date that + // could not be parsed. DataInvalidDates *string `json:"data.invalid_dates,omitempty"` - // DataLast last time data was seen + // DataLast The timestamp at which data was last analyzed, according to server time. DataLast *string `json:"data.last,omitempty"` - // DataLastEmptyBucket last time an empty bucket occurred + // DataLastEmptyBucket The timestamp of the last bucket that did not contain any data. DataLastEmptyBucket *string `json:"data.last_empty_bucket,omitempty"` - // DataLastSparseBucket last time a sparse bucket occurred + // DataLastSparseBucket The timestamp of the last bucket that was considered sparse. DataLastSparseBucket *string `json:"data.last_sparse_bucket,omitempty"` - // DataLatestRecord latest record time + // DataLatestRecord The timestamp of the latest chronologically input document. 
DataLatestRecord *string `json:"data.latest_record,omitempty"` - // DataMissingFields number of records with missing fields + // DataMissingFields The number of input documents that are missing a field that the anomaly + // detection job is configured to analyze. + // Input documents with missing fields are still processed because it is + // possible that not all fields are missing. + // If you are using datafeeds or posting data to the job in JSON format, a high + // `missing_field_count` is often not an indication of data issues. + // It is not necessarily a cause for concern. DataMissingFields *string `json:"data.missing_fields,omitempty"` - // DataOutOfOrderTimestamps number of records handled out of order + // DataOutOfOrderTimestamps The number of input documents that have a timestamp chronologically preceding + // the start of the current anomaly detection bucket offset by the latency + // window. + // This information is applicable only when you provide data to the anomaly + // detection job by using the post data API. + // These out of order documents are discarded, since jobs require time series + // data to be in ascending chronological order. DataOutOfOrderTimestamps *string `json:"data.out_of_order_timestamps,omitempty"` - // DataProcessedFields number of processed fields + // DataProcessedFields The total number of fields in all the documents that have been processed by + // the anomaly detection job. + // Only fields that are specified in the detector configuration object + // contribute to this count. + // The timestamp is not included in this count. DataProcessedFields *string `json:"data.processed_fields,omitempty"` - // DataProcessedRecords number of processed records + // DataProcessedRecords The number of input documents that have been processed by the anomaly + // detection job. + // This value includes documents with missing fields, since they are nonetheless + // analyzed. 
+ // If you use datafeeds and have aggregations in your search query, the + // `processed_record_count` is the number of aggregation results processed, not + // the number of Elasticsearch documents. DataProcessedRecords *string `json:"data.processed_records,omitempty"` - // DataSparseBuckets number of sparse buckets + // DataSparseBuckets The number of buckets that contained few data points compared to the expected + // number of data points. + // If your data contains many sparse buckets, consider using a longer + // `bucket_span`. DataSparseBuckets *string `json:"data.sparse_buckets,omitempty"` - // ForecastsMemoryAvg average memory used by forecasts + // ForecastsMemoryAvg The average memory usage in bytes for forecasts related to the anomaly + // detection job. ForecastsMemoryAvg *string `json:"forecasts.memory.avg,omitempty"` - // ForecastsMemoryMax maximum memory used by forecasts + // ForecastsMemoryMax The maximum memory usage in bytes for forecasts related to the anomaly + // detection job. ForecastsMemoryMax *string `json:"forecasts.memory.max,omitempty"` - // ForecastsMemoryMin minimum memory used by forecasts + // ForecastsMemoryMin The minimum memory usage in bytes for forecasts related to the anomaly + // detection job. ForecastsMemoryMin *string `json:"forecasts.memory.min,omitempty"` - // ForecastsMemoryTotal total memory used by all forecasts + // ForecastsMemoryTotal The total memory usage in bytes for forecasts related to the anomaly + // detection job. ForecastsMemoryTotal *string `json:"forecasts.memory.total,omitempty"` - // ForecastsRecordsAvg average record count for forecasts + // ForecastsRecordsAvg The average number of `model_forecast` documents written for forecasts + // related to the anomaly detection job. 
ForecastsRecordsAvg *string `json:"forecasts.records.avg,omitempty"` - // ForecastsRecordsMax maximum record count for forecasts + // ForecastsRecordsMax The maximum number of `model_forecast` documents written for forecasts + // related to the anomaly detection job. ForecastsRecordsMax *string `json:"forecasts.records.max,omitempty"` - // ForecastsRecordsMin minimum record count for forecasts + // ForecastsRecordsMin The minimum number of `model_forecast` documents written for forecasts + // related to the anomaly detection job. ForecastsRecordsMin *string `json:"forecasts.records.min,omitempty"` - // ForecastsRecordsTotal total record count for all forecasts + // ForecastsRecordsTotal The total number of `model_forecast` documents written for forecasts related + // to the anomaly detection job. ForecastsRecordsTotal *string `json:"forecasts.records.total,omitempty"` - // ForecastsTimeAvg average runtime for all forecasts (milliseconds) + // ForecastsTimeAvg The average runtime in milliseconds for forecasts related to the anomaly + // detection job. ForecastsTimeAvg *string `json:"forecasts.time.avg,omitempty"` - // ForecastsTimeMax maximum run time for forecasts + // ForecastsTimeMax The maximum runtime in milliseconds for forecasts related to the anomaly + // detection job. ForecastsTimeMax *string `json:"forecasts.time.max,omitempty"` - // ForecastsTimeMin minimum runtime for forecasts + // ForecastsTimeMin The minimum runtime in milliseconds for forecasts related to the anomaly + // detection job. ForecastsTimeMin *string `json:"forecasts.time.min,omitempty"` - // ForecastsTimeTotal total runtime for all forecasts + // ForecastsTimeTotal The total runtime in milliseconds for forecasts related to the anomaly + // detection job. ForecastsTimeTotal *string `json:"forecasts.time.total,omitempty"` - // ForecastsTotal total number of forecasts + // ForecastsTotal The number of individual forecasts currently available for the job. 
+ // A value of one or more indicates that forecasts exist. ForecastsTotal *string `json:"forecasts.total,omitempty"` - // Id the job_id + // Id The anomaly detection job identifier. Id *string `json:"id,omitempty"` - // ModelBucketAllocationFailures number of bucket allocation failures + // ModelBucketAllocationFailures The number of buckets for which new entities in incoming data were not + // processed due to insufficient model memory. + // This situation is also signified by a `hard_limit: memory_status` property + // value. ModelBucketAllocationFailures *string `json:"model.bucket_allocation_failures,omitempty"` - // ModelByFields count of 'by' fields + // ModelByFields The number of `by` field values that were analyzed by the models. + // This value is cumulative for all detectors in the job. ModelByFields *string `json:"model.by_fields,omitempty"` - // ModelBytes model size + // ModelBytes The number of bytes of memory used by the models. + // This is the maximum value since the last time the model was persisted. + // If the job is closed, this value indicates the latest size. ModelBytes ByteSize `json:"model.bytes,omitempty"` - // ModelBytesExceeded how much the model has exceeded the limit + // ModelBytesExceeded The number of bytes over the high limit for memory usage at the last + // allocation failure. ModelBytesExceeded ByteSize `json:"model.bytes_exceeded,omitempty"` - // ModelCategorizationStatus current categorization status + // ModelCategorizationStatus The status of categorization for the job. ModelCategorizationStatus *categorizationstatus.CategorizationStatus `json:"model.categorization_status,omitempty"` - // ModelCategorizedDocCount count of categorized documents + // ModelCategorizedDocCount The number of documents that have had a field categorized. 
ModelCategorizedDocCount *string `json:"model.categorized_doc_count,omitempty"` - // ModelDeadCategoryCount count of dead categories + // ModelDeadCategoryCount The number of categories created by categorization that will never be + // assigned again because another category’s definition makes it a superset of + // the dead category. + // Dead categories are a side effect of the way categorization has no prior + // training. ModelDeadCategoryCount *string `json:"model.dead_category_count,omitempty"` - // ModelFailedCategoryCount count of failed categories + // ModelFailedCategoryCount The number of times that categorization wanted to create a new category but + // couldn’t because the job had hit its `model_memory_limit`. + // This count does not track which specific categories failed to be created. + // Therefore you cannot use this value to determine the number of unique + // categories that were missed. ModelFailedCategoryCount *string `json:"model.failed_category_count,omitempty"` - // ModelFrequentCategoryCount count of frequent categories + // ModelFrequentCategoryCount The number of categories that match more than 1% of categorized documents. ModelFrequentCategoryCount *string `json:"model.frequent_category_count,omitempty"` - // ModelLogTime when the model stats were gathered + // ModelLogTime The timestamp when the model stats were gathered, according to server time. ModelLogTime *string `json:"model.log_time,omitempty"` - // ModelMemoryLimit model memory limit + // ModelMemoryLimit The upper limit for model memory usage, checked on increasing values. ModelMemoryLimit *string `json:"model.memory_limit,omitempty"` - // ModelMemoryStatus current memory status + // ModelMemoryStatus The status of the mathematical models. ModelMemoryStatus *memorystatus.MemoryStatus `json:"model.memory_status,omitempty"` - // ModelOverFields count of 'over' fields + // ModelOverFields The number of `over` field values that were analyzed by the models. 
+ // This value is cumulative for all detectors in the job. ModelOverFields *string `json:"model.over_fields,omitempty"` - // ModelPartitionFields count of 'partition' fields + // ModelPartitionFields The number of `partition` field values that were analyzed by the models. + // This value is cumulative for all detectors in the job. ModelPartitionFields *string `json:"model.partition_fields,omitempty"` - // ModelRareCategoryCount count of rare categories + // ModelRareCategoryCount The number of categories that match just one categorized document. ModelRareCategoryCount *string `json:"model.rare_category_count,omitempty"` - // ModelTimestamp the time of the last record when the model stats were gathered + // ModelTimestamp The timestamp of the last record when the model stats were gathered. ModelTimestamp *string `json:"model.timestamp,omitempty"` - // ModelTotalCategoryCount count of categories + // ModelTotalCategoryCount The number of categories created by categorization. ModelTotalCategoryCount *string `json:"model.total_category_count,omitempty"` - // NodeAddress network address of the assigned node + // NodeAddress The network address of the assigned node. NodeAddress *string `json:"node.address,omitempty"` - // NodeEphemeralId ephemeral id of the assigned node + // NodeEphemeralId The ephemeral identifier of the assigned node. NodeEphemeralId *string `json:"node.ephemeral_id,omitempty"` - // NodeId id of the assigned node + // NodeId The uniqe identifier of the assigned node. NodeId *string `json:"node.id,omitempty"` - // NodeName name of the assigned node + // NodeName The name of the assigned node. NodeName *string `json:"node.name,omitempty"` - // OpenedTime the amount of time the job has been opened + // OpenedTime For open jobs only, the amount of time the job has been opened. OpenedTime *string `json:"opened_time,omitempty"` - // State the job state + // State The status of the anomaly detection job. 
State *jobstate.JobState `json:"state,omitempty"` } +func (s *JobsRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "assignment_explanation", "ae": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AssignmentExplanation = &o + + case "buckets.count", "bc", "bucketsCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BucketsCount = &o + + case "buckets.time.exp_avg", "btea", "bucketsTimeExpAvg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BucketsTimeExpAvg = &o + + case "buckets.time.exp_avg_hour", "bteah", "bucketsTimeExpAvgHour": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BucketsTimeExpAvgHour = &o + + case "buckets.time.max", "btmax", "bucketsTimeMax": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BucketsTimeMax = &o + + case "buckets.time.min", "btmin", "bucketsTimeMin": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BucketsTimeMin = &o + + case "buckets.time.total", "btt", "bucketsTimeTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + 
return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BucketsTimeTotal = &o + + case "data.buckets", "db", "dataBuckets": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataBuckets = &o + + case "data.earliest_record", "der", "dataEarliestRecord": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataEarliestRecord = &o + + case "data.empty_buckets", "deb", "dataEmptyBuckets": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataEmptyBuckets = &o + + case "data.input_bytes", "dib", "dataInputBytes": + if err := dec.Decode(&s.DataInputBytes); err != nil { + return err + } + + case "data.input_fields", "dif", "dataInputFields": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataInputFields = &o + + case "data.input_records", "dir", "dataInputRecords": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataInputRecords = &o + + case "data.invalid_dates", "did", "dataInvalidDates": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataInvalidDates = &o + + case "data.last", "dl", "dataLast": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataLast = &o + + case "data.last_empty_bucket", "dleb", "dataLastEmptyBucket": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataLastEmptyBucket = &o + + case "data.last_sparse_bucket", "dlsb", "dataLastSparseBucket": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataLastSparseBucket = &o + + case "data.latest_record", "dlr", "dataLatestRecord": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataLatestRecord = &o + + case "data.missing_fields", "dmf", "dataMissingFields": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataMissingFields = &o + + case "data.out_of_order_timestamps", "doot", "dataOutOfOrderTimestamps": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataOutOfOrderTimestamps = &o + + case "data.processed_fields", "dpf", "dataProcessedFields": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataProcessedFields = &o + + case "data.processed_records", "dpr", "dataProcessedRecords": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + 
s.DataProcessedRecords = &o + + case "data.sparse_buckets", "dsb", "dataSparseBuckets": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataSparseBuckets = &o + + case "forecasts.memory.avg", "fmavg", "forecastsMemoryAvg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsMemoryAvg = &o + + case "forecasts.memory.max", "fmmax", "forecastsMemoryMax": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsMemoryMax = &o + + case "forecasts.memory.min", "fmmin", "forecastsMemoryMin": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsMemoryMin = &o + + case "forecasts.memory.total", "fmt", "forecastsMemoryTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsMemoryTotal = &o + + case "forecasts.records.avg", "fravg", "forecastsRecordsAvg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsRecordsAvg = &o + + case "forecasts.records.max", "frmax", "forecastsRecordsMax": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsRecordsMax = &o + + case "forecasts.records.min", "frmin", 
"forecastsRecordsMin": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsRecordsMin = &o + + case "forecasts.records.total", "frt", "forecastsRecordsTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsRecordsTotal = &o + + case "forecasts.time.avg", "ftavg", "forecastsTimeAvg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsTimeAvg = &o + + case "forecasts.time.max", "ftmax", "forecastsTimeMax": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsTimeMax = &o + + case "forecasts.time.min", "ftmin", "forecastsTimeMin": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsTimeMin = &o + + case "forecasts.time.total", "ftt", "forecastsTimeTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsTimeTotal = &o + + case "forecasts.total", "ft", "forecastsTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ForecastsTotal = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "model.bucket_allocation_failures", "mbaf", 
"modelBucketAllocationFailures": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelBucketAllocationFailures = &o + + case "model.by_fields", "mbf", "modelByFields": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelByFields = &o + + case "model.bytes", "mb", "modelBytes": + if err := dec.Decode(&s.ModelBytes); err != nil { + return err + } + + case "model.bytes_exceeded", "mbe", "modelBytesExceeded": + if err := dec.Decode(&s.ModelBytesExceeded); err != nil { + return err + } + + case "model.categorization_status", "mcs", "modelCategorizationStatus": + if err := dec.Decode(&s.ModelCategorizationStatus); err != nil { + return err + } + + case "model.categorized_doc_count", "mcdc", "modelCategorizedDocCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelCategorizedDocCount = &o + + case "model.dead_category_count", "mdcc", "modelDeadCategoryCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelDeadCategoryCount = &o + + case "model.failed_category_count", "mfcc", "modelFailedCategoryCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelFailedCategoryCount = &o + + case "model.frequent_category_count", "modelFrequentCategoryCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + 
if err != nil { + o = string(tmp[:]) + } + s.ModelFrequentCategoryCount = &o + + case "model.log_time", "mlt", "modelLogTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelLogTime = &o + + case "model.memory_limit", "mml", "modelMemoryLimit": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelMemoryLimit = &o + + case "model.memory_status", "mms", "modelMemoryStatus": + if err := dec.Decode(&s.ModelMemoryStatus); err != nil { + return err + } + + case "model.over_fields", "mof", "modelOverFields": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelOverFields = &o + + case "model.partition_fields", "mpf", "modelPartitionFields": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelPartitionFields = &o + + case "model.rare_category_count", "mrcc", "modelRareCategoryCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelRareCategoryCount = &o + + case "model.timestamp", "mt", "modelTimestamp": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelTimestamp = &o + + case "model.total_category_count", "mtcc", "modelTotalCategoryCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, 
err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelTotalCategoryCount = &o + + case "node.address", "na", "nodeAddress": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeAddress = &o + + case "node.ephemeral_id", "ne", "nodeEphemeralId": + if err := dec.Decode(&s.NodeEphemeralId); err != nil { + return err + } + + case "node.id", "ni", "nodeId": + if err := dec.Decode(&s.NodeId); err != nil { + return err + } + + case "node.name", "nn", "nodeName": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeName = &o + + case "opened_time", "ot": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.OpenedTime = &o + + case "state", "s": + if err := dec.Decode(&s.State); err != nil { + return err + } + + } + } + return nil +} + // NewJobsRecord returns a JobsRecord. func NewJobsRecord() *JobsRecord { r := &JobsRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobstatistics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobstatistics.go index c75b9db85..0b060e5c8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobstatistics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobstatistics.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // JobStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Job.ts#L44-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Job.ts#L54-L59 type JobStatistics struct { Avg Float64 `json:"avg"` Max Float64 `json:"max"` @@ -30,6 +38,90 @@ type JobStatistics struct { Total Float64 `json:"total"` } +func (s *JobStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Avg = f + case float64: + f := Float64(v) + s.Avg = f + } + + case "max": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Max = f + case float64: + f := Float64(v) + s.Max = f + } + + case "min": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Min = f + case float64: + f := Float64(v) + s.Min = f + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Total = f + 
case float64: + f := Float64(v) + s.Total = f + } + + } + } + return nil +} + // NewJobStatistics returns a JobStatistics. func NewJobStatistics() *JobStatistics { r := &JobStatistics{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobstats.go index 8bd91903e..edb087a03 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobstats.go @@ -16,28 +16,148 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jobstate" ) // JobStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Job.ts#L96-L107 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Job.ts#L284-L330 type JobStats struct { - AssignmentExplanation *string `json:"assignment_explanation,omitempty"` - DataCounts DataCounts `json:"data_counts"` - Deleting *bool `json:"deleting,omitempty"` - ForecastsStats JobForecastStatistics `json:"forecasts_stats"` - JobId string `json:"job_id"` - ModelSizeStats ModelSizeStats `json:"model_size_stats"` - Node *DiscoveryNode `json:"node,omitempty"` - OpenTime DateTime `json:"open_time,omitempty"` - State jobstate.JobState `json:"state"` - TimingStats JobTimingStats `json:"timing_stats"` + // AssignmentExplanation For open anomaly detection jobs only, contains messages relating to the + // selection of a node to run the job. 
+ AssignmentExplanation *string `json:"assignment_explanation,omitempty"` + // DataCounts An object that describes the quantity of input to the job and any related + // error counts. + // The `data_count` values are cumulative for the lifetime of a job. + // If a model snapshot is reverted or old results are deleted, the job counts + // are not reset. + DataCounts DataCounts `json:"data_counts"` + // Deleting Indicates that the process of deleting the job is in progress but not yet + // completed. It is only reported when `true`. + Deleting *bool `json:"deleting,omitempty"` + // ForecastsStats An object that provides statistical information about forecasts belonging to + // this job. + // Some statistics are omitted if no forecasts have been made. + ForecastsStats JobForecastStatistics `json:"forecasts_stats"` + // JobId Identifier for the anomaly detection job. + JobId string `json:"job_id"` + // ModelSizeStats An object that provides information about the size and contents of the model. + ModelSizeStats ModelSizeStats `json:"model_size_stats"` + // Node Contains properties for the node that runs the job. + // This information is available only for open jobs. + Node *DiscoveryNode `json:"node,omitempty"` + // OpenTime For open jobs only, the elapsed time for which the job has been open. + OpenTime DateTime `json:"open_time,omitempty"` + // State The status of the anomaly detection job, which can be one of the following + // values: `closed`, `closing`, `failed`, `opened`, `opening`. + State jobstate.JobState `json:"state"` + // TimingStats An object that provides statistical information about timing aspect of this + // job. 
+ TimingStats JobTimingStats `json:"timing_stats"` +} + +func (s *JobStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "assignment_explanation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AssignmentExplanation = &o + + case "data_counts": + if err := dec.Decode(&s.DataCounts); err != nil { + return err + } + + case "deleting": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Deleting = &value + case bool: + s.Deleting = &v + } + + case "forecasts_stats": + if err := dec.Decode(&s.ForecastsStats); err != nil { + return err + } + + case "job_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.JobId = o + + case "model_size_stats": + if err := dec.Decode(&s.ModelSizeStats); err != nil { + return err + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return err + } + + case "open_time": + if err := dec.Decode(&s.OpenTime); err != nil { + return err + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return err + } + + case "timing_stats": + if err := dec.Decode(&s.TimingStats); err != nil { + return err + } + + } + } + return nil } // NewJobStats returns a JobStats. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobtimingstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobtimingstats.go index 0c75bdb0a..53e20966e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobtimingstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobtimingstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // JobTimingStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Job.ts#L109-L118 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Job.ts#L332-L341 type JobTimingStats struct { AverageBucketProcessingTimeMs Float64 `json:"average_bucket_processing_time_ms,omitempty"` BucketCount int64 `json:"bucket_count"` @@ -34,6 +42,76 @@ type JobTimingStats struct { TotalBucketProcessingTimeMs Float64 `json:"total_bucket_processing_time_ms"` } +func (s *JobTimingStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "average_bucket_processing_time_ms": + if err := dec.Decode(&s.AverageBucketProcessingTimeMs); err != nil { + return err + } + + case "bucket_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BucketCount = value + case float64: + f := 
int64(v) + s.BucketCount = f + } + + case "exponential_average_bucket_processing_time_ms": + if err := dec.Decode(&s.ExponentialAverageBucketProcessingTimeMs); err != nil { + return err + } + + case "exponential_average_bucket_processing_time_per_hour_ms": + if err := dec.Decode(&s.ExponentialAverageBucketProcessingTimePerHourMs); err != nil { + return err + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return err + } + + case "maximum_bucket_processing_time_ms": + if err := dec.Decode(&s.MaximumBucketProcessingTimeMs); err != nil { + return err + } + + case "minimum_bucket_processing_time_ms": + if err := dec.Decode(&s.MinimumBucketProcessingTimeMs); err != nil { + return err + } + + case "total_bucket_processing_time_ms": + if err := dec.Decode(&s.TotalBucketProcessingTimeMs); err != nil { + return err + } + + } + } + return nil +} + // NewJobTimingStats returns a JobTimingStats. func NewJobTimingStats() *JobTimingStats { r := &JobTimingStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobusage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobusage.go index fd0146160..b16cff364 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobusage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jobusage.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // JobUsage type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L355-L361 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L364-L370 type JobUsage struct { Count int `json:"count"` CreatedBy map[string]int64 `json:"created_by"` @@ -31,6 +39,65 @@ type JobUsage struct { ModelSize JobStatistics `json:"model_size"` } +func (s *JobUsage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "created_by": + if s.CreatedBy == nil { + s.CreatedBy = make(map[string]int64, 0) + } + if err := dec.Decode(&s.CreatedBy); err != nil { + return err + } + + case "detectors": + if err := dec.Decode(&s.Detectors); err != nil { + return err + } + + case "forecasts": + if err := dec.Decode(&s.Forecasts); err != nil { + return err + } + + case "model_size": + if err := dec.Decode(&s.ModelSize); err != nil { + return err + } + + } + } + return nil +} + // NewJobUsage returns a JobUsage. func NewJobUsage() *JobUsage { r := &JobUsage{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/joinprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/joinprocessor.go index 3b4d9f474..47a09cc9c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/joinprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/joinprocessor.go @@ -16,22 +16,138 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // JoinProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L265-L269 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L790-L805 type JoinProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Separator string `json:"separator"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field Field containing array values to join. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Separator The separator character. + Separator string `json:"separator"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the joined value to. + // By default, the field is updated in-place. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *JoinProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Separator = o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewJoinProcessor returns a JoinProcessor. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/joinproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/joinproperty.go index e8974dca0..99be431e5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/joinproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/joinproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // JoinProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L83-L87 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L83-L87 type JoinProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` EagerGlobalOrdinals *bool `json:"eager_global_ordinals,omitempty"` @@ -46,6 +46,7 @@ type JoinProperty struct { } func (s *JoinProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -65,11 +66,23 @@ func (s *JoinProperty) UnmarshalJSON(data []byte) error { } case "eager_global_ordinals": - if err := dec.Decode(&s.EagerGlobalOrdinals); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.EagerGlobalOrdinals = &value + case bool: + s.EagerGlobalOrdinals = 
&v } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -78,7 +91,9 @@ func (s *JoinProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -357,23 +372,42 @@ func (s *JoinProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -382,7 +416,9 @@ func (s *JoinProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -661,15 +697,37 @@ func (s *JoinProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "relations": - if err := dec.Decode(&s.Relations); 
err != nil { - return err + if s.Relations == nil { + s.Relations = make(map[string][]string, 0) + } + rawMsg := make(map[string]json.RawMessage, 0) + dec.Decode(&rawMsg) + for key, value := range rawMsg { + switch { + case bytes.HasPrefix(value, []byte("\"")), bytes.HasPrefix(value, []byte("{")): + o := new(string) + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return err + } + s.Relations[key] = append(s.Relations[key], *o) + default: + o := []string{} + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return err + } + s.Relations[key] = o + } } case "type": @@ -682,6 +740,25 @@ func (s *JoinProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s JoinProperty) MarshalJSON() ([]byte, error) { + type innerJoinProperty JoinProperty + tmp := innerJoinProperty{ + Dynamic: s.Dynamic, + EagerGlobalOrdinals: s.EagerGlobalOrdinals, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Relations: s.Relations, + Type: s.Type, + } + + tmp.Type = "join" + + return json.Marshal(tmp) +} + // NewJoinProperty returns a JoinProperty. func NewJoinProperty() *JoinProperty { r := &JoinProperty{ @@ -691,7 +768,5 @@ func NewJoinProperty() *JoinProperty { Relations: make(map[string][]string, 0), } - r.Type = "join" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jsonprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jsonprocessor.go index b54188360..06325c6bc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jsonprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jsonprocessor.go @@ -16,28 +16,172 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jsonprocessorconflictstrategy" ) // JsonProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L271-L277 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L807-L836 type JsonProcessor struct { - AddToRoot *bool `json:"add_to_root,omitempty"` + // AddToRoot Flag that forces the parsed JSON to be added at the top level of the + // document. + // `target_field` must not be set when this option is chosen. + AddToRoot *bool `json:"add_to_root,omitempty"` + // AddToRootConflictStrategy When set to `replace`, root fields that conflict with fields from the parsed + // JSON will be overridden. + // When set to `merge`, conflicting fields will be merged. + // Only applicable `if add_to_root` is set to true. AddToRootConflictStrategy *jsonprocessorconflictstrategy.JsonProcessorConflictStrategy `json:"add_to_root_conflict_strategy,omitempty"` - AllowDuplicateKeys *bool `json:"allow_duplicate_keys,omitempty"` - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` + // AllowDuplicateKeys When set to `true`, the JSON parser will not fail if the JSON contains + // duplicate keys. + // Instead, the last encountered value for any duplicate key wins. 
+ AllowDuplicateKeys *bool `json:"allow_duplicate_keys,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to be parsed. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field that the converted structured object will be written into. + // Any existing content in this field will be overwritten. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *JsonProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "add_to_root": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AddToRoot = &value + case bool: + s.AddToRoot = &v + } + + case "add_to_root_conflict_strategy": + if err := dec.Decode(&s.AddToRootConflictStrategy); err != nil { + return err + } + + case "allow_duplicate_keys": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowDuplicateKeys = &value + case bool: + s.AllowDuplicateKeys = &v + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) 
+ } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewJsonProcessor returns a JsonProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvm.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvm.go index 7547cdc72..1a0a57d93 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvm.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvm.go @@ -16,22 +16,129 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Jvm type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L324-L333 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L811-L845 type Jvm struct { - BufferPools map[string]NodeBufferPool `json:"buffer_pools,omitempty"` - Classes *JvmClasses `json:"classes,omitempty"` - Gc *GarbageCollector `json:"gc,omitempty"` - Mem *JvmMemoryStats `json:"mem,omitempty"` - Threads *JvmThreads `json:"threads,omitempty"` - Timestamp *int64 `json:"timestamp,omitempty"` - Uptime *string `json:"uptime,omitempty"` - UptimeInMillis *int64 `json:"uptime_in_millis,omitempty"` + // BufferPools Contains statistics about JVM buffer pools for the node. + BufferPools map[string]NodeBufferPool `json:"buffer_pools,omitempty"` + // Classes Contains statistics about classes loaded by JVM for the node. + Classes *JvmClasses `json:"classes,omitempty"` + // Gc Contains statistics about JVM garbage collectors for the node. + Gc *GarbageCollector `json:"gc,omitempty"` + // Mem Contains JVM memory usage statistics for the node. + Mem *JvmMemoryStats `json:"mem,omitempty"` + // Threads Contains statistics about JVM thread usage for the node. + Threads *JvmThreads `json:"threads,omitempty"` + // Timestamp Last time JVM statistics were refreshed. + Timestamp *int64 `json:"timestamp,omitempty"` + // Uptime Human-readable JVM uptime. + // Only returned if the `human` query parameter is `true`. + Uptime *string `json:"uptime,omitempty"` + // UptimeInMillis JVM uptime in milliseconds. 
+ UptimeInMillis *int64 `json:"uptime_in_millis,omitempty"` +} + +func (s *Jvm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buffer_pools": + if s.BufferPools == nil { + s.BufferPools = make(map[string]NodeBufferPool, 0) + } + if err := dec.Decode(&s.BufferPools); err != nil { + return err + } + + case "classes": + if err := dec.Decode(&s.Classes); err != nil { + return err + } + + case "gc": + if err := dec.Decode(&s.Gc); err != nil { + return err + } + + case "mem": + if err := dec.Decode(&s.Mem); err != nil { + return err + } + + case "threads": + if err := dec.Decode(&s.Threads); err != nil { + return err + } + + case "timestamp": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Timestamp = &value + case float64: + f := int64(v) + s.Timestamp = &f + } + + case "uptime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Uptime = &o + + case "uptime_in_millis": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.UptimeInMillis = &value + case float64: + f := int64(v) + s.UptimeInMillis = &f + } + + } + } + return nil } // NewJvm returns a Jvm. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvmclasses.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvmclasses.go index f1590dfcc..978ccb2f9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvmclasses.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvmclasses.go @@ -16,19 +16,95 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // JvmClasses type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L357-L361 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L908-L921 type JvmClasses struct { + // CurrentLoadedCount Number of classes currently loaded by JVM. CurrentLoadedCount *int64 `json:"current_loaded_count,omitempty"` - TotalLoadedCount *int64 `json:"total_loaded_count,omitempty"` + // TotalLoadedCount Total number of classes loaded since the JVM started. + TotalLoadedCount *int64 `json:"total_loaded_count,omitempty"` + // TotalUnloadedCount Total number of classes unloaded since the JVM started. 
TotalUnloadedCount *int64 `json:"total_unloaded_count,omitempty"` } +func (s *JvmClasses) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current_loaded_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CurrentLoadedCount = &value + case float64: + f := int64(v) + s.CurrentLoadedCount = &f + } + + case "total_loaded_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalLoadedCount = &value + case float64: + f := int64(v) + s.TotalLoadedCount = &f + } + + case "total_unloaded_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalUnloadedCount = &value + case float64: + f := int64(v) + s.TotalUnloadedCount = &f + } + + } + } + return nil +} + // NewJvmClasses returns a JvmClasses. func NewJvmClasses() *JvmClasses { r := &JvmClasses{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvmmemorystats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvmmemorystats.go index a8ef737a7..85a73feee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvmmemorystats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvmmemorystats.go @@ -16,21 +16,154 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // JvmMemoryStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L335-L343 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L847-L876 type JvmMemoryStats struct { - HeapCommittedInBytes *int64 `json:"heap_committed_in_bytes,omitempty"` - HeapMaxInBytes *int64 `json:"heap_max_in_bytes,omitempty"` - HeapUsedInBytes *int64 `json:"heap_used_in_bytes,omitempty"` - HeapUsedPercent *int64 `json:"heap_used_percent,omitempty"` - NonHeapCommittedInBytes *int64 `json:"non_heap_committed_in_bytes,omitempty"` - NonHeapUsedInBytes *int64 `json:"non_heap_used_in_bytes,omitempty"` - Pools map[string]Pool `json:"pools,omitempty"` + // HeapCommittedInBytes Amount of memory, in bytes, available for use by the heap. + HeapCommittedInBytes *int64 `json:"heap_committed_in_bytes,omitempty"` + // HeapMaxInBytes Maximum amount of memory, in bytes, available for use by the heap. + HeapMaxInBytes *int64 `json:"heap_max_in_bytes,omitempty"` + // HeapUsedInBytes Memory, in bytes, currently in use by the heap. + HeapUsedInBytes *int64 `json:"heap_used_in_bytes,omitempty"` + // HeapUsedPercent Percentage of memory currently in use by the heap. + HeapUsedPercent *int64 `json:"heap_used_percent,omitempty"` + // NonHeapCommittedInBytes Amount of non-heap memory available, in bytes. + NonHeapCommittedInBytes *int64 `json:"non_heap_committed_in_bytes,omitempty"` + // NonHeapUsedInBytes Non-heap memory used, in bytes. 
+ NonHeapUsedInBytes *int64 `json:"non_heap_used_in_bytes,omitempty"` + // Pools Contains statistics about heap memory usage for the node. + Pools map[string]Pool `json:"pools,omitempty"` +} + +func (s *JvmMemoryStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "heap_committed_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.HeapCommittedInBytes = &value + case float64: + f := int64(v) + s.HeapCommittedInBytes = &f + } + + case "heap_max_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.HeapMaxInBytes = &value + case float64: + f := int64(v) + s.HeapMaxInBytes = &f + } + + case "heap_used_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.HeapUsedInBytes = &value + case float64: + f := int64(v) + s.HeapUsedInBytes = &f + } + + case "heap_used_percent": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.HeapUsedPercent = &value + case float64: + f := int64(v) + s.HeapUsedPercent = &f + } + + case "non_heap_committed_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NonHeapCommittedInBytes = &value + case float64: + f := int64(v) + s.NonHeapCommittedInBytes = &f + } + + case "non_heap_used_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: 
+ value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NonHeapUsedInBytes = &value + case float64: + f := int64(v) + s.NonHeapUsedInBytes = &f + } + + case "pools": + if s.Pools == nil { + s.Pools = make(map[string]Pool, 0) + } + if err := dec.Decode(&s.Pools); err != nil { + return err + } + + } + } + return nil } // NewJvmMemoryStats returns a JvmMemoryStats. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvmstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvmstats.go index d27acaffa..6d836aec1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvmstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvmstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // JvmStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_memory_stats/types.ts#L50-L63 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_memory_stats/types.ts#L50-L63 type JvmStats struct { // HeapMax Maximum amount of memory available for use by the heap. 
HeapMax ByteSize `json:"heap_max,omitempty"` @@ -40,6 +48,89 @@ type JvmStats struct { JavaInferenceMaxInBytes int `json:"java_inference_max_in_bytes"` } +func (s *JvmStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "heap_max": + if err := dec.Decode(&s.HeapMax); err != nil { + return err + } + + case "heap_max_in_bytes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.HeapMaxInBytes = value + case float64: + f := int(v) + s.HeapMaxInBytes = f + } + + case "java_inference": + if err := dec.Decode(&s.JavaInference); err != nil { + return err + } + + case "java_inference_in_bytes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.JavaInferenceInBytes = value + case float64: + f := int(v) + s.JavaInferenceInBytes = f + } + + case "java_inference_max": + if err := dec.Decode(&s.JavaInferenceMax); err != nil { + return err + } + + case "java_inference_max_in_bytes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.JavaInferenceMaxInBytes = value + case float64: + f := int(v) + s.JavaInferenceMaxInBytes = f + } + + } + } + return nil +} + // NewJvmStats returns a JvmStats. 
func NewJvmStats() *JvmStats { r := &JvmStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvmthreads.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvmthreads.go index 3d4b92a81..713772e96 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvmthreads.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/jvmthreads.go @@ -16,18 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // JvmThreads type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L352-L355 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L897-L906 type JvmThreads struct { - Count *int64 `json:"count,omitempty"` + // Count Number of active threads in use by JVM. + Count *int64 `json:"count,omitempty"` + // PeakCount Highest number of threads used by JVM. 
PeakCount *int64 `json:"peak_count,omitempty"` } +func (s *JvmThreads) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = &value + case float64: + f := int64(v) + s.Count = &f + } + + case "peak_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PeakCount = &value + case float64: + f := int64(v) + s.PeakCount = &f + } + + } + } + return nil +} + // NewJvmThreads returns a JvmThreads. func NewJvmThreads() *JvmThreads { r := &JvmThreads{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keeptypestokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keeptypestokenfilter.go index 6f5edd0bf..53b1c619c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keeptypestokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keeptypestokenfilter.go @@ -16,17 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/keeptypesmode" ) // KeepTypesTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L217-L221 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L218-L222 type KeepTypesTokenFilter struct { Mode *keeptypesmode.KeepTypesMode `json:"mode,omitempty"` Type string `json:"type,omitempty"` @@ -34,11 +39,64 @@ type KeepTypesTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *KeepTypesTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "types": + if err := dec.Decode(&s.Types); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KeepTypesTokenFilter) MarshalJSON() ([]byte, error) { + type innerKeepTypesTokenFilter KeepTypesTokenFilter + tmp := innerKeepTypesTokenFilter{ + Mode: s.Mode, + Type: s.Type, + Types: s.Types, + Version: s.Version, + } + + tmp.Type = "keep_types" + + return json.Marshal(tmp) +} + // NewKeepTypesTokenFilter returns a KeepTypesTokenFilter. 
func NewKeepTypesTokenFilter() *KeepTypesTokenFilter { r := &KeepTypesTokenFilter{} - r.Type = "keep_types" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keepwordstokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keepwordstokenfilter.go index b52329bf1..6e312e0d6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keepwordstokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keepwordstokenfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // KeepWordsTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L223-L228 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L224-L229 type KeepWordsTokenFilter struct { KeepWords []string `json:"keep_words,omitempty"` KeepWordsCase *bool `json:"keep_words_case,omitempty"` @@ -31,11 +39,86 @@ type KeepWordsTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *KeepWordsTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "keep_words": + if err := dec.Decode(&s.KeepWords); err != nil { + return err + } + + case "keep_words_case": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseBool(v) + if err != nil { + return err + } + s.KeepWordsCase = &value + case bool: + s.KeepWordsCase = &v + } + + case "keep_words_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeepWordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KeepWordsTokenFilter) MarshalJSON() ([]byte, error) { + type innerKeepWordsTokenFilter KeepWordsTokenFilter + tmp := innerKeepWordsTokenFilter{ + KeepWords: s.KeepWords, + KeepWordsCase: s.KeepWordsCase, + KeepWordsPath: s.KeepWordsPath, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "keep" + + return json.Marshal(tmp) +} + // NewKeepWordsTokenFilter returns a KeepWordsTokenFilter. func NewKeepWordsTokenFilter() *KeepWordsTokenFilter { r := &KeepWordsTokenFilter{} - r.Type = "keep" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keyedpercentiles.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keyedpercentiles.go index 8eb823439..273781034 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keyedpercentiles.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keyedpercentiles.go @@ -16,11 +16,48 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "io" + + "bytes" + + "encoding/json" + + "errors" + + "fmt" +) + // KeyedPercentiles type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L157-L157 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L158-L158 type KeyedPercentiles map[string]string + +func (s KeyedPercentiles) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + if key, ok := t.(string); ok { + + var tmp interface{} + if err := dec.Decode(&tmp); err != nil { + return err + } + s[key] = fmt.Sprintf("%v", tmp) + + } + } + return nil +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keyedprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keyedprocessor.go index 06c855360..1a83688e9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keyedprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keyedprocessor.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // KeyedProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L157-L160 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L379-L382 type KeyedProcessor struct { Stats *Processor `json:"stats,omitempty"` Type *string `json:"type,omitempty"` } +func (s *KeyedProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + } + } + return nil +} + // NewKeyedProcessor returns a KeyedProcessor. func NewKeyedProcessor() *KeyedProcessor { r := &KeyedProcessor{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keyvalueprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keyvalueprocessor.go index 1a4640345..aa2f1b2d6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keyvalueprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keyvalueprocessor.go @@ -16,30 +16,246 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // KeyValueProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L286-L298 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L845-L897 type KeyValueProcessor struct { - Description *string `json:"description,omitempty"` - ExcludeKeys []string `json:"exclude_keys,omitempty"` - Field string `json:"field"` - FieldSplit string `json:"field_split"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - IncludeKeys []string `json:"include_keys,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Prefix *string `json:"prefix,omitempty"` - StripBrackets *bool `json:"strip_brackets,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` - TrimKey *string `json:"trim_key,omitempty"` - TrimValue *string `json:"trim_value,omitempty"` - ValueSplit string `json:"value_split"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // ExcludeKeys List of keys to exclude from document. + ExcludeKeys []string `json:"exclude_keys,omitempty"` + // Field The field to be parsed. + // Supports template snippets. + Field string `json:"field"` + // FieldSplit Regex pattern to use for splitting key-value pairs. + FieldSplit string `json:"field_split"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. 
+ IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // IncludeKeys List of keys to filter and insert into document. + // Defaults to including all keys. + IncludeKeys []string `json:"include_keys,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Prefix Prefix to be added to extracted keys. + Prefix *string `json:"prefix,omitempty"` + // StripBrackets If `true`. strip brackets `()`, `<>`, `[]` as well as quotes `'` and `"` from + // extracted values. + StripBrackets *bool `json:"strip_brackets,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to insert the extracted keys into. + // Defaults to the root of the document. + // Supports template snippets. + TargetField *string `json:"target_field,omitempty"` + // TrimKey String of characters to trim from extracted keys. + TrimKey *string `json:"trim_key,omitempty"` + // TrimValue String of characters to trim from extracted values. + TrimValue *string `json:"trim_value,omitempty"` + // ValueSplit Regex pattern to use for splitting the key from the value within a key-value + // pair. 
+ ValueSplit string `json:"value_split"` +} + +func (s *KeyValueProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "exclude_keys": + if err := dec.Decode(&s.ExcludeKeys); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "field_split": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FieldSplit = o + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "include_keys": + if err := dec.Decode(&s.IncludeKeys); err != nil { + return err + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "prefix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = 
string(tmp[:]) + } + s.Prefix = &o + + case "strip_brackets": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.StripBrackets = &value + case bool: + s.StripBrackets = &v + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + case "trim_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TrimKey = &o + + case "trim_value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TrimValue = &o + + case "value_split": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueSplit = o + + } + } + return nil } // NewKeyValueProcessor returns a KeyValueProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keywordanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keywordanalyzer.go index 2aefecd8a..6df902772 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keywordanalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keywordanalyzer.go @@ -16,23 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // KeywordAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/analyzers.ts#L47-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/analyzers.ts#L47-L50 type KeywordAnalyzer struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *KeywordAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KeywordAnalyzer) MarshalJSON() ([]byte, error) { + type innerKeywordAnalyzer KeywordAnalyzer + tmp := innerKeywordAnalyzer{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "keyword" + + return json.Marshal(tmp) +} + // NewKeywordAnalyzer returns a KeywordAnalyzer. 
func NewKeywordAnalyzer() *KeywordAnalyzer { r := &KeywordAnalyzer{} - r.Type = "keyword" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keywordmarkertokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keywordmarkertokenfilter.go index 976084af2..1464bc07f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keywordmarkertokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keywordmarkertokenfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // KeywordMarkerTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L230-L236 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L231-L237 type KeywordMarkerTokenFilter struct { IgnoreCase *bool `json:"ignore_case,omitempty"` Keywords []string `json:"keywords,omitempty"` @@ -32,11 +40,99 @@ type KeywordMarkerTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *KeywordMarkerTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ignore_case": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreCase = &value + case bool: + 
s.IgnoreCase = &v + } + + case "keywords": + if err := dec.Decode(&s.Keywords); err != nil { + return err + } + + case "keywords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeywordsPath = &o + + case "keywords_pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeywordsPattern = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KeywordMarkerTokenFilter) MarshalJSON() ([]byte, error) { + type innerKeywordMarkerTokenFilter KeywordMarkerTokenFilter + tmp := innerKeywordMarkerTokenFilter{ + IgnoreCase: s.IgnoreCase, + Keywords: s.Keywords, + KeywordsPath: s.KeywordsPath, + KeywordsPattern: s.KeywordsPattern, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "keyword_marker" + + return json.Marshal(tmp) +} + // NewKeywordMarkerTokenFilter returns a KeywordMarkerTokenFilter. func NewKeywordMarkerTokenFilter() *KeywordMarkerTokenFilter { r := &KeywordMarkerTokenFilter{} - r.Type = "keyword_marker" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keywordproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keywordproperty.go index 7d23b019f..0605a20e7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keywordproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keywordproperty.go @@ -16,24 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" ) // KeywordProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L89-L104 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L89-L105 type KeywordProperty struct { Boost *Float64 `json:"boost,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -60,6 +60,7 @@ type KeywordProperty struct { } func (s *KeywordProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -74,18 +75,49 @@ func (s *KeywordProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -94,11 +126,23 @@ func (s *KeywordProperty) UnmarshalJSON(data []byte) error { } case "eager_global_ordinals": - if err := dec.Decode(&s.EagerGlobalOrdinals); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.EagerGlobalOrdinals = &value + case bool: + s.EagerGlobalOrdinals = &v } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -107,7 +151,9 @@ func (s *KeywordProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -386,20 +432,42 @@ func (s *KeywordProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + 
switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "index_options": @@ -408,26 +476,55 @@ func (s *KeywordProperty) UnmarshalJSON(data []byte) error { } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "normalizer": - if err := dec.Decode(&s.Normalizer); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Normalizer = &o case "norms": - if err := dec.Decode(&s.Norms); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Norms = &value + case bool: + s.Norms = &v } case "null_value": - if err := dec.Decode(&s.NullValue); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NullValue = &o case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -436,7 +533,9 @@ func (s *KeywordProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -715,30 +814,66 @@ func (s *KeywordProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if 
err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "split_queries_on_whitespace": - if err := dec.Decode(&s.SplitQueriesOnWhitespace); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.SplitQueriesOnWhitespace = &value + case bool: + s.SplitQueriesOnWhitespace = &v } case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "time_series_dimension": - if err := dec.Decode(&s.TimeSeriesDimension); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v } case "type": @@ -751,6 +886,36 @@ func (s *KeywordProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s KeywordProperty) MarshalJSON() ([]byte, error) { + type innerKeywordProperty KeywordProperty + tmp := innerKeywordProperty{ + Boost: s.Boost, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + EagerGlobalOrdinals: s.EagerGlobalOrdinals, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + IndexOptions: s.IndexOptions, + Meta: s.Meta, + Normalizer: s.Normalizer, + Norms: s.Norms, + NullValue: s.NullValue, + Properties: s.Properties, + Similarity: s.Similarity, + SplitQueriesOnWhitespace: s.SplitQueriesOnWhitespace, + Store: s.Store, + TimeSeriesDimension: 
s.TimeSeriesDimension, + Type: s.Type, + } + + tmp.Type = "keyword" + + return json.Marshal(tmp) +} + // NewKeywordProperty returns a KeywordProperty. func NewKeywordProperty() *KeywordProperty { r := &KeywordProperty{ @@ -759,7 +924,5 @@ func NewKeywordProperty() *KeywordProperty { Properties: make(map[string]Property, 0), } - r.Type = "keyword" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keywordtokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keywordtokenizer.go index 2419ade9e..a14f20069 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keywordtokenizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/keywordtokenizer.go @@ -16,24 +16,90 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // KeywordTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/tokenizers.ts#L61-L64 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/tokenizers.ts#L62-L65 type KeywordTokenizer struct { BufferSize int `json:"buffer_size"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *KeywordTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buffer_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.BufferSize = value + case float64: + f := int(v) + s.BufferSize = f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KeywordTokenizer) MarshalJSON() ([]byte, error) { + type innerKeywordTokenizer KeywordTokenizer + tmp := innerKeywordTokenizer{ + BufferSize: s.BufferSize, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "keyword" + + return json.Marshal(tmp) +} + // NewKeywordTokenizer returns a KeywordTokenizer. 
func NewKeywordTokenizer() *KeywordTokenizer { r := &KeywordTokenizer{} - r.Type = "keyword" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kibanatoken.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kibanatoken.go index 2e655bdf3..3aefaa3c5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kibanatoken.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kibanatoken.go @@ -16,18 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // KibanaToken type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/enroll_kibana/Response.ts#L27-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/enroll_kibana/Response.ts#L27-L30 type KibanaToken struct { Name string `json:"name"` Value string `json:"value"` } +func (s *KibanaToken) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Value = o + + } + } + return nil +} + // 
NewKibanaToken returns a KibanaToken. func NewKibanaToken() *KibanaToken { r := &KibanaToken{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/knnquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/knnquery.go index c8f62fe6f..416b38ec3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/knnquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/knnquery.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // KnnQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Knn.ts#L26-L41 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Knn.ts#L26-L43 type KnnQuery struct { // Boost Boost value to apply to kNN scores Boost *float32 `json:"boost,omitempty"` @@ -39,6 +47,121 @@ type KnnQuery struct { // QueryVectorBuilder The query vector builder. You must provide a query_vector_builder or // query_vector, but not both. 
QueryVectorBuilder *QueryVectorBuilder `json:"query_vector_builder,omitempty"` + // Similarity The minimum similarity for a vector to be considered a match + Similarity *float32 `json:"similarity,omitempty"` +} + +func (s *KnnQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "filter": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Filter = append(s.Filter, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Filter); err != nil { + return err + } + } + + case "k": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.K = value + case float64: + f := int64(v) + s.K = f + } + + case "num_candidates": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumCandidates = value + case float64: + f := int64(v) + s.NumCandidates = f + } + + case "query_vector": + if err := dec.Decode(&s.QueryVector); err != nil { + return err + } + + case "query_vector_builder": + if err := dec.Decode(&s.QueryVectorBuilder); err != nil { + return err + } + + case "similarity": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + 
value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Similarity = &f + case float64: + f := float32(v) + s.Similarity = &f + } + + } + } + return nil } // NewKnnQuery returns a KnnQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kstemtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kstemtokenfilter.go index 58eddec7f..074dbdda8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kstemtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kstemtokenfilter.go @@ -16,23 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // KStemTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L238-L240 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L239-L241 type KStemTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *KStemTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KStemTokenFilter) MarshalJSON() ([]byte, error) { + type innerKStemTokenFilter KStemTokenFilter + tmp := innerKStemTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "kstem" + + return json.Marshal(tmp) +} + // NewKStemTokenFilter returns a KStemTokenFilter. func NewKStemTokenFilter() *KStemTokenFilter { r := &KStemTokenFilter{} - r.Type = "kstem" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojianalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojianalyzer.go index a53e08b3d..28de19138 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojianalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojianalyzer.go @@ -16,28 +16,88 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/kuromojitokenizationmode" ) // KuromojiAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/kuromoji-plugin.ts#L25-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/kuromoji-plugin.ts#L25-L29 type KuromojiAnalyzer struct { Mode kuromojitokenizationmode.KuromojiTokenizationMode `json:"mode"` Type string `json:"type,omitempty"` UserDictionary *string `json:"user_dictionary,omitempty"` } +func (s *KuromojiAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "user_dictionary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UserDictionary = &o + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KuromojiAnalyzer) MarshalJSON() ([]byte, error) { + type innerKuromojiAnalyzer KuromojiAnalyzer + tmp := innerKuromojiAnalyzer{ + Mode: s.Mode, + Type: s.Type, + UserDictionary: s.UserDictionary, + } + + tmp.Type = "kuromoji" + + return json.Marshal(tmp) +} + // NewKuromojiAnalyzer returns a KuromojiAnalyzer. 
func NewKuromojiAnalyzer() *KuromojiAnalyzer { r := &KuromojiAnalyzer{} - r.Type = "kuromoji" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojiiterationmarkcharfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojiiterationmarkcharfilter.go index 222727aae..7e56170b9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojiiterationmarkcharfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojiiterationmarkcharfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // KuromojiIterationMarkCharFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/kuromoji-plugin.ts#L31-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/kuromoji-plugin.ts#L31-L35 type KuromojiIterationMarkCharFilter struct { NormalizeKana bool `json:"normalize_kana"` NormalizeKanji bool `json:"normalize_kanji"` @@ -30,11 +38,82 @@ type KuromojiIterationMarkCharFilter struct { Version *string `json:"version,omitempty"` } +func (s *KuromojiIterationMarkCharFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "normalize_kana": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return 
err + } + s.NormalizeKana = value + case bool: + s.NormalizeKana = v + } + + case "normalize_kanji": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.NormalizeKanji = value + case bool: + s.NormalizeKanji = v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KuromojiIterationMarkCharFilter) MarshalJSON() ([]byte, error) { + type innerKuromojiIterationMarkCharFilter KuromojiIterationMarkCharFilter + tmp := innerKuromojiIterationMarkCharFilter{ + NormalizeKana: s.NormalizeKana, + NormalizeKanji: s.NormalizeKanji, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "kuromoji_iteration_mark" + + return json.Marshal(tmp) +} + // NewKuromojiIterationMarkCharFilter returns a KuromojiIterationMarkCharFilter. func NewKuromojiIterationMarkCharFilter() *KuromojiIterationMarkCharFilter { r := &KuromojiIterationMarkCharFilter{} - r.Type = "kuromoji_iteration_mark" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojipartofspeechtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojipartofspeechtokenfilter.go index 7a9c4aa9d..ab1618212 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojipartofspeechtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojipartofspeechtokenfilter.go @@ -16,24 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // KuromojiPartOfSpeechTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/kuromoji-plugin.ts#L37-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/kuromoji-plugin.ts#L37-L40 type KuromojiPartOfSpeechTokenFilter struct { Stoptags []string `json:"stoptags"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *KuromojiPartOfSpeechTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stoptags": + if err := dec.Decode(&s.Stoptags); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KuromojiPartOfSpeechTokenFilter) MarshalJSON() ([]byte, error) { + type innerKuromojiPartOfSpeechTokenFilter KuromojiPartOfSpeechTokenFilter + tmp := innerKuromojiPartOfSpeechTokenFilter{ + Stoptags: s.Stoptags, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "kuromoji_part_of_speech" + + return json.Marshal(tmp) +} + // NewKuromojiPartOfSpeechTokenFilter returns a KuromojiPartOfSpeechTokenFilter. 
func NewKuromojiPartOfSpeechTokenFilter() *KuromojiPartOfSpeechTokenFilter { r := &KuromojiPartOfSpeechTokenFilter{} - r.Type = "kuromoji_part_of_speech" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojireadingformtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojireadingformtokenfilter.go index b8f5bbf76..475d2b1b3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojireadingformtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojireadingformtokenfilter.go @@ -16,24 +16,88 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // KuromojiReadingFormTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/kuromoji-plugin.ts#L42-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/kuromoji-plugin.ts#L42-L45 type KuromojiReadingFormTokenFilter struct { Type string `json:"type,omitempty"` UseRomaji bool `json:"use_romaji"` Version *string `json:"version,omitempty"` } +func (s *KuromojiReadingFormTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "use_romaji": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.UseRomaji = value + case bool: + s.UseRomaji = v + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KuromojiReadingFormTokenFilter) MarshalJSON() ([]byte, error) { + type innerKuromojiReadingFormTokenFilter KuromojiReadingFormTokenFilter + tmp := innerKuromojiReadingFormTokenFilter{ + Type: s.Type, + UseRomaji: s.UseRomaji, + Version: s.Version, + } + + tmp.Type = "kuromoji_readingform" + + return json.Marshal(tmp) +} + // NewKuromojiReadingFormTokenFilter returns a KuromojiReadingFormTokenFilter. 
func NewKuromojiReadingFormTokenFilter() *KuromojiReadingFormTokenFilter { r := &KuromojiReadingFormTokenFilter{} - r.Type = "kuromoji_readingform" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojistemmertokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojistemmertokenfilter.go index 0e396d364..6e3ac71fb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojistemmertokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojistemmertokenfilter.go @@ -16,24 +16,90 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // KuromojiStemmerTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/kuromoji-plugin.ts#L47-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/kuromoji-plugin.ts#L47-L50 type KuromojiStemmerTokenFilter struct { MinimumLength int `json:"minimum_length"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *KuromojiStemmerTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "minimum_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinimumLength = value + case float64: + f := int(v) + s.MinimumLength = f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KuromojiStemmerTokenFilter) MarshalJSON() ([]byte, error) { + type innerKuromojiStemmerTokenFilter KuromojiStemmerTokenFilter + tmp := innerKuromojiStemmerTokenFilter{ + MinimumLength: s.MinimumLength, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "kuromoji_stemmer" + + return json.Marshal(tmp) +} + // NewKuromojiStemmerTokenFilter returns a KuromojiStemmerTokenFilter. 
func NewKuromojiStemmerTokenFilter() *KuromojiStemmerTokenFilter { r := &KuromojiStemmerTokenFilter{} - r.Type = "kuromoji_stemmer" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojitokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojitokenizer.go index a97587001..9e82c3ede 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojitokenizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/kuromojitokenizer.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/kuromojitokenizationmode" ) // KuromojiTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/kuromoji-plugin.ts#L58-L67 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/kuromoji-plugin.ts#L58-L67 type KuromojiTokenizer struct { DiscardCompoundToken *bool `json:"discard_compound_token,omitempty"` DiscardPunctuation *bool `json:"discard_punctuation,omitempty"` @@ -39,11 +45,137 @@ type KuromojiTokenizer struct { Version *string `json:"version,omitempty"` } +func (s *KuromojiTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "discard_compound_token": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DiscardCompoundToken = &value + case bool: + s.DiscardCompoundToken = &v + } + + case "discard_punctuation": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DiscardPunctuation = &value + case bool: + s.DiscardPunctuation = &v + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return err + } + + case "nbest_cost": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NbestCost = &value + case float64: + f := int(v) + s.NbestCost = &f + } + + case "nbest_examples": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NbestExamples = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return 
err + } + + case "user_dictionary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UserDictionary = &o + + case "user_dictionary_rules": + if err := dec.Decode(&s.UserDictionaryRules); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s KuromojiTokenizer) MarshalJSON() ([]byte, error) { + type innerKuromojiTokenizer KuromojiTokenizer + tmp := innerKuromojiTokenizer{ + DiscardCompoundToken: s.DiscardCompoundToken, + DiscardPunctuation: s.DiscardPunctuation, + Mode: s.Mode, + NbestCost: s.NbestCost, + NbestExamples: s.NbestExamples, + Type: s.Type, + UserDictionary: s.UserDictionary, + UserDictionaryRules: s.UserDictionaryRules, + Version: s.Version, + } + + tmp.Type = "kuromoji_tokenizer" + + return json.Marshal(tmp) +} + // NewKuromojiTokenizer returns a KuromojiTokenizer. func NewKuromojiTokenizer() *KuromojiTokenizer { r := &KuromojiTokenizer{} - r.Type = "kuromoji_tokenizer" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/languageanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/languageanalyzer.go index f2890d448..a1a95bb3d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/languageanalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/languageanalyzer.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/language" ) // LanguageAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/analyzers.ts#L52-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/analyzers.ts#L52-L59 type LanguageAnalyzer struct { Language language.Language `json:"language"` StemExclusion []string `json:"stem_exclusion"` @@ -36,11 +42,94 @@ type LanguageAnalyzer struct { Version *string `json:"version,omitempty"` } +func (s *LanguageAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "language": + if err := dec.Decode(&s.Language); err != nil { + return err + } + + case "stem_exclusion": + if err := dec.Decode(&s.StemExclusion); err != nil { + return err + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return err + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if 
err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LanguageAnalyzer) MarshalJSON() ([]byte, error) { + type innerLanguageAnalyzer LanguageAnalyzer + tmp := innerLanguageAnalyzer{ + Language: s.Language, + StemExclusion: s.StemExclusion, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "language" + + return json.Marshal(tmp) +} + // NewLanguageAnalyzer returns a LanguageAnalyzer. func NewLanguageAnalyzer() *LanguageAnalyzer { r := &LanguageAnalyzer{} - r.Type = "language" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/languagecontext.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/languagecontext.go index cf33d9c42..d8f16947b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/languagecontext.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/languagecontext.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // LanguageContext type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/get_script_languages/types.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/get_script_languages/types.ts#L22-L25 type LanguageContext struct { Contexts []string `json:"contexts"` Language scriptlanguage.ScriptLanguage `json:"language"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/laplacesmoothingmodel.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/laplacesmoothingmodel.go index 44b97a71c..e2074b21b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/laplacesmoothingmodel.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/laplacesmoothingmodel.go @@ -16,17 +16,62 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // LaplaceSmoothingModel type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L212-L214 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L427-L432 type LaplaceSmoothingModel struct { + // Alpha A constant that is added to all counts to balance weights. 
Alpha Float64 `json:"alpha"` } +func (s *LaplaceSmoothingModel) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alpha": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Alpha = f + case float64: + f := Float64(v) + s.Alpha = f + } + + } + } + return nil +} + // NewLaplaceSmoothingModel returns a LaplaceSmoothingModel. func NewLaplaceSmoothingModel() *LaplaceSmoothingModel { r := &LaplaceSmoothingModel{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/latest.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/latest.go index a9bbe9eb3..6e9fc9642 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/latest.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/latest.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // Latest type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/_types/Transform.ts#L47-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/_types/Transform.ts#L47-L52 type Latest struct { // Sort Specifies the date field that is used to identify the latest documents. 
Sort string `json:"sort"` @@ -30,6 +37,36 @@ type Latest struct { UniqueKey []string `json:"unique_key"` } +func (s *Latest) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "sort": + if err := dec.Decode(&s.Sort); err != nil { + return err + } + + case "unique_key": + if err := dec.Decode(&s.UniqueKey); err != nil { + return err + } + + } + } + return nil +} + // NewLatest returns a Latest. func NewLatest() *Latest { r := &Latest{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/latlongeolocation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/latlongeolocation.go index 9998cc70d..c320096fd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/latlongeolocation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/latlongeolocation.go @@ -16,18 +16,80 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // LatLonGeoLocation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Geo.ts#L110-L113 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Geo.ts#L129-L138 type LatLonGeoLocation struct { + // Lat Latitude Lat Float64 `json:"lat"` + // Lon Longitude Lon Float64 `json:"lon"` } +func (s *LatLonGeoLocation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lat": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Lat = f + case float64: + f := Float64(v) + s.Lat = f + } + + case "lon": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Lon = f + case float64: + f := Float64(v) + s.Lon = f + } + + } + } + return nil +} + // NewLatLonGeoLocation returns a LatLonGeoLocation. func NewLatLonGeoLocation() *LatLonGeoLocation { r := &LatLonGeoLocation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lengthtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lengthtokenfilter.go index fcea63935..281e8f835 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lengthtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lengthtokenfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // LengthTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L242-L246 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L243-L247 type LengthTokenFilter struct { Max *int `json:"max,omitempty"` Min *int `json:"min,omitempty"` @@ -30,11 +38,86 @@ type LengthTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *LengthTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Max = &value + case float64: + f := int(v) + s.Max = &f + } + + case "min": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Min = &value + case float64: + f := int(v) + s.Min = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LengthTokenFilter) MarshalJSON() ([]byte, error) { + type innerLengthTokenFilter LengthTokenFilter + tmp := innerLengthTokenFilter{ + Max: s.Max, + Min: s.Min, + Type: s.Type, + Version: 
s.Version, + } + + tmp.Type = "length" + + return json.Marshal(tmp) +} + // NewLengthTokenFilter returns a LengthTokenFilter. func NewLengthTokenFilter() *LengthTokenFilter { r := &LengthTokenFilter{} - r.Type = "length" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lettertokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lettertokenizer.go index c9a3eb14c..d9857a257 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lettertokenizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lettertokenizer.go @@ -16,23 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // LetterTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/tokenizers.ts#L66-L68 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/tokenizers.ts#L67-L69 type LetterTokenizer struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *LetterTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LetterTokenizer) MarshalJSON() ([]byte, error) { + type innerLetterTokenizer LetterTokenizer + tmp := innerLetterTokenizer{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "letter" + + return json.Marshal(tmp) +} + // NewLetterTokenizer returns a LetterTokenizer. func NewLetterTokenizer() *LetterTokenizer { r := &LetterTokenizer{} - r.Type = "letter" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/license.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/license.go index da38003d6..dec4d412f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/license.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/license.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/licensetype" ) // License type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/license/_types/License.ts#L42-L53 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/license/_types/License.ts#L42-L53 type License struct { ExpiryDateInMillis int64 `json:"expiry_date_in_millis"` IssueDateInMillis int64 `json:"issue_date_in_millis"` @@ -40,6 +46,114 @@ type License struct { Uid string `json:"uid"` } +func (s *License) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expiry_date_in_millis": + if err := dec.Decode(&s.ExpiryDateInMillis); err != nil { + return err + } + + case "issue_date_in_millis": + if err := dec.Decode(&s.IssueDateInMillis); err != nil { + return err + } + + case "issued_to": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IssuedTo = o + + case "issuer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Issuer = o + + case "max_nodes": + if err := dec.Decode(&s.MaxNodes); err != nil { + return err + } + + case "max_resource_units": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MaxResourceUnits = &value + case float64: + f := int64(v) + s.MaxResourceUnits = &f + } + + case "signature": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Signature = o + + case "start_date_in_millis": + if err := dec.Decode(&s.StartDateInMillis); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "uid": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Uid = o + + } + } + return nil +} + // NewLicense returns a License. func NewLicense() *License { r := &License{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/licenseinformation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/licenseinformation.go index ffdb2ddc4..3a9a546e1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/licenseinformation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/licenseinformation.go @@ -16,18 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/licensestatus" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/licensetype" ) // LicenseInformation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/license/get/types.ts#L25-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/license/get/types.ts#L25-L38 type LicenseInformation struct { ExpiryDate DateTime `json:"expiry_date,omitempty"` ExpiryDateInMillis *int64 `json:"expiry_date_in_millis,omitempty"` @@ -43,6 +49,100 @@ type LicenseInformation struct { Uid string `json:"uid"` } +func (s *LicenseInformation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expiry_date": + if err := dec.Decode(&s.ExpiryDate); err != nil { + return err + } + + case "expiry_date_in_millis": + if err := dec.Decode(&s.ExpiryDateInMillis); err != nil { + return err + } + + case "issue_date": + if err := dec.Decode(&s.IssueDate); err != nil { + return err + } + + case "issue_date_in_millis": + if err := dec.Decode(&s.IssueDateInMillis); err != nil { + return err + } + + case "issued_to": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IssuedTo = o + + case "issuer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Issuer = o + + case "max_nodes": + if err := dec.Decode(&s.MaxNodes); err != nil { + return err + } + + case "max_resource_units": + if err := dec.Decode(&s.MaxResourceUnits); err != nil { + return err + } + + case "start_date_in_millis": + if err := dec.Decode(&s.StartDateInMillis); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + 
+ case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "uid": + if err := dec.Decode(&s.Uid); err != nil { + return err + } + + } + } + return nil +} + // NewLicenseInformation returns a LicenseInformation. func NewLicenseInformation() *LicenseInformation { r := &LicenseInformation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycle.go index 44879d53f..6099adfec 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycle.go @@ -16,19 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // Lifecycle type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/get_lifecycle/types.ts#L24-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/get_lifecycle/types.ts#L24-L28 type Lifecycle struct { ModifiedDate DateTime `json:"modified_date"` Policy IlmPolicy `json:"policy"` Version int64 `json:"version"` } +func (s *Lifecycle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "modified_date": + if err := dec.Decode(&s.ModifiedDate); err != nil { + return err + } + + case "policy": + if err := dec.Decode(&s.Policy); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewLifecycle returns a Lifecycle. func NewLifecycle() *Lifecycle { r := &Lifecycle{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycleexplain.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycleexplain.go index 89d0ca848..337263e5f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycleexplain.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycleexplain.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // LifecycleExplainManaged // LifecycleExplainUnmanaged // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/explain_lifecycle/types.ts#L59-L62 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/explain_lifecycle/types.ts#L59-L62 type LifecycleExplain interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycleexplainmanaged.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycleexplainmanaged.go index e1d526c6f..35ecdedc1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycleexplainmanaged.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycleexplainmanaged.go @@ -16,17 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // LifecycleExplainManaged type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/explain_lifecycle/types.ts#L26-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/explain_lifecycle/types.ts#L26-L52 type LifecycleExplainManaged struct { Action *string `json:"action,omitempty"` ActionTime DateTime `json:"action_time,omitempty"` @@ -53,13 +57,203 @@ type LifecycleExplainManaged struct { TimeSinceIndexCreation Duration `json:"time_since_index_creation,omitempty"` } +func (s *LifecycleExplainManaged) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "action": + if err := dec.Decode(&s.Action); err != nil { + return err + } + + case "action_time": + if err := dec.Decode(&s.ActionTime); err != nil { + return err + } + + case "action_time_millis": + if err := dec.Decode(&s.ActionTimeMillis); err != nil { + return err + } + + case "age": + if err := dec.Decode(&s.Age); err != nil { + return err + } + + case "failed_step": + if err := dec.Decode(&s.FailedStep); err != nil { + return err + } + + case "failed_step_retry_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FailedStepRetryCount = &value + case float64: + f := int(v) + s.FailedStepRetryCount = &f + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "index_creation_date": + if err := dec.Decode(&s.IndexCreationDate); err != nil { + return err + } + + case "index_creation_date_millis": + if err := dec.Decode(&s.IndexCreationDateMillis); err != nil { + return err + } + + case "is_auto_retryable_error": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case 
string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsAutoRetryableError = &value + case bool: + s.IsAutoRetryableError = &v + } + + case "lifecycle_date": + if err := dec.Decode(&s.LifecycleDate); err != nil { + return err + } + + case "lifecycle_date_millis": + if err := dec.Decode(&s.LifecycleDateMillis); err != nil { + return err + } + + case "managed": + if err := dec.Decode(&s.Managed); err != nil { + return err + } + + case "phase": + if err := dec.Decode(&s.Phase); err != nil { + return err + } + + case "phase_execution": + if err := dec.Decode(&s.PhaseExecution); err != nil { + return err + } + + case "phase_time": + if err := dec.Decode(&s.PhaseTime); err != nil { + return err + } + + case "phase_time_millis": + if err := dec.Decode(&s.PhaseTimeMillis); err != nil { + return err + } + + case "policy": + if err := dec.Decode(&s.Policy); err != nil { + return err + } + + case "step": + if err := dec.Decode(&s.Step); err != nil { + return err + } + + case "step_info": + if s.StepInfo == nil { + s.StepInfo = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.StepInfo); err != nil { + return err + } + + case "step_time": + if err := dec.Decode(&s.StepTime); err != nil { + return err + } + + case "step_time_millis": + if err := dec.Decode(&s.StepTimeMillis); err != nil { + return err + } + + case "time_since_index_creation": + if err := dec.Decode(&s.TimeSinceIndexCreation); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LifecycleExplainManaged) MarshalJSON() ([]byte, error) { + type innerLifecycleExplainManaged LifecycleExplainManaged + tmp := innerLifecycleExplainManaged{ + Action: s.Action, + ActionTime: s.ActionTime, + ActionTimeMillis: s.ActionTimeMillis, + Age: s.Age, + FailedStep: s.FailedStep, + FailedStepRetryCount: s.FailedStepRetryCount, + Index: s.Index, + IndexCreationDate: s.IndexCreationDate, + IndexCreationDateMillis: 
s.IndexCreationDateMillis, + IsAutoRetryableError: s.IsAutoRetryableError, + LifecycleDate: s.LifecycleDate, + LifecycleDateMillis: s.LifecycleDateMillis, + Managed: s.Managed, + Phase: s.Phase, + PhaseExecution: s.PhaseExecution, + PhaseTime: s.PhaseTime, + PhaseTimeMillis: s.PhaseTimeMillis, + Policy: s.Policy, + Step: s.Step, + StepInfo: s.StepInfo, + StepTime: s.StepTime, + StepTimeMillis: s.StepTimeMillis, + TimeSinceIndexCreation: s.TimeSinceIndexCreation, + } + + tmp.Managed = true + + return json.Marshal(tmp) +} + // NewLifecycleExplainManaged returns a LifecycleExplainManaged. func NewLifecycleExplainManaged() *LifecycleExplainManaged { r := &LifecycleExplainManaged{ StepInfo: make(map[string]json.RawMessage, 0), } - r.Managed = true - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycleexplainphaseexecution.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycleexplainphaseexecution.go index 9f5df17d5..689d9d5bf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycleexplainphaseexecution.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycleexplainphaseexecution.go @@ -16,19 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // LifecycleExplainPhaseExecution type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/explain_lifecycle/types.ts#L64-L68 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/explain_lifecycle/types.ts#L64-L68 type LifecycleExplainPhaseExecution struct { ModifiedDateInMillis int64 `json:"modified_date_in_millis"` Policy string `json:"policy"` Version int64 `json:"version"` } +func (s *LifecycleExplainPhaseExecution) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "modified_date_in_millis": + if err := dec.Decode(&s.ModifiedDateInMillis); err != nil { + return err + } + + case "policy": + if err := dec.Decode(&s.Policy); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewLifecycleExplainPhaseExecution returns a LifecycleExplainPhaseExecution. func NewLifecycleExplainPhaseExecution() *LifecycleExplainPhaseExecution { r := &LifecycleExplainPhaseExecution{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycleexplainunmanaged.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycleexplainunmanaged.go index 4e0e33c9c..c17facdd3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycleexplainunmanaged.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lifecycleexplainunmanaged.go @@ -16,23 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // LifecycleExplainUnmanaged type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/explain_lifecycle/types.ts#L54-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/explain_lifecycle/types.ts#L54-L57 type LifecycleExplainUnmanaged struct { Index string `json:"index"` Managed bool `json:"managed,omitempty"` } +func (s *LifecycleExplainUnmanaged) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "managed": + if err := dec.Decode(&s.Managed); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LifecycleExplainUnmanaged) MarshalJSON() ([]byte, error) { + type innerLifecycleExplainUnmanaged LifecycleExplainUnmanaged + tmp := innerLifecycleExplainUnmanaged{ + Index: s.Index, + Managed: s.Managed, + } + + tmp.Managed = false + + return json.Marshal(tmp) +} + // NewLifecycleExplainUnmanaged returns a LifecycleExplainUnmanaged. 
func NewLifecycleExplainUnmanaged() *LifecycleExplainUnmanaged { r := &LifecycleExplainUnmanaged{} - r.Managed = false - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/like.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/like.go index 45620a875..acfdc47a7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/like.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/like.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // string // LikeDocument // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L103-L108 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L186-L191 type Like interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/likedocument.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/likedocument.go index f7251f2a4..e6cd42039 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/likedocument.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/likedocument.go @@ -16,23 +16,29 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" ) // LikeDocument type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L91-L101 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L165-L184 type LikeDocument struct { - Doc json.RawMessage `json:"doc,omitempty"` - Fields []string `json:"fields,omitempty"` - Id_ *string `json:"_id,omitempty"` + // Doc A document not present in the index. + Doc json.RawMessage `json:"doc,omitempty"` + Fields []string `json:"fields,omitempty"` + // Id_ ID of a document. + Id_ *string `json:"_id,omitempty"` + // Index_ Index of a document. 
Index_ *string `json:"_index,omitempty"` PerFieldAnalyzer map[string]string `json:"per_field_analyzer,omitempty"` Routing *string `json:"routing,omitempty"` @@ -40,6 +46,69 @@ type LikeDocument struct { VersionType *versiontype.VersionType `json:"version_type,omitempty"` } +func (s *LikeDocument) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc": + if err := dec.Decode(&s.Doc); err != nil { + return err + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "per_field_analyzer": + if s.PerFieldAnalyzer == nil { + s.PerFieldAnalyzer = make(map[string]string, 0) + } + if err := dec.Decode(&s.PerFieldAnalyzer); err != nil { + return err + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return err + } + + } + } + return nil +} + // NewLikeDocument returns a LikeDocument. func NewLikeDocument() *LikeDocument { r := &LikeDocument{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/limits.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/limits.go index 6cb3f9139..195506ab8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/limits.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/limits.go @@ -16,19 +16,83 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Limits type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/info/types.ts#L34-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/info/types.ts#L34-L38 type Limits struct { EffectiveMaxModelMemoryLimit string `json:"effective_max_model_memory_limit"` MaxModelMemoryLimit *string `json:"max_model_memory_limit,omitempty"` TotalMlMemory string `json:"total_ml_memory"` } +func (s *Limits) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "effective_max_model_memory_limit": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.EffectiveMaxModelMemoryLimit = o + + case "max_model_memory_limit": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxModelMemoryLimit = &o + + case "total_ml_memory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TotalMlMemory = o + + } + } + return nil +} + // NewLimits returns a Limits. 
func NewLimits() *Limits { r := &Limits{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/limittokencounttokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/limittokencounttokenfilter.go index 1153eebd3..39a29df6c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/limittokencounttokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/limittokencounttokenfilter.go @@ -16,25 +16,95 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // LimitTokenCountTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L248-L252 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L249-L253 type LimitTokenCountTokenFilter struct { - ConsumeAllTokens *bool `json:"consume_all_tokens,omitempty"` - MaxTokenCount *int `json:"max_token_count,omitempty"` - Type string `json:"type,omitempty"` - Version *string `json:"version,omitempty"` + ConsumeAllTokens *bool `json:"consume_all_tokens,omitempty"` + MaxTokenCount Stringifiedinteger `json:"max_token_count,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *LimitTokenCountTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "consume_all_tokens": + var tmp 
interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.ConsumeAllTokens = &value + case bool: + s.ConsumeAllTokens = &v + } + + case "max_token_count": + if err := dec.Decode(&s.MaxTokenCount); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LimitTokenCountTokenFilter) MarshalJSON() ([]byte, error) { + type innerLimitTokenCountTokenFilter LimitTokenCountTokenFilter + tmp := innerLimitTokenCountTokenFilter{ + ConsumeAllTokens: s.ConsumeAllTokens, + MaxTokenCount: s.MaxTokenCount, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "limit" + + return json.Marshal(tmp) } // NewLimitTokenCountTokenFilter returns a LimitTokenCountTokenFilter. func NewLimitTokenCountTokenFilter() *LimitTokenCountTokenFilter { r := &LimitTokenCountTokenFilter{} - r.Type = "limit" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/linearinterpolationsmoothingmodel.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/linearinterpolationsmoothingmodel.go index f1ed2c65c..528aa4df3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/linearinterpolationsmoothingmodel.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/linearinterpolationsmoothingmodel.go @@ -16,19 +16,95 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // LinearInterpolationSmoothingModel type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L216-L220 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L434-L438 type LinearInterpolationSmoothingModel struct { BigramLambda Float64 `json:"bigram_lambda"` TrigramLambda Float64 `json:"trigram_lambda"` UnigramLambda Float64 `json:"unigram_lambda"` } +func (s *LinearInterpolationSmoothingModel) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bigram_lambda": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.BigramLambda = f + case float64: + f := Float64(v) + s.BigramLambda = f + } + + case "trigram_lambda": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.TrigramLambda = f + case float64: + f := Float64(v) + s.TrigramLambda = f + } + + case "unigram_lambda": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.UnigramLambda = f + case float64: + f := Float64(v) + s.UnigramLambda = f + } + + } + } + 
return nil +} + // NewLinearInterpolationSmoothingModel returns a LinearInterpolationSmoothingModel. func NewLinearInterpolationSmoothingModel() *LinearInterpolationSmoothingModel { r := &LinearInterpolationSmoothingModel{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/linearmovingaverageaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/linearmovingaverageaggregation.go index 7cfff6a52..f365b3076 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/linearmovingaverageaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/linearmovingaverageaggregation.go @@ -16,38 +16,43 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // LinearMovingAverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L202-L205 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L242-L245 type LinearMovingAverageAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
- BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Minimize *bool `json:"minimize,omitempty"` - Model string `json:"model,omitempty"` - Name *string `json:"name,omitempty"` - Predict *int `json:"predict,omitempty"` - Settings EmptyObject `json:"settings"` - Window *int `json:"window,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Minimize *bool `json:"minimize,omitempty"` + Model string `json:"model,omitempty"` + Name *string `json:"name,omitempty"` + Predict *int `json:"predict,omitempty"` + Settings EmptyObject `json:"settings"` + Window *int `json:"window,omitempty"` } func (s *LinearMovingAverageAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -67,9 +72,16 @@ func (s *LinearMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -82,8 +94,17 @@ func (s *LinearMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "minimize": - if err := dec.Decode(&s.Minimize); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + 
if err != nil { + return err + } + s.Minimize = &value + case bool: + s.Minimize = &v } case "model": @@ -92,13 +113,31 @@ func (s *LinearMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o case "predict": - if err := dec.Decode(&s.Predict); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Predict = &value + case float64: + f := int(v) + s.Predict = &f } case "settings": @@ -107,8 +146,19 @@ func (s *LinearMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "window": - if err := dec.Decode(&s.Window); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Window = &value + case float64: + f := int(v) + s.Window = &f } } @@ -116,11 +166,30 @@ func (s *LinearMovingAverageAggregation) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s LinearMovingAverageAggregation) MarshalJSON() ([]byte, error) { + type innerLinearMovingAverageAggregation LinearMovingAverageAggregation + tmp := innerLinearMovingAverageAggregation{ + BucketsPath: s.BucketsPath, + Format: s.Format, + GapPolicy: s.GapPolicy, + Meta: s.Meta, + Minimize: s.Minimize, + Model: s.Model, + Name: s.Name, + Predict: s.Predict, + Settings: s.Settings, + Window: s.Window, + } + + tmp.Model = "linear" + + return json.Marshal(tmp) +} + // NewLinearMovingAverageAggregation returns a LinearMovingAverageAggregation. 
func NewLinearMovingAverageAggregation() *LinearMovingAverageAggregation { r := &LinearMovingAverageAggregation{} - r.Model = "linear" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/loggingaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/loggingaction.go index 6b2ea29c3..8c79f70e6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/loggingaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/loggingaction.go @@ -16,19 +16,83 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // LoggingAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L281-L285 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L281-L285 type LoggingAction struct { Category *string `json:"category,omitempty"` Level *string `json:"level,omitempty"` Text string `json:"text"` } +func (s *LoggingAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "category": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Category = &o + + case "level": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Level = &o + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + } + } + return nil +} + // NewLoggingAction returns a LoggingAction. func NewLoggingAction() *LoggingAction { r := &LoggingAction{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/loggingresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/loggingresult.go index 0ca7f1f22..0ab4c5b7a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/loggingresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/loggingresult.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // LoggingResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L287-L289 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L287-L289 type LoggingResult struct { LoggedText string `json:"logged_text"` } +func (s *LoggingResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "logged_text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LoggedText = o + + } + } + return nil +} + // NewLoggingResult returns a LoggingResult. func NewLoggingResult() *LoggingResult { r := &LoggingResult{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/logstashpipeline.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/logstashpipeline.go index ab6eb5af6..454edd1da 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/logstashpipeline.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/logstashpipeline.go @@ -16,20 +16,110 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // LogstashPipeline type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/logstash/_types/Pipeline.ts#L37-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/logstash/_types/Pipeline.ts#L60-L92 type LogstashPipeline struct { - Description string `json:"description"` - LastModified DateTime `json:"last_modified"` - Pipeline string `json:"pipeline"` + // Description Description of the pipeline. + // This description is not used by Elasticsearch or Logstash. + Description string `json:"description"` + // LastModified Date the pipeline was last updated. + // Must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. + LastModified DateTime `json:"last_modified"` + // Pipeline Configuration for the pipeline. + Pipeline string `json:"pipeline"` + // PipelineMetadata Optional metadata about the pipeline. + // May have any contents. + // This metadata is not generated or used by Elasticsearch or Logstash. PipelineMetadata PipelineMetadata `json:"pipeline_metadata"` + // PipelineSettings Settings for the pipeline. + // Supports only flat keys in dot notation. PipelineSettings PipelineSettings `json:"pipeline_settings"` - Username string `json:"username"` + // Username User who last updated the pipeline. 
+ Username string `json:"username"` +} + +func (s *LogstashPipeline) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "last_modified": + if err := dec.Decode(&s.LastModified); err != nil { + return err + } + + case "pipeline": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pipeline = o + + case "pipeline_metadata": + if err := dec.Decode(&s.PipelineMetadata); err != nil { + return err + } + + case "pipeline_settings": + if err := dec.Decode(&s.PipelineSettings); err != nil { + return err + } + + case "username": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Username = o + + } + } + return nil } // NewLogstashPipeline returns a LogstashPipeline. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longnumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longnumberproperty.go index bde78880b..c984aa948 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longnumberproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longnumberproperty.go @@ -16,25 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // LongNumberProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L151-L154 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L154-L157 type LongNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -63,6 +63,7 @@ type LongNumberProperty struct { } func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -77,23 +78,63 @@ func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err 
:= strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -102,6 +143,9 @@ func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -110,7 +154,9 @@ func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -389,35 +435,79 @@ func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + 
s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "null_value": - if err := dec.Decode(&s.NullValue); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NullValue = &value + case float64: + f := int64(v) + s.NullValue = &f } case "on_script_error": @@ -426,6 +516,9 @@ func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -434,7 +527,9 @@ func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -713,9 +808,11 @@ func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -725,18 +822,43 @@ func (s 
*LongNumberProperty) UnmarshalJSON(data []byte) error { } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "time_series_dimension": - if err := dec.Decode(&s.TimeSeriesDimension); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v } case "time_series_metric": @@ -754,6 +876,36 @@ func (s *LongNumberProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s LongNumberProperty) MarshalJSON() ([]byte, error) { + type innerLongNumberProperty LongNumberProperty + tmp := innerLongNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Similarity: s.Similarity, + Store: s.Store, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "long" + + return json.Marshal(tmp) +} + // NewLongNumberProperty returns a LongNumberProperty. 
func NewLongNumberProperty() *LongNumberProperty { r := &LongNumberProperty{ @@ -762,7 +914,5 @@ func NewLongNumberProperty() *LongNumberProperty { Properties: make(map[string]Property, 0), } - r.Type = "long" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longrangeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longrangeproperty.go index b3923f840..20527e750 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longrangeproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longrangeproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // LongRangeProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/range.ts#L50-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/range.ts#L50-L52 type LongRangeProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -51,6 +51,7 @@ type LongRangeProperty struct { } func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -65,23 +66,63 @@ func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + 
s.DocValues = &v } case "dynamic": @@ -90,6 +131,9 @@ func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -98,7 +142,9 @@ func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -377,28 +423,56 @@ func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -407,7 +481,9 @@ func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = 
"object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -686,20 +762,38 @@ func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -712,6 +806,30 @@ func (s *LongRangeProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s LongRangeProperty) MarshalJSON() ([]byte, error) { + type innerLongRangeProperty LongRangeProperty + tmp := innerLongRangeProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + tmp.Type = "long_range" + + return json.Marshal(tmp) +} + // NewLongRangeProperty returns a LongRangeProperty. 
func NewLongRangeProperty() *LongRangeProperty { r := &LongRangeProperty{ @@ -720,7 +838,5 @@ func NewLongRangeProperty() *LongRangeProperty { Properties: make(map[string]Property, 0), } - r.Type = "long_range" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longraretermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longraretermsaggregate.go index 740dbea67..1dfa5d479 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longraretermsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longraretermsaggregate.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // LongRareTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L430-L435 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L431-L436 type LongRareTermsAggregate struct { Buckets BucketsLongRareTermsBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *LongRareTermsAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *LongRareTermsAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]LongRareTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []LongRareTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longraretermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longraretermsbucket.go index dacb49794..2b73b73e3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longraretermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longraretermsbucket.go @@ -16,25 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // LongRareTermsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L437-L440 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L438-L441 type LongRareTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -43,6 +41,7 @@ type LongRareTermsBucket struct { } func (s *LongRareTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -56,462 +55,560 @@ func (s *LongRareTermsBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if 
err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := 
NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != 
nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); 
err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "key": - if err := dec.Decode(&s.Key); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Key = value + case float64: + f := int64(v) + s.Key = f } case "key_as_string": - if err := dec.Decode(&s.KeyAsString); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeyAsString = &o + 
+ default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return 
err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err 
+ } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := 
NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } } } @@ -537,6 +634,7 @@ func (s LongRareTermsBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longtermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longtermsaggregate.go index 17795664b..c48998268 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longtermsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longtermsaggregate.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // LongTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L398-L403 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L399-L404 type LongTermsAggregate struct { - Buckets BucketsLongTermsBucket `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` + Buckets BucketsLongTermsBucket `json:"buckets"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Meta Metadata `json:"meta,omitempty"` + SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` } func (s *LongTermsAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -59,21 +60,33 @@ func (s *LongTermsAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]LongTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []LongTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } case "doc_count_error_upper_bound": - if err := dec.Decode(&s.DocCountErrorUpperBound); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f } case "meta": @@ -82,8 +95,18 @@ func (s *LongTermsAggregate) UnmarshalJSON(data []byte) error { } case "sum_other_doc_count": - if err := dec.Decode(&s.SumOtherDocCount); err != nil { - 
return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SumOtherDocCount = &value + case float64: + f := int64(v) + s.SumOtherDocCount = &f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longtermsbucket.go index 51f2589ca..9b2726b8a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longtermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/longtermsbucket.go @@ -16,25 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // LongTermsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L405-L408 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L406-L409 type LongTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -44,6 +42,7 @@ type LongTermsBucket struct { } func (s *LongTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,467 +56,575 @@ func (s *LongTermsBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := 
NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != 
nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); 
err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "doc_count_error": - if err := dec.Decode(&s.DocCountError); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCountError = &value + case float64: + f := int64(v) + s.DocCountError = &f } case "key": - if err := dec.Decode(&s.Key); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + 
} + s.Key = value + case float64: + f := int64(v) + s.Key = f } case "key_as_string": - if err := dec.Decode(&s.KeyAsString); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeyAsString = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := 
NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := 
NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := 
NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return 
err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } } } @@ -543,6 +650,7 @@ func (s LongTermsBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lowercasenormalizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lowercasenormalizer.go index ef12891ae..d0747063d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lowercasenormalizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lowercasenormalizer.go @@ -16,22 +16,36 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "encoding/json" +) + // LowercaseNormalizer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/normalizers.ts#L26-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/normalizers.ts#L26-L28 type LowercaseNormalizer struct { Type string `json:"type,omitempty"` } +// MarshalJSON override marshalling to include literal value +func (s LowercaseNormalizer) MarshalJSON() ([]byte, error) { + type innerLowercaseNormalizer LowercaseNormalizer + tmp := innerLowercaseNormalizer{ + Type: s.Type, + } + + tmp.Type = "lowercase" + + return json.Marshal(tmp) +} + // NewLowercaseNormalizer returns a LowercaseNormalizer. func NewLowercaseNormalizer() *LowercaseNormalizer { r := &LowercaseNormalizer{} - r.Type = "lowercase" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lowercaseprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lowercaseprocessor.go index 1f990a59b..782019d6a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lowercaseprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lowercaseprocessor.go @@ -16,22 +16,141 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // LowercaseProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L300-L304 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L899-L915 type LowercaseProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to make lowercase. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the converted value to. + // By default, the field is updated in-place. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *LowercaseProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewLowercaseProcessor returns a LowercaseProcessor. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lowercasetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lowercasetokenfilter.go index fdf0e06e9..6ff4155d0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lowercasetokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lowercasetokenfilter.go @@ -16,24 +16,86 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // LowercaseTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L254-L257 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L255-L258 type LowercaseTokenFilter struct { Language *string `json:"language,omitempty"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *LowercaseTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "language": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Language = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON 
override marshalling to include literal value +func (s LowercaseTokenFilter) MarshalJSON() ([]byte, error) { + type innerLowercaseTokenFilter LowercaseTokenFilter + tmp := innerLowercaseTokenFilter{ + Language: s.Language, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "lowercase" + + return json.Marshal(tmp) +} + // NewLowercaseTokenFilter returns a LowercaseTokenFilter. func NewLowercaseTokenFilter() *LowercaseTokenFilter { r := &LowercaseTokenFilter{} - r.Type = "lowercase" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lowercasetokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lowercasetokenizer.go index 8b8a28aa7..c77adeed3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lowercasetokenizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/lowercasetokenizer.go @@ -16,23 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // LowercaseTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/tokenizers.ts#L70-L72 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/tokenizers.ts#L71-L73 type LowercaseTokenizer struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *LowercaseTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s LowercaseTokenizer) MarshalJSON() ([]byte, error) { + type innerLowercaseTokenizer LowercaseTokenizer + tmp := innerLowercaseTokenizer{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "lowercase" + + return json.Marshal(tmp) +} + // NewLowercaseTokenizer returns a LowercaseTokenizer. func NewLowercaseTokenizer() *LowercaseTokenizer { r := &LowercaseTokenizer{} - r.Type = "lowercase" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/machinelearning.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/machinelearning.go index 9f33db46d..a05c10400 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/machinelearning.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/machinelearning.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MachineLearning type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L363-L370 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L372-L379 type MachineLearning struct { Available bool `json:"available"` DataFrameAnalyticsJobs MlDataFrameAnalyticsJobs `json:"data_frame_analytics_jobs"` @@ -35,6 +43,96 @@ type MachineLearning struct { NodeCount int `json:"node_count"` } +func (s *MachineLearning) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "data_frame_analytics_jobs": + if err := dec.Decode(&s.DataFrameAnalyticsJobs); err != nil { + return err + } + + case "datafeeds": + if s.Datafeeds == nil { + s.Datafeeds = make(map[string]XpackDatafeed, 0) + } + if err := dec.Decode(&s.Datafeeds); err != nil { + return err + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "inference": + if err := dec.Decode(&s.Inference); err != nil { + return err + } + + case "jobs": + if s.Jobs == nil { + s.Jobs = 
make(map[string]JobUsage, 0) + } + if err := dec.Decode(&s.Jobs); err != nil { + return err + } + + case "node_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NodeCount = value + case float64: + f := int(v) + s.NodeCount = f + } + + } + } + return nil +} + // NewMachineLearning returns a MachineLearning. func NewMachineLearning() *MachineLearning { r := &MachineLearning{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/manageuserprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/manageuserprivileges.go index 321fddc31..31c4a50f9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/manageuserprivileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/manageuserprivileges.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ManageUserPrivileges type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/Privileges.ts#L195-L197 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/Privileges.ts#L197-L199 type ManageUserPrivileges struct { Applications []string `json:"applications"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mapboxvectortiles.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mapboxvectortiles.go index 047940f6b..2599331a3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mapboxvectortiles.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mapboxvectortiles.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // MapboxVectorTiles type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Binary.ts#L21-L21 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Binary.ts#L21-L21 type MapboxVectorTiles []byte diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappingcharfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappingcharfilter.go index a7cc766d0..be434b81c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappingcharfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappingcharfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MappingCharFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/char_filters.ts#L47-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/char_filters.ts#L47-L51 type MappingCharFilter struct { Mappings []string `json:"mappings,omitempty"` MappingsPath *string `json:"mappings_path,omitempty"` @@ -30,11 +38,71 @@ type MappingCharFilter struct { Version *string `json:"version,omitempty"` } +func (s *MappingCharFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mappings": + if err := dec.Decode(&s.Mappings); err != nil { + return err + } + + case "mappings_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MappingsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s MappingCharFilter) MarshalJSON() ([]byte, error) { + type innerMappingCharFilter MappingCharFilter + tmp := innerMappingCharFilter{ + Mappings: s.Mappings, + MappingsPath: s.MappingsPath, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "mapping" + + return json.Marshal(tmp) +} + // 
NewMappingCharFilter returns a MappingCharFilter. func NewMappingCharFilter() *MappingCharFilter { r := &MappingCharFilter{} - r.Type = "mapping" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettings.go index aacd774da..c6a8f7adb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettings.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MappingLimitSettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L402-L415 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L402-L415 type MappingLimitSettings struct { Coerce *bool `json:"coerce,omitempty"` Depth *MappingLimitSettingsDepth `json:"depth,omitempty"` @@ -34,6 +42,84 @@ type MappingLimitSettings struct { TotalFields *MappingLimitSettingsTotalFields `json:"total_fields,omitempty"` } +func (s *MappingLimitSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "coerce": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err 
+ } + s.Coerce = &value + case bool: + s.Coerce = &v + } + + case "depth": + if err := dec.Decode(&s.Depth); err != nil { + return err + } + + case "dimension_fields": + if err := dec.Decode(&s.DimensionFields); err != nil { + return err + } + + case "field_name_length": + if err := dec.Decode(&s.FieldNameLength); err != nil { + return err + } + + case "ignore_malformed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v + } + + case "nested_fields": + if err := dec.Decode(&s.NestedFields); err != nil { + return err + } + + case "nested_objects": + if err := dec.Decode(&s.NestedObjects); err != nil { + return err + } + + case "total_fields": + if err := dec.Decode(&s.TotalFields); err != nil { + return err + } + + } + } + return nil +} + // NewMappingLimitSettings returns a MappingLimitSettings. func NewMappingLimitSettings() *MappingLimitSettings { r := &MappingLimitSettings{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsdepth.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsdepth.go index ca6eb1acc..db0627962 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsdepth.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsdepth.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MappingLimitSettingsDepth type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L427-L434 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L427-L434 type MappingLimitSettingsDepth struct { // Limit The maximum depth for a field, which is measured as the number of inner // objects. For instance, if all fields are defined @@ -31,6 +39,42 @@ type MappingLimitSettingsDepth struct { Limit *int `json:"limit,omitempty"` } +func (s *MappingLimitSettingsDepth) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "limit": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Limit = &value + case float64: + f := int(v) + s.Limit = &f + } + + } + } + return nil +} + // NewMappingLimitSettingsDepth returns a MappingLimitSettingsDepth. func NewMappingLimitSettingsDepth() *MappingLimitSettingsDepth { r := &MappingLimitSettingsDepth{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsdimensionfields.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsdimensionfields.go index d5b9edd7d..bec17a0c3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsdimensionfields.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsdimensionfields.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MappingLimitSettingsDimensionFields type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L464-L470 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L464-L470 type MappingLimitSettingsDimensionFields struct { // Limit [preview] This functionality is in technical preview and may be changed or // removed in a future release. Elastic will @@ -31,6 +39,42 @@ type MappingLimitSettingsDimensionFields struct { Limit *int `json:"limit,omitempty"` } +func (s *MappingLimitSettingsDimensionFields) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "limit": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Limit = &value + case float64: + f := int(v) + s.Limit = &f + } + + } + } + return nil +} + // NewMappingLimitSettingsDimensionFields returns a MappingLimitSettingsDimensionFields. 
func NewMappingLimitSettingsDimensionFields() *MappingLimitSettingsDimensionFields { r := &MappingLimitSettingsDimensionFields{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsfieldnamelength.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsfieldnamelength.go index 12155877f..e87e64fdc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsfieldnamelength.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsfieldnamelength.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MappingLimitSettingsFieldNameLength type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L455-L462 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L455-L462 type MappingLimitSettingsFieldNameLength struct { // Limit Setting for the maximum length of a field name. 
This setting isn’t really // something that addresses mappings explosion but @@ -33,6 +41,41 @@ type MappingLimitSettingsFieldNameLength struct { Limit *int64 `json:"limit,omitempty"` } +func (s *MappingLimitSettingsFieldNameLength) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "limit": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Limit = &value + case float64: + f := int64(v) + s.Limit = &f + } + + } + } + return nil +} + // NewMappingLimitSettingsFieldNameLength returns a MappingLimitSettingsFieldNameLength. func NewMappingLimitSettingsFieldNameLength() *MappingLimitSettingsFieldNameLength { r := &MappingLimitSettingsFieldNameLength{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsnestedfields.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsnestedfields.go index 64b2fcb41..356126df1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsnestedfields.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsnestedfields.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MappingLimitSettingsNestedFields type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L436-L444 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L436-L444 type MappingLimitSettingsNestedFields struct { // Limit The maximum number of distinct nested mappings in an index. The nested type // should only be used in special cases, when @@ -32,6 +40,42 @@ type MappingLimitSettingsNestedFields struct { Limit *int `json:"limit,omitempty"` } +func (s *MappingLimitSettingsNestedFields) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "limit": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Limit = &value + case float64: + f := int(v) + s.Limit = &f + } + + } + } + return nil +} + // NewMappingLimitSettingsNestedFields returns a MappingLimitSettingsNestedFields. func NewMappingLimitSettingsNestedFields() *MappingLimitSettingsNestedFields { r := &MappingLimitSettingsNestedFields{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsnestedobjects.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsnestedobjects.go index 994ebcdb8..81aee52e1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsnestedobjects.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingsnestedobjects.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MappingLimitSettingsNestedObjects type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L446-L453 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L446-L453 type MappingLimitSettingsNestedObjects struct { // Limit The maximum number of nested JSON objects that a single document can contain // across all nested types. This limit helps @@ -31,6 +39,42 @@ type MappingLimitSettingsNestedObjects struct { Limit *int `json:"limit,omitempty"` } +func (s *MappingLimitSettingsNestedObjects) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "limit": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Limit = &value + case float64: + f := int(v) + s.Limit = &f + } + + } + } + return nil +} + // NewMappingLimitSettingsNestedObjects returns a MappingLimitSettingsNestedObjects. 
func NewMappingLimitSettingsNestedObjects() *MappingLimitSettingsNestedObjects { r := &MappingLimitSettingsNestedObjects{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingstotalfields.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingstotalfields.go index fc3612084..a8b35ca8d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingstotalfields.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappinglimitsettingstotalfields.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MappingLimitSettingsTotalFields type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L417-L425 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L417-L425 type MappingLimitSettingsTotalFields struct { // Limit The maximum number of fields in an index. Field and object mappings, as well // as field aliases count towards this limit. 
@@ -33,6 +41,42 @@ type MappingLimitSettingsTotalFields struct { Limit *int `json:"limit,omitempty"` } +func (s *MappingLimitSettingsTotalFields) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "limit": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Limit = &value + case float64: + f := int(v) + s.Limit = &f + } + + } + } + return nil +} + // NewMappingLimitSettingsTotalFields returns a MappingLimitSettingsTotalFields. func NewMappingLimitSettingsTotalFields() *MappingLimitSettingsTotalFields { r := &MappingLimitSettingsTotalFields{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappingstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappingstats.go index 72fd694df..7df58843b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappingstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mappingstats.go @@ -16,19 +16,82 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MappingStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/stats/types.ts#L177-L181 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/stats/types.ts#L186-L190 type MappingStats struct { TotalCount int64 `json:"total_count"` TotalEstimatedOverhead ByteSize `json:"total_estimated_overhead,omitempty"` TotalEstimatedOverheadInBytes int64 `json:"total_estimated_overhead_in_bytes"` } +func (s *MappingStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "total_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalCount = value + case float64: + f := int64(v) + s.TotalCount = f + } + + case "total_estimated_overhead": + if err := dec.Decode(&s.TotalEstimatedOverhead); err != nil { + return err + } + + case "total_estimated_overhead_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalEstimatedOverheadInBytes = value + case float64: + f := int64(v) + s.TotalEstimatedOverheadInBytes = f + } + + } + } + return nil +} + // NewMappingStats returns a MappingStats. 
func NewMappingStats() *MappingStats { r := &MappingStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/masterisstableindicator.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/masterisstableindicator.go new file mode 100644 index 000000000..37ae1c7a4 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/masterisstableindicator.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indicatorhealthstatus" +) + +// MasterIsStableIndicator type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L79-L83 +type MasterIsStableIndicator struct { + Details *MasterIsStableIndicatorDetails `json:"details,omitempty"` + Diagnosis []Diagnosis `json:"diagnosis,omitempty"` + Impacts []Impact `json:"impacts,omitempty"` + Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` + Symptom string `json:"symptom"` +} + +func (s *MasterIsStableIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return err + } + + case "diagnosis": + if err := dec.Decode(&s.Diagnosis); err != nil { + return err + } + + case "impacts": + if err := dec.Decode(&s.Impacts); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "symptom": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Symptom = o + + } + } + return nil +} + +// NewMasterIsStableIndicator returns a MasterIsStableIndicator. +func NewMasterIsStableIndicator() *MasterIsStableIndicator { + r := &MasterIsStableIndicator{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/masterisstableindicatorclusterformationnode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/masterisstableindicatorclusterformationnode.go new file mode 100644 index 000000000..4c569b189 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/masterisstableindicatorclusterformationnode.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// MasterIsStableIndicatorClusterFormationNode type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L98-L102 +type MasterIsStableIndicatorClusterFormationNode struct { + ClusterFormationMessage string `json:"cluster_formation_message"` + Name *string `json:"name,omitempty"` + NodeId string `json:"node_id"` +} + +func (s *MasterIsStableIndicatorClusterFormationNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cluster_formation_message": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClusterFormationMessage = o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "node_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeId = o + + } + } + return nil +} + +// NewMasterIsStableIndicatorClusterFormationNode returns a MasterIsStableIndicatorClusterFormationNode. 
+func NewMasterIsStableIndicatorClusterFormationNode() *MasterIsStableIndicatorClusterFormationNode { + r := &MasterIsStableIndicatorClusterFormationNode{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/masterisstableindicatordetails.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/masterisstableindicatordetails.go new file mode 100644 index 000000000..053c23dbf --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/masterisstableindicatordetails.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +// MasterIsStableIndicatorDetails type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L84-L89 +type MasterIsStableIndicatorDetails struct { + ClusterFormation []MasterIsStableIndicatorClusterFormationNode `json:"cluster_formation,omitempty"` + CurrentMaster IndicatorNode `json:"current_master"` + ExceptionFetchingHistory *MasterIsStableIndicatorExceptionFetchingHistory `json:"exception_fetching_history,omitempty"` + RecentMasters []IndicatorNode `json:"recent_masters"` +} + +// NewMasterIsStableIndicatorDetails returns a MasterIsStableIndicatorDetails. +func NewMasterIsStableIndicatorDetails() *MasterIsStableIndicatorDetails { + r := &MasterIsStableIndicatorDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/masterisstableindicatorexceptionfetchinghistory.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/masterisstableindicatorexceptionfetchinghistory.go new file mode 100644 index 000000000..3e8bef8d7 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/masterisstableindicatorexceptionfetchinghistory.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// MasterIsStableIndicatorExceptionFetchingHistory type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L94-L97 +type MasterIsStableIndicatorExceptionFetchingHistory struct { + Message string `json:"message"` + StackTrace string `json:"stack_trace"` +} + +func (s *MasterIsStableIndicatorExceptionFetchingHistory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "message": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Message = o + + case "stack_trace": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StackTrace = o + + } + } + return nil +} + +// NewMasterIsStableIndicatorExceptionFetchingHistory returns a MasterIsStableIndicatorExceptionFetchingHistory. 
+func NewMasterIsStableIndicatorExceptionFetchingHistory() *MasterIsStableIndicatorExceptionFetchingHistory { + r := &MasterIsStableIndicatorExceptionFetchingHistory{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/masterrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/masterrecord.go index df7637f21..08da29150 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/masterrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/masterrecord.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MasterRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/master/types.ts#L20-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/master/types.ts#L20-L39 type MasterRecord struct { // Host host name Host *string `json:"host,omitempty"` @@ -34,6 +42,74 @@ type MasterRecord struct { Node *string `json:"node,omitempty"` } +func (s *MasterRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "host", "h": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Host = &o + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } 
+ o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = &o + + case "ip": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Ip = &o + + case "node", "n": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = &o + + } + } + return nil +} + // NewMasterRecord returns a MasterRecord. func NewMasterRecord() *MasterRecord { r := &MasterRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchallquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchallquery.go index 07a98e8cc..cb4246702 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchallquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchallquery.go @@ -16,18 +16,79 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MatchAllQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/MatchAllQuery.ts#L22-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/MatchAllQuery.ts#L22-L22 type MatchAllQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. 
+ // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. Boost *float32 `json:"boost,omitempty"` QueryName_ *string `json:"_name,omitempty"` } +func (s *MatchAllQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + // NewMatchAllQuery returns a MatchAllQuery. func NewMatchAllQuery() *MatchAllQuery { r := &MatchAllQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchboolprefixquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchboolprefixquery.go index 67db8c766..6918b7762 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchboolprefixquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchboolprefixquery.go @@ -16,29 +16,206 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" ) // MatchBoolPrefixQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L160-L171 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L349-L403 type MatchBoolPrefixQuery struct { - Analyzer *string `json:"analyzer,omitempty"` - Boost *float32 `json:"boost,omitempty"` - Fuzziness Fuzziness `json:"fuzziness,omitempty"` - FuzzyRewrite *string `json:"fuzzy_rewrite,omitempty"` - FuzzyTranspositions *bool `json:"fuzzy_transpositions,omitempty"` - MaxExpansions *int `json:"max_expansions,omitempty"` - MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` - Operator *operator.Operator `json:"operator,omitempty"` - PrefixLength *int `json:"prefix_length,omitempty"` - Query string `json:"query"` - QueryName_ *string `json:"_name,omitempty"` + // Analyzer Analyzer used to convert the text in the query value into tokens. + Analyzer *string `json:"analyzer,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Fuzziness Maximum edit distance allowed for matching. + // Can be applied to the term subqueries constructed for all terms but the final + // term. + Fuzziness Fuzziness `json:"fuzziness,omitempty"` + // FuzzyRewrite Method used to rewrite the query. + // Can be applied to the term subqueries constructed for all terms but the final + // term. + FuzzyRewrite *string `json:"fuzzy_rewrite,omitempty"` + // FuzzyTranspositions If `true`, edits for fuzzy matching include transpositions of two adjacent + // characters (for example, `ab` to `ba`). 
+ // Can be applied to the term subqueries constructed for all terms but the final + // term. + FuzzyTranspositions *bool `json:"fuzzy_transpositions,omitempty"` + // MaxExpansions Maximum number of terms to which the query will expand. + // Can be applied to the term subqueries constructed for all terms but the final + // term. + MaxExpansions *int `json:"max_expansions,omitempty"` + // MinimumShouldMatch Minimum number of clauses that must match for a document to be returned. + // Applied to the constructed bool query. + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` + // Operator Boolean logic used to interpret text in the query value. + // Applied to the constructed bool query. + Operator *operator.Operator `json:"operator,omitempty"` + // PrefixLength Number of beginning characters left unchanged for fuzzy matching. + // Can be applied to the term subqueries constructed for all terms but the final + // term. + PrefixLength *int `json:"prefix_length,omitempty"` + // Query Terms you wish to find in the provided field. + // The last term is used in a prefix query. 
+ Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *MatchBoolPrefixQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Query) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "fuzziness": + if err := dec.Decode(&s.Fuzziness); err != nil { + return err + } + + case "fuzzy_rewrite": + if err := dec.Decode(&s.FuzzyRewrite); err != nil { + return err + } + + case "fuzzy_transpositions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.FuzzyTranspositions = &value + case bool: + s.FuzzyTranspositions = &v + } + + case "max_expansions": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxExpansions = &value + case float64: + f := int(v) + s.MaxExpansions = &f + } + + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return err + } + + case "operator": + if err := dec.Decode(&s.Operator); err != nil { + return err + } + + case "prefix_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + 
if err != nil { + return err + } + s.PrefixLength = &value + case float64: + f := int(v) + s.PrefixLength = &f + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil } // NewMatchBoolPrefixQuery returns a MatchBoolPrefixQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchnonequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchnonequery.go index 0a6d32606..57447de30 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchnonequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchnonequery.go @@ -16,18 +16,79 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MatchNoneQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/MatchNoneQuery.ts#L22-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/MatchNoneQuery.ts#L22-L22 type MatchNoneQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. 
+ // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. Boost *float32 `json:"boost,omitempty"` QueryName_ *string `json:"_name,omitempty"` } +func (s *MatchNoneQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + // NewMatchNoneQuery returns a MatchNoneQuery. func NewMatchNoneQuery() *MatchNoneQuery { r := &MatchNoneQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchonlytextproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchonlytextproperty.go index fd91edb64..f1850fdd6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchonlytextproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchonlytextproperty.go @@ -16,21 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MatchOnlyTextProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L208-L233 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L215-L240 type MatchOnlyTextProperty struct { // CopyTo Allows you to copy the values of multiple fields into a group // field, which can then be queried as a single field. @@ -46,6 +45,7 @@ type MatchOnlyTextProperty struct { } func (s *MatchOnlyTextProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -60,11 +60,25 @@ func (s *MatchOnlyTextProperty) UnmarshalJSON(data []byte) error { switch t { case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -73,7 +87,9 @@ func (s *MatchOnlyTextProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -352,13 +368,18 @@ func (s *MatchOnlyTextProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "meta": + if s.Meta == nil { + s.Meta = 
make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } @@ -373,6 +394,21 @@ func (s *MatchOnlyTextProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s MatchOnlyTextProperty) MarshalJSON() ([]byte, error) { + type innerMatchOnlyTextProperty MatchOnlyTextProperty + tmp := innerMatchOnlyTextProperty{ + CopyTo: s.CopyTo, + Fields: s.Fields, + Meta: s.Meta, + Type: s.Type, + } + + tmp.Type = "match_only_text" + + return json.Marshal(tmp) +} + // NewMatchOnlyTextProperty returns a MatchOnlyTextProperty. func NewMatchOnlyTextProperty() *MatchOnlyTextProperty { r := &MatchOnlyTextProperty{ @@ -380,7 +416,5 @@ func NewMatchOnlyTextProperty() *MatchOnlyTextProperty { Meta: make(map[string]string, 0), } - r.Type = "match_only_text" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchphraseprefixquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchphraseprefixquery.go index 301b63fbe..8303b37dd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchphraseprefixquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchphraseprefixquery.go @@ -16,27 +16,159 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/zerotermsquery" ) // MatchPhrasePrefixQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L182-L189 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L428-L454 type MatchPhrasePrefixQuery struct { - Analyzer *string `json:"analyzer,omitempty"` - Boost *float32 `json:"boost,omitempty"` - MaxExpansions *int `json:"max_expansions,omitempty"` - Query string `json:"query"` - QueryName_ *string `json:"_name,omitempty"` - Slop *int `json:"slop,omitempty"` + // Analyzer Analyzer used to convert text in the query value into tokens. + Analyzer *string `json:"analyzer,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // MaxExpansions Maximum number of terms to which the last provided term of the query value + // will expand. + MaxExpansions *int `json:"max_expansions,omitempty"` + // Query Text you wish to find in the provided field. + Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // Slop Maximum number of positions allowed between matching tokens. + Slop *int `json:"slop,omitempty"` + // ZeroTermsQuery Indicates whether no documents are returned if the analyzer removes all + // tokens, such as when using a `stop` filter. 
ZeroTermsQuery *zerotermsquery.ZeroTermsQuery `json:"zero_terms_query,omitempty"` } +func (s *MatchPhrasePrefixQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Query) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "max_expansions": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxExpansions = &value + case float64: + f := int(v) + s.MaxExpansions = &f + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "slop": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Slop = &value + case float64: + f := int(v) + s.Slop = &f + } + + case "zero_terms_query": + if err := dec.Decode(&s.ZeroTermsQuery); err != nil { + return err + } + + } + } + return nil 
+} + // NewMatchPhrasePrefixQuery returns a MatchPhrasePrefixQuery. func NewMatchPhrasePrefixQuery() *MatchPhrasePrefixQuery { r := &MatchPhrasePrefixQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchphrasequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchphrasequery.go index 4ebcd2931..55f951970 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchphrasequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchphrasequery.go @@ -16,26 +16,140 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/zerotermsquery" ) // MatchPhraseQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L173-L180 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L405-L426 type MatchPhraseQuery struct { - Analyzer *string `json:"analyzer,omitempty"` - Boost *float32 `json:"boost,omitempty"` - Query string `json:"query"` - QueryName_ *string `json:"_name,omitempty"` - Slop *int `json:"slop,omitempty"` + // Analyzer Analyzer used to convert the text in the query value into tokens. + Analyzer *string `json:"analyzer,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. 
+ // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Query Query terms that are analyzed and turned into a phrase query. + Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // Slop Maximum number of positions allowed between matching tokens. + Slop *int `json:"slop,omitempty"` + // ZeroTermsQuery Indicates whether no documents are returned if the `analyzer` removes all + // tokens, such as when using a `stop` filter. ZeroTermsQuery *zerotermsquery.ZeroTermsQuery `json:"zero_terms_query,omitempty"` } +func (s *MatchPhraseQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Query) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "slop": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.Atoi(v) + if err != nil { + return err + } + s.Slop = &value + case float64: + f := int(v) + s.Slop = &f + } + + case "zero_terms_query": + if err := dec.Decode(&s.ZeroTermsQuery); err != nil { + return err + } + + } + } + return nil +} + // NewMatchPhraseQuery returns a MatchPhraseQuery. func NewMatchPhraseQuery() *MatchPhraseQuery { r := &MatchPhraseQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchquery.go index b23c25ef4..e9eb6f5a8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matchquery.go @@ -16,34 +16,253 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/zerotermsquery" ) // MatchQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L133-L158 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L282-L347 type MatchQuery struct { - Analyzer *string `json:"analyzer,omitempty"` - AutoGenerateSynonymsPhraseQuery *bool `json:"auto_generate_synonyms_phrase_query,omitempty"` - Boost *float32 `json:"boost,omitempty"` - CutoffFrequency *Float64 `json:"cutoff_frequency,omitempty"` - Fuzziness Fuzziness `json:"fuzziness,omitempty"` - FuzzyRewrite *string `json:"fuzzy_rewrite,omitempty"` - FuzzyTranspositions *bool `json:"fuzzy_transpositions,omitempty"` - Lenient *bool `json:"lenient,omitempty"` - MaxExpansions *int `json:"max_expansions,omitempty"` - MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` - Operator *operator.Operator `json:"operator,omitempty"` - PrefixLength *int `json:"prefix_length,omitempty"` - Query string `json:"query"` - QueryName_ *string `json:"_name,omitempty"` - ZeroTermsQuery *zerotermsquery.ZeroTermsQuery `json:"zero_terms_query,omitempty"` + // Analyzer Analyzer used to convert the text in the query value into tokens. + Analyzer *string `json:"analyzer,omitempty"` + // AutoGenerateSynonymsPhraseQuery If `true`, match phrase queries are automatically created for multi-term + // synonyms. + AutoGenerateSynonymsPhraseQuery *bool `json:"auto_generate_synonyms_phrase_query,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
+ Boost *float32 `json:"boost,omitempty"` + CutoffFrequency *Float64 `json:"cutoff_frequency,omitempty"` + // Fuzziness Maximum edit distance allowed for matching. + Fuzziness Fuzziness `json:"fuzziness,omitempty"` + // FuzzyRewrite Method used to rewrite the query. + FuzzyRewrite *string `json:"fuzzy_rewrite,omitempty"` + // FuzzyTranspositions If `true`, edits for fuzzy matching include transpositions of two adjacent + // characters (for example, `ab` to `ba`). + FuzzyTranspositions *bool `json:"fuzzy_transpositions,omitempty"` + // Lenient If `true`, format-based errors, such as providing a text query value for a + // numeric field, are ignored. + Lenient *bool `json:"lenient,omitempty"` + // MaxExpansions Maximum number of terms to which the query will expand. + MaxExpansions *int `json:"max_expansions,omitempty"` + // MinimumShouldMatch Minimum number of clauses that must match for a document to be returned. + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` + // Operator Boolean logic used to interpret text in the query value. + Operator *operator.Operator `json:"operator,omitempty"` + // PrefixLength Number of beginning characters left unchanged for fuzzy matching. + PrefixLength *int `json:"prefix_length,omitempty"` + // Query Text, number, boolean value or date you wish to find in the provided field. + Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // ZeroTermsQuery Indicates whether no documents are returned if the `analyzer` removes all + // tokens, such as when using a `stop` filter. 
+ ZeroTermsQuery *zerotermsquery.ZeroTermsQuery `json:"zero_terms_query,omitempty"` +} + +func (s *MatchQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Query) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "auto_generate_synonyms_phrase_query": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AutoGenerateSynonymsPhraseQuery = &value + case bool: + s.AutoGenerateSynonymsPhraseQuery = &v + } + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "cutoff_frequency": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.CutoffFrequency = &f + case float64: + f := Float64(v) + s.CutoffFrequency = &f + } + + case "fuzziness": + if err := dec.Decode(&s.Fuzziness); err != nil { + return err + } + + case "fuzzy_rewrite": + if err := dec.Decode(&s.FuzzyRewrite); err != nil { + return err + } + + case "fuzzy_transpositions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.FuzzyTranspositions = &value + case bool: + s.FuzzyTranspositions = &v + } + + 
case "lenient": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Lenient = &value + case bool: + s.Lenient = &v + } + + case "max_expansions": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxExpansions = &value + case float64: + f := int(v) + s.MaxExpansions = &f + } + + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return err + } + + case "operator": + if err := dec.Decode(&s.Operator); err != nil { + return err + } + + case "prefix_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PrefixLength = &value + case float64: + f := int(v) + s.PrefixLength = &f + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "zero_terms_query": + if err := dec.Decode(&s.ZeroTermsQuery); err != nil { + return err + } + + } + } + return nil } // NewMatchQuery returns a MatchQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matrixaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matrixaggregation.go index 842b4979b..d6cfaa2af 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matrixaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matrixaggregation.go @@ -16,22 +16,90 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // MatrixAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/matrix.ts#L26-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/matrix.ts#L26-L36 type MatrixAggregation struct { - Fields []string `json:"fields,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Missing map[string]Float64 `json:"missing,omitempty"` - Name *string `json:"name,omitempty"` + // Fields An array of fields for computing the statistics. + Fields []string `json:"fields,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. 
+ Missing map[string]Float64 `json:"missing,omitempty"` + Name *string `json:"name,omitempty"` +} + +func (s *MatrixAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Fields = append(s.Fields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { + return err + } + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "missing": + if s.Missing == nil { + s.Missing = make(map[string]Float64, 0) + } + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + } + } + return nil } // NewMatrixAggregation returns a MatrixAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matrixstatsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matrixstatsaggregate.go index 07e2ee4af..20da06e7e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matrixstatsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matrixstatsaggregate.go @@ -16,21 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // MatrixStatsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L748-L752 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L757-L761 type MatrixStatsAggregate struct { - DocCount int64 `json:"doc_count"` - Fields []MatrixStatsFields `json:"fields,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + DocCount int64 `json:"doc_count"` + Fields []MatrixStatsFields `json:"fields,omitempty"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *MatrixStatsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + } + } + return nil } // NewMatrixStatsAggregate returns a MatrixStatsAggregate. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matrixstatsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matrixstatsaggregation.go index a0f630028..cc9a54218 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matrixstatsaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matrixstatsaggregation.go @@ -16,25 +16,99 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortmode" ) // MatrixStatsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/matrix.ts#L31-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/matrix.ts#L38-L44 type MatrixStatsAggregation struct { - Fields []string `json:"fields,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Missing map[string]Float64 `json:"missing,omitempty"` - Mode *sortmode.SortMode `json:"mode,omitempty"` - Name *string `json:"name,omitempty"` + // Fields An array of fields for computing the statistics. + Fields []string `json:"fields,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing map[string]Float64 `json:"missing,omitempty"` + // Mode Array value the aggregation will use for array or multi-valued fields. 
+ Mode *sortmode.SortMode `json:"mode,omitempty"` + Name *string `json:"name,omitempty"` +} + +func (s *MatrixStatsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Fields = append(s.Fields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { + return err + } + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "missing": + if s.Missing == nil { + s.Missing = make(map[string]Float64, 0) + } + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + } + } + return nil } // NewMatrixStatsAggregation returns a MatrixStatsAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matrixstatsfields.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matrixstatsfields.go index 967c604cd..2f71f5a44 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matrixstatsfields.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/matrixstatsfields.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MatrixStatsFields type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L754-L763 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L763-L772 type MatrixStatsFields struct { Correlation map[string]Float64 `json:"correlation"` Count int64 `json:"count"` @@ -34,6 +42,126 @@ type MatrixStatsFields struct { Variance Float64 `json:"variance"` } +func (s *MatrixStatsFields) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "correlation": + if s.Correlation == nil { + s.Correlation = make(map[string]Float64, 0) + } + if err := dec.Decode(&s.Correlation); err != nil { + return err + } + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "covariance": + if s.Covariance == nil { + s.Covariance = make(map[string]Float64, 0) + } + if err := dec.Decode(&s.Covariance); err != nil { + return err + } + + case "kurtosis": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Kurtosis = f + case float64: + f := Float64(v) + s.Kurtosis = f + } + + case "mean": + var tmp interface{} + 
dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Mean = f + case float64: + f := Float64(v) + s.Mean = f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "skewness": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Skewness = f + case float64: + f := Float64(v) + s.Skewness = f + } + + case "variance": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Variance = f + case float64: + f := Float64(v) + s.Variance = f + } + + } + } + return nil +} + // NewMatrixStatsFields returns a MatrixStatsFields. func NewMatrixStatsFields() *MatrixStatsFields { r := &MatrixStatsFields{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/maxaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/maxaggregate.go index 3c8ba5ff6..3b6580749 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/maxaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/maxaggregate.go @@ -16,19 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // MaxAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L199-L200 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L200-L201 type MaxAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to // aggregate, // unless specified otherwise. @@ -36,6 +40,48 @@ type MaxAggregate struct { ValueAsString *string `json:"value_as_string,omitempty"` } +func (s *MaxAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + // NewMaxAggregate returns a MaxAggregate. func NewMaxAggregate() *MaxAggregate { r := &MaxAggregate{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/maxaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/maxaggregation.go index c9d9c09f6..638bd999c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/maxaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/maxaggregation.go @@ -16,20 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MaxAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L97-L97 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L162-L162 type MaxAggregation struct { - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. Missing Missing `json:"missing,omitempty"` Script Script `json:"script,omitempty"` } +func (s *MaxAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil +} + // NewMaxAggregation returns a MaxAggregation. 
func NewMaxAggregation() *MaxAggregation { r := &MaxAggregation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/maxbucketaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/maxbucketaggregation.go index d576f4fa7..329d57047 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/maxbucketaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/maxbucketaggregation.go @@ -16,33 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // MaxBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L184-L184 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L224-L224 type MaxBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. 
+ Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` } func (s *MaxBucketAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -62,9 +67,16 @@ func (s *MaxBucketAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -77,9 +89,16 @@ func (s *MaxBucketAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/medianabsolutedeviationaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/medianabsolutedeviationaggregate.go index 37ab4763c..48bc40e47 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/medianabsolutedeviationaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/medianabsolutedeviationaggregate.go @@ -16,19 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // MedianAbsoluteDeviationAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L193-L194 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L194-L195 type MedianAbsoluteDeviationAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to // aggregate, // unless specified otherwise. @@ -36,6 +40,48 @@ type MedianAbsoluteDeviationAggregate struct { ValueAsString *string `json:"value_as_string,omitempty"` } +func (s *MedianAbsoluteDeviationAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + // NewMedianAbsoluteDeviationAggregate returns a MedianAbsoluteDeviationAggregate. 
func NewMedianAbsoluteDeviationAggregate() *MedianAbsoluteDeviationAggregate { r := &MedianAbsoluteDeviationAggregate{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/medianabsolutedeviationaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/medianabsolutedeviationaggregation.go index ad49617ea..8b871996a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/medianabsolutedeviationaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/medianabsolutedeviationaggregation.go @@ -16,19 +16,96 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MedianAbsoluteDeviationAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L99-L101 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L164-L170 type MedianAbsoluteDeviationAggregation struct { + // Compression Limits the maximum number of nodes used by the underlying TDigest algorithm + // to `20 * compression`, enabling control of memory usage and approximation + // error. Compression *Float64 `json:"compression,omitempty"` - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` - Missing Missing `json:"missing,omitempty"` - Script Script `json:"script,omitempty"` + // Field The field on which to run the aggregation. 
+ Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script Script `json:"script,omitempty"` +} + +func (s *MedianAbsoluteDeviationAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compression": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Compression = &f + case float64: + f := Float64(v) + s.Compression = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil } // NewMedianAbsoluteDeviationAggregation returns a MedianAbsoluteDeviationAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/memmlstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/memmlstats.go index 8d2960c01..1b5f7a09d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/memmlstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/memmlstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MemMlStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_memory_stats/types.ts#L90-L111 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_memory_stats/types.ts#L90-L111 type MemMlStats struct { // AnomalyDetectors Amount of native memory set aside for anomaly detection jobs. AnomalyDetectors ByteSize `json:"anomaly_detectors,omitempty"` @@ -52,6 +60,131 @@ type MemMlStats struct { NativeInferenceInBytes int `json:"native_inference_in_bytes"` } +func (s *MemMlStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "anomaly_detectors": + if err := dec.Decode(&s.AnomalyDetectors); err != nil { + return err + } + + case "anomaly_detectors_in_bytes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.AnomalyDetectorsInBytes = value + case float64: + f := int(v) + s.AnomalyDetectorsInBytes = f + } + + case "data_frame_analytics": + if err := dec.Decode(&s.DataFrameAnalytics); err != nil { + return err + } + + case "data_frame_analytics_in_bytes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DataFrameAnalyticsInBytes = value + case float64: + f := int(v) + s.DataFrameAnalyticsInBytes = f + } + + case "max": + if err := 
dec.Decode(&s.Max); err != nil { + return err + } + + case "max_in_bytes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxInBytes = value + case float64: + f := int(v) + s.MaxInBytes = f + } + + case "native_code_overhead": + if err := dec.Decode(&s.NativeCodeOverhead); err != nil { + return err + } + + case "native_code_overhead_in_bytes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NativeCodeOverheadInBytes = value + case float64: + f := int(v) + s.NativeCodeOverheadInBytes = f + } + + case "native_inference": + if err := dec.Decode(&s.NativeInference); err != nil { + return err + } + + case "native_inference_in_bytes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NativeInferenceInBytes = value + case float64: + f := int(v) + s.NativeInferenceInBytes = f + } + + } + } + return nil +} + // NewMemMlStats returns a MemMlStats. func NewMemMlStats() *MemMlStats { r := &MemMlStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/memory.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/memory.go index 6942fd16d..7581ca527 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/memory.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/memory.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // Memory type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_memory_stats/types.ts#L25-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_memory_stats/types.ts#L25-L48 type Memory struct { Attributes map[string]string `json:"attributes"` EphemeralId string `json:"ephemeral_id"` @@ -39,6 +46,64 @@ type Memory struct { TransportAddress string `json:"transport_address"` } +func (s *Memory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return err + } + + case "ephemeral_id": + if err := dec.Decode(&s.EphemeralId); err != nil { + return err + } + + case "jvm": + if err := dec.Decode(&s.Jvm); err != nil { + return err + } + + case "mem": + if err := dec.Decode(&s.Mem); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return err + } + + } + } + return nil +} + // NewMemory returns a Memory. func NewMemory() *Memory { r := &Memory{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/memorystats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/memorystats.go index f82b99e32..baab63e3a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/memorystats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/memorystats.go @@ -16,24 +16,200 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MemoryStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L248-L259 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L596-L620 type MemoryStats struct { - AdjustedTotalInBytes *int64 `json:"adjusted_total_in_bytes,omitempty"` - FreeInBytes *int64 `json:"free_in_bytes,omitempty"` - Resident *string `json:"resident,omitempty"` - ResidentInBytes *int64 `json:"resident_in_bytes,omitempty"` - Share *string `json:"share,omitempty"` - ShareInBytes *int64 `json:"share_in_bytes,omitempty"` - TotalInBytes *int64 `json:"total_in_bytes,omitempty"` - TotalVirtual *string `json:"total_virtual,omitempty"` - TotalVirtualInBytes *int64 `json:"total_virtual_in_bytes,omitempty"` - UsedInBytes *int64 `json:"used_in_bytes,omitempty"` + // AdjustedTotalInBytes If the amount of physical memory has been overridden using the + // `es`.`total_memory_bytes` system property then this reports the overridden + // value in bytes. + // Otherwise it reports the same value as `total_in_bytes`. + AdjustedTotalInBytes *int64 `json:"adjusted_total_in_bytes,omitempty"` + // FreeInBytes Amount of free physical memory in bytes. + FreeInBytes *int64 `json:"free_in_bytes,omitempty"` + Resident *string `json:"resident,omitempty"` + ResidentInBytes *int64 `json:"resident_in_bytes,omitempty"` + Share *string `json:"share,omitempty"` + ShareInBytes *int64 `json:"share_in_bytes,omitempty"` + // TotalInBytes Total amount of physical memory in bytes. 
+ TotalInBytes *int64 `json:"total_in_bytes,omitempty"` + TotalVirtual *string `json:"total_virtual,omitempty"` + TotalVirtualInBytes *int64 `json:"total_virtual_in_bytes,omitempty"` + // UsedInBytes Amount of used physical memory in bytes. + UsedInBytes *int64 `json:"used_in_bytes,omitempty"` +} + +func (s *MemoryStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "adjusted_total_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.AdjustedTotalInBytes = &value + case float64: + f := int64(v) + s.AdjustedTotalInBytes = &f + } + + case "free_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.FreeInBytes = &value + case float64: + f := int64(v) + s.FreeInBytes = &f + } + + case "resident": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Resident = &o + + case "resident_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ResidentInBytes = &value + case float64: + f := int64(v) + s.ResidentInBytes = &f + } + + case "share": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Share = &o + + case "share_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return 
err + } + s.ShareInBytes = &value + case float64: + f := int64(v) + s.ShareInBytes = &f + } + + case "total_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalInBytes = &value + case float64: + f := int64(v) + s.TotalInBytes = &f + } + + case "total_virtual": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TotalVirtual = &o + + case "total_virtual_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalVirtualInBytes = &value + case float64: + f := int64(v) + s.TotalVirtualInBytes = &f + } + + case "used_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.UsedInBytes = &value + case float64: + f := int64(v) + s.UsedInBytes = &f + } + + } + } + return nil } // NewMemoryStats returns a MemoryStats. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/memstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/memstats.go index 2b6009221..2ecd871be 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/memstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/memstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MemStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/get_memory_stats/types.ts#L65-L88 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/get_memory_stats/types.ts#L65-L88 type MemStats struct { // AdjustedTotal If the amount of physical memory has been overridden using the // es.total_memory_bytes system property @@ -42,6 +50,73 @@ type MemStats struct { TotalInBytes int `json:"total_in_bytes"` } +func (s *MemStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "adjusted_total": + if err := dec.Decode(&s.AdjustedTotal); err != nil { + return err + } + + case "adjusted_total_in_bytes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.AdjustedTotalInBytes = value + case float64: + f := int(v) + s.AdjustedTotalInBytes = f + } + + case "ml": + if err := dec.Decode(&s.Ml); err != nil { + return err + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return err + } + + case "total_in_bytes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TotalInBytes = value + case float64: + f := int(v) + s.TotalInBytes = f + } + + } + } + return nil +} + // NewMemStats returns a MemStats. 
func NewMemStats() *MemStats { r := &MemStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/merge.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/merge.go index 3df3a14e6..318a27263 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/merge.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/merge.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Merge type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L323-L325 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L323-L325 type Merge struct { Scheduler *MergeScheduler `json:"scheduler,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mergescheduler.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mergescheduler.go index fe969d534..1fb3511b7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mergescheduler.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mergescheduler.go @@ -16,16 +16,53 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // MergeScheduler type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L327-L330 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L327-L330 type MergeScheduler struct { - MaxMergeCount *int `json:"max_merge_count,omitempty"` - MaxThreadCount *int `json:"max_thread_count,omitempty"` + MaxMergeCount Stringifiedinteger `json:"max_merge_count,omitempty"` + MaxThreadCount Stringifiedinteger `json:"max_thread_count,omitempty"` +} + +func (s *MergeScheduler) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_merge_count": + if err := dec.Decode(&s.MaxMergeCount); err != nil { + return err + } + + case "max_thread_count": + if err := dec.Decode(&s.MaxThreadCount); err != nil { + return err + } + + } + } + return nil } // NewMergeScheduler returns a MergeScheduler. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mergesstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mergesstats.go index f4bf048df..5b14c372a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mergesstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mergesstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MergesStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L119-L136 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L161-L178 type MergesStats struct { Current int64 `json:"current"` CurrentDocs int64 `json:"current_docs"` @@ -42,6 +50,197 @@ type MergesStats struct { TotalTimeInMillis int64 `json:"total_time_in_millis"` } +func (s *MergesStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Current = value + case float64: + f := int64(v) + s.Current = f + } + + case "current_docs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CurrentDocs = value + case float64: + f := int64(v) + s.CurrentDocs = f + } + + case "current_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CurrentSize = &o + + case "current_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CurrentSizeInBytes = value + case float64: + f := int64(v) + s.CurrentSizeInBytes = f + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = value + case float64: + f := int64(v) + 
s.Total = f + } + + case "total_auto_throttle": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TotalAutoThrottle = &o + + case "total_auto_throttle_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalAutoThrottleInBytes = value + case float64: + f := int64(v) + s.TotalAutoThrottleInBytes = f + } + + case "total_docs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalDocs = value + case float64: + f := int64(v) + s.TotalDocs = f + } + + case "total_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TotalSize = &o + + case "total_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalSizeInBytes = value + case float64: + f := int64(v) + s.TotalSizeInBytes = f + } + + case "total_stopped_time": + if err := dec.Decode(&s.TotalStoppedTime); err != nil { + return err + } + + case "total_stopped_time_in_millis": + if err := dec.Decode(&s.TotalStoppedTimeInMillis); err != nil { + return err + } + + case "total_throttled_time": + if err := dec.Decode(&s.TotalThrottledTime); err != nil { + return err + } + + case "total_throttled_time_in_millis": + if err := dec.Decode(&s.TotalThrottledTimeInMillis); err != nil { + return err + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return err + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return err + 
} + + } + } + return nil +} + // NewMergesStats returns a MergesStats. func NewMergesStats() *MergesStats { r := &MergesStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/metadata.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/metadata.go index 6951db8a5..9da985840 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/metadata.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/metadata.go @@ -16,13 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types -import "encoding/json" +import ( + "encoding/json" +) // Metadata type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L91-L91 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L93-L93 type Metadata map[string]json.RawMessage diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/metrics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/metrics.go index 3519605f4..728f1ce5a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/metrics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/metrics.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Metrics type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L70-L70 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L70-L70 type Metrics []string diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mgetoperation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mgetoperation.go index 344b95ddb..056c4eedb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mgetoperation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mgetoperation.go @@ -16,17 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" ) // MgetOperation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/mget/types.ts#L32-L55 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/mget/types.ts#L32-L55 type MgetOperation struct { // Id_ The unique document ID. 
Id_ string `json:"_id"` @@ -43,6 +48,72 @@ type MgetOperation struct { VersionType *versiontype.VersionType `json:"version_type,omitempty"` } +func (s *MgetOperation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return err + } + + case "stored_fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.StoredFields = append(s.StoredFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.StoredFields); err != nil { + return err + } + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return err + } + + } + } + return nil +} + // NewMgetOperation returns a MgetOperation. 
func NewMgetOperation() *MgetOperation { r := &MgetOperation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmactions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mgetresponseitem.go similarity index 69% rename from vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmactions.go rename to vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mgetresponseitem.go index ecb3682f3..6010a126c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ilmactions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mgetresponseitem.go @@ -16,13 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types -import "encoding/json" - -// IlmActions type alias. +// MgetResponseItem holds the union for the following types: +// +// GetResult +// MultiGetError // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/_types/Phase.ts#L45-L45 -type IlmActions json.RawMessage +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/mget/types.ts#L57-L60 +type MgetResponseItem interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/migrationfeatureindexinfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/migrationfeatureindexinfo.go index 37642838c..d625066f5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/migrationfeatureindexinfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/migrationfeatureindexinfo.go @@ -16,19 +16,61 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // MigrationFeatureIndexInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L44-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/migration/get_feature_upgrade_status/GetFeatureUpgradeStatusResponse.ts#L44-L48 type MigrationFeatureIndexInfo struct { FailureCause *ErrorCause `json:"failure_cause,omitempty"` Index string `json:"index"` Version string `json:"version"` } +func (s *MigrationFeatureIndexInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "failure_cause": + if err := dec.Decode(&s.FailureCause); err != nil { + return err + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewMigrationFeatureIndexInfo returns a MigrationFeatureIndexInfo. 
func NewMigrationFeatureIndexInfo() *MigrationFeatureIndexInfo { r := &MigrationFeatureIndexInfo{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minaggregate.go index 4624b90d7..016e9071f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minaggregate.go @@ -16,19 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // MinAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L196-L197 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L197-L198 type MinAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to // aggregate, // unless specified otherwise. 
@@ -36,6 +40,48 @@ type MinAggregate struct { ValueAsString *string `json:"value_as_string,omitempty"` } +func (s *MinAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + // NewMinAggregate returns a MinAggregate. func NewMinAggregate() *MinAggregate { r := &MinAggregate{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minaggregation.go index 5279334f3..95d57eb5c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minaggregation.go @@ -16,20 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MinAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L103-L103 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L172-L172 type MinAggregation struct { - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. Missing Missing `json:"missing,omitempty"` Script Script `json:"script,omitempty"` } +func (s *MinAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil +} + // NewMinAggregation returns a MinAggregation. 
func NewMinAggregation() *MinAggregation { r := &MinAggregation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minbucketaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minbucketaggregation.go index 446b1c9cd..58ece2782 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minbucketaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minbucketaggregation.go @@ -16,33 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // MinBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L186-L186 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L226-L226 type MinBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. 
+ Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` } func (s *MinBucketAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -62,9 +67,16 @@ func (s *MinBucketAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -77,9 +89,16 @@ func (s *MinBucketAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minimallicenseinformation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minimallicenseinformation.go index 8bd016b99..bc8e57869 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minimallicenseinformation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minimallicenseinformation.go @@ -16,18 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/licensestatus" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/licensetype" ) // MinimalLicenseInformation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/info/types.ts#L34-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/info/types.ts#L34-L40 type MinimalLicenseInformation struct { ExpiryDateInMillis int64 `json:"expiry_date_in_millis"` Mode licensetype.LicenseType `json:"mode"` @@ -36,6 +42,58 @@ type MinimalLicenseInformation struct { Uid string `json:"uid"` } +func (s *MinimalLicenseInformation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expiry_date_in_millis": + if err := dec.Decode(&s.ExpiryDateInMillis); err != nil { + return err + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "uid": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Uid = o + + } + } + return nil +} + // NewMinimalLicenseInformation returns a MinimalLicenseInformation. 
func NewMinimalLicenseInformation() *MinimalLicenseInformation { r := &MinimalLicenseInformation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minimumshouldmatch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minimumshouldmatch.go index 1c0c73b9b..a1d33ee70 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minimumshouldmatch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/minimumshouldmatch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // int // string // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L143-L147 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L157-L161 type MinimumShouldMatch interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/missing.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/missing.go index 18cebee01..638ece5c9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/missing.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/missing.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -27,5 +27,5 @@ package types // Float64 // bool // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/AggregationContainer.ts#L211-L211 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/AggregationContainer.ts#L517-L517 type Missing interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/missingaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/missingaggregate.go index f41dea233..e3f51e67b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/missingaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/missingaggregate.go @@ -16,32 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // MissingAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L482-L483 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L483-L484 type MissingAggregate struct { - Aggregations map[string]Aggregate `json:"-"` - DocCount int64 `json:"doc_count"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` } func (s *MissingAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -55,451 +54,19 @@ func (s *MissingAggregate) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { 
- return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - 
} - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if 
err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return 
err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "meta": @@ -507,6 +74,519 @@ func (s *MissingAggregate) UnmarshalJSON(data []byte) error { return err } + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + 
return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if 
err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := 
dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": 
+ o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } + } } return nil @@ -531,6 +611,7 @@ func (s MissingAggregate) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/missingaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/missingaggregation.go index 4d183616f..b6c139715 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/missingaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/missingaggregation.go @@ -16,22 +16,74 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // MissingAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L260-L263 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L574-L580 type MissingAggregation struct { - Field *string `json:"field,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Missing Missing `json:"missing,omitempty"` - Name *string `json:"name,omitempty"` + // Field The name of the field. + Field *string `json:"field,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Missing Missing `json:"missing,omitempty"` + Name *string `json:"name,omitempty"` +} + +func (s *MissingAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + } + } + return nil } // NewMissingAggregation returns a MissingAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlcounter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlcounter.go index b09092fe2..21260f1c5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlcounter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlcounter.go @@ -16,17 +16,60 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MlCounter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L249-L251 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L255-L257 type MlCounter struct { Count int64 `json:"count"` } +func (s *MlCounter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + } + } + return nil +} + // NewMlCounter returns a MlCounter. func NewMlCounter() *MlCounter { r := &MlCounter{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldatafeed.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldatafeed.go index 77bb05945..9f55f0959 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldatafeed.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldatafeed.go @@ -16,33 +16,169 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MLDatafeed type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Datafeed.ts#L37-L58 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Datafeed.ts#L37-L58 type MLDatafeed struct { Aggregations map[string]Aggregations `json:"aggregations,omitempty"` // Authorization The security privileges that the datafeed uses to run its queries. If Elastic // Stack security features were disabled at the time of the most recent update // to the datafeed, this property is omitted. - Authorization *DatafeedAuthorization `json:"authorization,omitempty"` - ChunkingConfig *ChunkingConfig `json:"chunking_config,omitempty"` - DatafeedId string `json:"datafeed_id"` - DelayedDataCheckConfig DelayedDataCheckConfig `json:"delayed_data_check_config"` - Frequency Duration `json:"frequency,omitempty"` - Indexes []string `json:"indexes,omitempty"` - Indices []string `json:"indices"` - IndicesOptions *IndicesOptions `json:"indices_options,omitempty"` - JobId string `json:"job_id"` - MaxEmptySearches *int `json:"max_empty_searches,omitempty"` - Query Query `json:"query"` - QueryDelay Duration `json:"query_delay,omitempty"` - RuntimeMappings map[string]RuntimeField `json:"runtime_mappings,omitempty"` - ScriptFields map[string]ScriptField `json:"script_fields,omitempty"` - ScrollSize *int `json:"scroll_size,omitempty"` + Authorization *DatafeedAuthorization `json:"authorization,omitempty"` + ChunkingConfig *ChunkingConfig `json:"chunking_config,omitempty"` + DatafeedId string `json:"datafeed_id"` + DelayedDataCheckConfig DelayedDataCheckConfig 
`json:"delayed_data_check_config"` + Frequency Duration `json:"frequency,omitempty"` + Indexes []string `json:"indexes,omitempty"` + Indices []string `json:"indices"` + IndicesOptions *IndicesOptions `json:"indices_options,omitempty"` + JobId string `json:"job_id"` + MaxEmptySearches *int `json:"max_empty_searches,omitempty"` + Query Query `json:"query"` + QueryDelay Duration `json:"query_delay,omitempty"` + RuntimeMappings RuntimeFields `json:"runtime_mappings,omitempty"` + ScriptFields map[string]ScriptField `json:"script_fields,omitempty"` + ScrollSize *int `json:"scroll_size,omitempty"` +} + +func (s *MLDatafeed) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations", "aggs": + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregations, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return err + } + + case "authorization": + if err := dec.Decode(&s.Authorization); err != nil { + return err + } + + case "chunking_config": + if err := dec.Decode(&s.ChunkingConfig); err != nil { + return err + } + + case "datafeed_id": + if err := dec.Decode(&s.DatafeedId); err != nil { + return err + } + + case "delayed_data_check_config": + if err := dec.Decode(&s.DelayedDataCheckConfig); err != nil { + return err + } + + case "frequency": + if err := dec.Decode(&s.Frequency); err != nil { + return err + } + + case "indexes": + if err := dec.Decode(&s.Indexes); err != nil { + return err + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return err + } + + case "indices_options": + if err := dec.Decode(&s.IndicesOptions); err != nil { + return err + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return err + } + + case "max_empty_searches": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + 
value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxEmptySearches = &value + case float64: + f := int(v) + s.MaxEmptySearches = &f + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "query_delay": + if err := dec.Decode(&s.QueryDelay); err != nil { + return err + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return err + } + + case "script_fields": + if s.ScriptFields == nil { + s.ScriptFields = make(map[string]ScriptField, 0) + } + if err := dec.Decode(&s.ScriptFields); err != nil { + return err + } + + case "scroll_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ScrollSize = &value + case float64: + f := int(v) + s.ScrollSize = &f + } + + } + } + return nil } // NewMLDatafeed returns a MLDatafeed. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldataframeanalyticsjobs.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldataframeanalyticsjobs.go index c34328e5f..d34f17d29 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldataframeanalyticsjobs.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldataframeanalyticsjobs.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // MlDataFrameAnalyticsJobs type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L177-L182 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L177-L182 type MlDataFrameAnalyticsJobs struct { All_ MlDataFrameAnalyticsJobsCount `json:"_all"` AnalysisCounts *MlDataFrameAnalyticsJobsAnalysis `json:"analysis_counts,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldataframeanalyticsjobsanalysis.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldataframeanalyticsjobsanalysis.go index 0e2ff3579..0eeb58b50 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldataframeanalyticsjobsanalysis.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldataframeanalyticsjobsanalysis.go @@ -16,19 +16,95 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MlDataFrameAnalyticsJobsAnalysis type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L184-L188 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L184-L188 type MlDataFrameAnalyticsJobsAnalysis struct { Classification *int `json:"classification,omitempty"` OutlierDetection *int `json:"outlier_detection,omitempty"` Regression *int `json:"regression,omitempty"` } +func (s *MlDataFrameAnalyticsJobsAnalysis) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classification": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Classification = &value + case float64: + f := int(v) + s.Classification = &f + } + + case "outlier_detection": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.OutlierDetection = &value + case float64: + f := int(v) + s.OutlierDetection = &f + } + + case "regression": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Regression = &value + case float64: + f := int(v) + s.Regression = &f + } + + } + } + return nil +} + // NewMlDataFrameAnalyticsJobsAnalysis returns a MlDataFrameAnalyticsJobsAnalysis. 
func NewMlDataFrameAnalyticsJobsAnalysis() *MlDataFrameAnalyticsJobsAnalysis { r := &MlDataFrameAnalyticsJobsAnalysis{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldataframeanalyticsjobscount.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldataframeanalyticsjobscount.go index 1193199a0..add236796 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldataframeanalyticsjobscount.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldataframeanalyticsjobscount.go @@ -16,17 +16,60 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MlDataFrameAnalyticsJobsCount type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L194-L196 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L194-L196 type MlDataFrameAnalyticsJobsCount struct { Count int64 `json:"count"` } +func (s *MlDataFrameAnalyticsJobsCount) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + } + } + return nil +} + // NewMlDataFrameAnalyticsJobsCount returns a MlDataFrameAnalyticsJobsCount. 
func NewMlDataFrameAnalyticsJobsCount() *MlDataFrameAnalyticsJobsCount { r := &MlDataFrameAnalyticsJobsCount{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldataframeanalyticsjobsmemory.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldataframeanalyticsjobsmemory.go index afb652e0a..df23ebd28 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldataframeanalyticsjobsmemory.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mldataframeanalyticsjobsmemory.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // MlDataFrameAnalyticsJobsMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L190-L192 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L190-L192 type MlDataFrameAnalyticsJobsMemory struct { PeakUsageBytes JobStatistics `json:"peak_usage_bytes"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlfilter.go index 1ba085c81..ccf88e577 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MLFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Filter.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Filter.ts#L22-L29 type MLFilter struct { // Description A description of the filter. Description *string `json:"description,omitempty"` @@ -32,6 +40,48 @@ type MLFilter struct { Items []string `json:"items"` } +func (s *MLFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "filter_id": + if err := dec.Decode(&s.FilterId); err != nil { + return err + } + + case "items": + if err := dec.Decode(&s.Items); err != nil { + return err + } + + } + } + return nil +} + // NewMLFilter returns a MLFilter. func NewMLFilter() *MLFilter { r := &MLFilter{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinference.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinference.go index a7bd62782..23931bc4b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinference.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinference.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // MlInference type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L198-L203 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L198-L206 type MlInference struct { Deployments *MlInferenceDeployments `json:"deployments,omitempty"` IngestProcessors map[string]MlInferenceIngestProcessor `json:"ingest_processors"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferencedeployments.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferencedeployments.go index 77f61750e..47db477ac 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferencedeployments.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferencedeployments.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MlInferenceDeployments type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L221-L226 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L227-L232 type MlInferenceDeployments struct { Count int `json:"count"` InferenceCounts JobStatistics `json:"inference_counts"` @@ -30,6 +38,57 @@ type MlInferenceDeployments struct { TimeMs MlInferenceDeploymentsTimeMs `json:"time_ms"` } +func (s *MlInferenceDeployments) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "inference_counts": + if err := dec.Decode(&s.InferenceCounts); err != nil { + return err + } + + case "model_sizes_bytes": + if err := dec.Decode(&s.ModelSizesBytes); err != nil { + return err + } + + case "time_ms": + if err := dec.Decode(&s.TimeMs); err != nil { + return err + } + + } + } + return nil +} + // NewMlInferenceDeployments returns a MlInferenceDeployments. func NewMlInferenceDeployments() *MlInferenceDeployments { r := &MlInferenceDeployments{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferencedeploymentstimems.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferencedeploymentstimems.go index 038255312..656da69dc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferencedeploymentstimems.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferencedeploymentstimems.go @@ -16,17 +16,61 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MlInferenceDeploymentsTimeMs type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L228-L230 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L234-L236 type MlInferenceDeploymentsTimeMs struct { Avg Float64 `json:"avg"` } +func (s *MlInferenceDeploymentsTimeMs) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Avg = f + case float64: + f := Float64(v) + s.Avg = f + } + + } + } + return nil +} + // NewMlInferenceDeploymentsTimeMs returns a MlInferenceDeploymentsTimeMs. func NewMlInferenceDeploymentsTimeMs() *MlInferenceDeploymentsTimeMs { r := &MlInferenceDeploymentsTimeMs{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferenceingestprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferenceingestprocessor.go index 5bfeb37ad..cf62a8d8c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferenceingestprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferenceingestprocessor.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // MlInferenceIngestProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L205-L210 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L208-L213 type MlInferenceIngestProcessor struct { NumDocsProcessed MlInferenceIngestProcessorCount `json:"num_docs_processed"` NumFailures MlInferenceIngestProcessorCount `json:"num_failures"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferenceingestprocessorcount.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferenceingestprocessorcount.go index 659b4b0f4..37f803215 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferenceingestprocessorcount.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferenceingestprocessorcount.go @@ -16,19 +16,92 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MlInferenceIngestProcessorCount type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L232-L236 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L238-L242 type MlInferenceIngestProcessorCount struct { Max int64 `json:"max"` Min int64 `json:"min"` Sum int64 `json:"sum"` } +func (s *MlInferenceIngestProcessorCount) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Max = value + case float64: + f := int64(v) + s.Max = f + } + + case "min": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Min = value + case float64: + f := int64(v) + s.Min = f + } + + case "sum": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Sum = value + case float64: + f := int64(v) + s.Sum = f + } + + } + } + return nil +} + // NewMlInferenceIngestProcessorCount returns a MlInferenceIngestProcessorCount. 
func NewMlInferenceIngestProcessorCount() *MlInferenceIngestProcessorCount { r := &MlInferenceIngestProcessorCount{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferencetrainedmodels.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferencetrainedmodels.go index c7f4dbb5c..2e8a0a23d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferencetrainedmodels.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferencetrainedmodels.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // MlInferenceTrainedModels type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L212-L219 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L215-L225 type MlInferenceTrainedModels struct { All_ MlCounter `json:"_all"` Count *MlInferenceTrainedModelsCount `json:"count,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferencetrainedmodelscount.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferencetrainedmodelscount.go index 6407a70cb..c0ce69822 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferencetrainedmodelscount.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mlinferencetrainedmodelscount.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MlInferenceTrainedModelsCount type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L238-L247 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L244-L253 type MlInferenceTrainedModelsCount struct { Classification *int64 `json:"classification,omitempty"` Ner *int64 `json:"ner,omitempty"` @@ -34,6 +42,146 @@ type MlInferenceTrainedModelsCount struct { Total int64 `json:"total"` } +func (s *MlInferenceTrainedModelsCount) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classification": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Classification = &value + case float64: + f := int64(v) + s.Classification = &f + } + + case "ner": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Ner = &value + case float64: + f := int64(v) + s.Ner = &f + } + + case "other": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Other = value + case float64: + f := int64(v) + s.Other = f + } + + case "pass_through": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + 
value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PassThrough = &value + case float64: + f := int64(v) + s.PassThrough = &f + } + + case "prepackaged": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Prepackaged = value + case float64: + f := int64(v) + s.Prepackaged = f + } + + case "regression": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Regression = &value + case float64: + f := int64(v) + s.Regression = &f + } + + case "text_embedding": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TextEmbedding = &value + case float64: + f := int64(v) + s.TextEmbedding = &f + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + // NewMlInferenceTrainedModelsCount returns a MlInferenceTrainedModelsCount. func NewMlInferenceTrainedModelsCount() *MlInferenceTrainedModelsCount { r := &MlInferenceTrainedModelsCount{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mljobforecasts.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mljobforecasts.go index f1957ac88..c227213f3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mljobforecasts.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mljobforecasts.go @@ -16,18 +16,76 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MlJobForecasts type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L172-L175 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L172-L175 type MlJobForecasts struct { ForecastedJobs int64 `json:"forecasted_jobs"` Total int64 `json:"total"` } +func (s *MlJobForecasts) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "forecasted_jobs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ForecastedJobs = value + case float64: + f := int64(v) + s.ForecastedJobs = f + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + // NewMlJobForecasts returns a MlJobForecasts. 
func NewMlJobForecasts() *MlJobForecasts { r := &MlJobForecasts{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/modelplotconfig.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/modelplotconfig.go index 769bfadf3..cc5fcb648 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/modelplotconfig.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/modelplotconfig.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ModelPlotConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/ModelPlot.ts#L23-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/ModelPlot.ts#L23-L42 type ModelPlotConfig struct { // AnnotationsEnabled If true, enables calculation and storage of the model change annotations for // each entity that is being analyzed. 
@@ -37,6 +45,59 @@ type ModelPlotConfig struct { Terms *string `json:"terms,omitempty"` } +func (s *ModelPlotConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "annotations_enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AnnotationsEnabled = &value + case bool: + s.AnnotationsEnabled = &v + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "terms": + if err := dec.Decode(&s.Terms); err != nil { + return err + } + + } + } + return nil +} + // NewModelPlotConfig returns a ModelPlotConfig. func NewModelPlotConfig() *ModelPlotConfig { r := &ModelPlotConfig{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/modelsizestats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/modelsizestats.go index acccabd84..5d27e9c59 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/modelsizestats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/modelsizestats.go @@ -16,18 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/categorizationstatus" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/memorystatus" ) // ModelSizeStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Model.ts#L56-L78 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Model.ts#L59-L81 type ModelSizeStats struct { AssignmentMemoryBasis *string `json:"assignment_memory_basis,omitempty"` BucketAllocationFailuresCount int64 `json:"bucket_allocation_failures_count"` @@ -52,6 +58,261 @@ type ModelSizeStats struct { TotalPartitionFieldCount int64 `json:"total_partition_field_count"` } +func (s *ModelSizeStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "assignment_memory_basis": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AssignmentMemoryBasis = &o + + case "bucket_allocation_failures_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BucketAllocationFailuresCount = value + case float64: + f := int64(v) + s.BucketAllocationFailuresCount = f + } + + case "categorization_status": + if err := dec.Decode(&s.CategorizationStatus); err != nil { + return err + } + + 
case "categorized_doc_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CategorizedDocCount = value + case float64: + f := int(v) + s.CategorizedDocCount = f + } + + case "dead_category_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DeadCategoryCount = value + case float64: + f := int(v) + s.DeadCategoryCount = f + } + + case "failed_category_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FailedCategoryCount = value + case float64: + f := int(v) + s.FailedCategoryCount = f + } + + case "frequent_category_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FrequentCategoryCount = value + case float64: + f := int(v) + s.FrequentCategoryCount = f + } + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return err + } + + case "log_time": + if err := dec.Decode(&s.LogTime); err != nil { + return err + } + + case "memory_status": + if err := dec.Decode(&s.MemoryStatus); err != nil { + return err + } + + case "model_bytes": + if err := dec.Decode(&s.ModelBytes); err != nil { + return err + } + + case "model_bytes_exceeded": + if err := dec.Decode(&s.ModelBytesExceeded); err != nil { + return err + } + + case "model_bytes_memory_limit": + if err := dec.Decode(&s.ModelBytesMemoryLimit); err != nil { + return err + } + + case "peak_model_bytes": + if err := dec.Decode(&s.PeakModelBytes); err != nil { + return err + } + + case "rare_category_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + 
s.RareCategoryCount = value + case float64: + f := int(v) + s.RareCategoryCount = f + } + + case "result_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultType = o + + case "timestamp": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Timestamp = &value + case float64: + f := int64(v) + s.Timestamp = &f + } + + case "total_by_field_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalByFieldCount = value + case float64: + f := int64(v) + s.TotalByFieldCount = f + } + + case "total_category_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TotalCategoryCount = value + case float64: + f := int(v) + s.TotalCategoryCount = f + } + + case "total_over_field_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalOverFieldCount = value + case float64: + f := int64(v) + s.TotalOverFieldCount = f + } + + case "total_partition_field_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalPartitionFieldCount = value + case float64: + f := int64(v) + s.TotalPartitionFieldCount = f + } + + } + } + return nil +} + // NewModelSizeStats returns a ModelSizeStats. 
func NewModelSizeStats() *ModelSizeStats { r := &ModelSizeStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/modelsnapshot.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/modelsnapshot.go index 561c097f6..0212713d9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/modelsnapshot.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/modelsnapshot.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ModelSnapshot type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Model.ts#L25-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Model.ts#L25-L46 type ModelSnapshot struct { // Description An optional description of the job. 
Description *string `json:"description,omitempty"` @@ -49,6 +57,134 @@ type ModelSnapshot struct { Timestamp int64 `json:"timestamp"` } +func (s *ModelSnapshot) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return err + } + + case "latest_record_time_stamp": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.LatestRecordTimeStamp = &value + case float64: + f := int(v) + s.LatestRecordTimeStamp = &f + } + + case "latest_result_time_stamp": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.LatestResultTimeStamp = &value + case float64: + f := int(v) + s.LatestResultTimeStamp = &f + } + + case "min_version": + if err := dec.Decode(&s.MinVersion); err != nil { + return err + } + + case "model_size_stats": + if err := dec.Decode(&s.ModelSizeStats); err != nil { + return err + } + + case "retain": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Retain = value + case bool: + s.Retain = v + } + + case "snapshot_doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SnapshotDocCount = value + case float64: + f := int64(v) + s.SnapshotDocCount = f + } + + case "snapshot_id": + if err := 
dec.Decode(&s.SnapshotId); err != nil { + return err + } + + case "timestamp": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Timestamp = value + case float64: + f := int64(v) + s.Timestamp = f + } + + } + } + return nil +} + // NewModelSnapshot returns a ModelSnapshot. func NewModelSnapshot() *ModelSnapshot { r := &ModelSnapshot{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/modelsnapshotupgrade.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/modelsnapshotupgrade.go index cf41fa42e..d6b45dfb1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/modelsnapshotupgrade.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/modelsnapshotupgrade.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snapshotupgradestate" ) // ModelSnapshotUpgrade type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Model.ts#L48-L54 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Model.ts#L48-L57 type ModelSnapshotUpgrade struct { AssignmentExplanation string `json:"assignment_explanation"` JobId string `json:"job_id"` @@ -35,6 +41,58 @@ type ModelSnapshotUpgrade struct { State snapshotupgradestate.SnapshotUpgradeState `json:"state"` } +func (s *ModelSnapshotUpgrade) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "assignment_explanation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AssignmentExplanation = o + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return err + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return err + } + + case "snapshot_id": + if err := dec.Decode(&s.SnapshotId); err != nil { + return err + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return err + } + + } + } + return nil +} + // NewModelSnapshotUpgrade returns a ModelSnapshotUpgrade. func NewModelSnapshotUpgrade() *ModelSnapshotUpgrade { r := &ModelSnapshotUpgrade{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/monitoring.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/monitoring.go index 4e117e5ae..04ae3356e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/monitoring.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/monitoring.go @@ -16,13 +16,21 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Monitoring type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L372-L375 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L381-L384 type Monitoring struct { Available bool `json:"available"` CollectionEnabled bool `json:"collection_enabled"` @@ -30,6 +38,76 @@ type Monitoring struct { EnabledExporters map[string]int64 `json:"enabled_exporters"` } +func (s *Monitoring) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "collection_enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.CollectionEnabled = value + case bool: + s.CollectionEnabled = v + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "enabled_exporters": + if s.EnabledExporters == nil { + s.EnabledExporters = make(map[string]int64, 0) + } + if err := 
dec.Decode(&s.EnabledExporters); err != nil { + return err + } + + } + } + return nil +} + // NewMonitoring returns a Monitoring. func NewMonitoring() *Monitoring { r := &Monitoring{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/morelikethisquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/morelikethisquery.go index 7179f85a7..446a97191 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/morelikethisquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/morelikethisquery.go @@ -16,39 +16,362 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" ) // MoreLikeThisQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L62-L89 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L78-L163 type MoreLikeThisQuery struct { - Analyzer *string `json:"analyzer,omitempty"` - Boost *float32 `json:"boost,omitempty"` - BoostTerms *Float64 `json:"boost_terms,omitempty"` - FailOnUnsupportedField *bool `json:"fail_on_unsupported_field,omitempty"` - Fields []string `json:"fields,omitempty"` - Include *bool `json:"include,omitempty"` - Like []Like `json:"like"` - MaxDocFreq *int `json:"max_doc_freq,omitempty"` - MaxQueryTerms *int `json:"max_query_terms,omitempty"` - MaxWordLength *int `json:"max_word_length,omitempty"` - MinDocFreq *int `json:"min_doc_freq,omitempty"` - MinTermFreq *int `json:"min_term_freq,omitempty"` - MinWordLength *int `json:"min_word_length,omitempty"` - MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` - PerFieldAnalyzer map[string]string `json:"per_field_analyzer,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Routing *string `json:"routing,omitempty"` - StopWords []string `json:"stop_words,omitempty"` - Unlike []Like `json:"unlike,omitempty"` - Version *int64 `json:"version,omitempty"` - VersionType *versiontype.VersionType `json:"version_type,omitempty"` + // Analyzer The analyzer that is used to analyze the free form text. + // Defaults to the analyzer associated with the first field in fields. + Analyzer *string `json:"analyzer,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
+ Boost *float32 `json:"boost,omitempty"` + // BoostTerms Each term in the formed query could be further boosted by their tf-idf score. + // This sets the boost factor to use when using this feature. + // Defaults to deactivated (0). + BoostTerms *Float64 `json:"boost_terms,omitempty"` + // FailOnUnsupportedField Controls whether the query should fail (throw an exception) if any of the + // specified fields are not of the supported types (`text` or `keyword`). + FailOnUnsupportedField *bool `json:"fail_on_unsupported_field,omitempty"` + // Fields A list of fields to fetch and analyze the text from. + // Defaults to the `index.query.default_field` index setting, which has a + // default value of `*`. + Fields []string `json:"fields,omitempty"` + // Include Specifies whether the input documents should also be included in the search + // results returned. + Include *bool `json:"include,omitempty"` + // Like Specifies free form text and/or a single or multiple documents for which you + // want to find similar documents. + Like []Like `json:"like"` + // MaxDocFreq The maximum document frequency above which the terms are ignored from the + // input document. + MaxDocFreq *int `json:"max_doc_freq,omitempty"` + // MaxQueryTerms The maximum number of query terms that can be selected. + MaxQueryTerms *int `json:"max_query_terms,omitempty"` + // MaxWordLength The maximum word length above which the terms are ignored. + // Defaults to unbounded (`0`). + MaxWordLength *int `json:"max_word_length,omitempty"` + // MinDocFreq The minimum document frequency below which the terms are ignored from the + // input document. + MinDocFreq *int `json:"min_doc_freq,omitempty"` + // MinTermFreq The minimum term frequency below which the terms are ignored from the input + // document. + MinTermFreq *int `json:"min_term_freq,omitempty"` + // MinWordLength The minimum word length below which the terms are ignored. 
+ MinWordLength *int `json:"min_word_length,omitempty"` + // MinimumShouldMatch After the disjunctive query has been formed, this parameter controls the + // number of terms that must match. + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` + // PerFieldAnalyzer Overrides the default analyzer. + PerFieldAnalyzer map[string]string `json:"per_field_analyzer,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + Routing *string `json:"routing,omitempty"` + // StopWords An array of stop words. + // Any word in this set is ignored. + StopWords []string `json:"stop_words,omitempty"` + // Unlike Used in combination with `like` to exclude documents that match a set of + // terms. + Unlike []Like `json:"unlike,omitempty"` + Version *int64 `json:"version,omitempty"` + VersionType *versiontype.VersionType `json:"version_type,omitempty"` +} + +func (s *MoreLikeThisQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "boost_terms": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.BoostTerms = &f + case float64: + f := Float64(v) + s.BoostTerms = &f + } + + case "fail_on_unsupported_field": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + 
case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.FailOnUnsupportedField = &value + case bool: + s.FailOnUnsupportedField = &v + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "include": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Include = &value + case bool: + s.Include = &v + } + + case "like": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(Like) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Like = append(s.Like, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Like); err != nil { + return err + } + } + + case "max_doc_freq": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxDocFreq = &value + case float64: + f := int(v) + s.MaxDocFreq = &f + } + + case "max_query_terms": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxQueryTerms = &value + case float64: + f := int(v) + s.MaxQueryTerms = &f + } + + case "max_word_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxWordLength = &value + case float64: + f := int(v) + s.MaxWordLength = &f + } + + case "min_doc_freq": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinDocFreq = &value + case float64: + f := int(v) + s.MinDocFreq = &f + } + + case "min_term_freq": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case 
string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinTermFreq = &value + case float64: + f := int(v) + s.MinTermFreq = &f + } + + case "min_word_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinWordLength = &value + case float64: + f := int(v) + s.MinWordLength = &f + } + + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return err + } + + case "per_field_analyzer": + if s.PerFieldAnalyzer == nil { + s.PerFieldAnalyzer = make(map[string]string, 0) + } + if err := dec.Decode(&s.PerFieldAnalyzer); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + case "stop_words": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.StopWords = append(s.StopWords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.StopWords); err != nil { + return err + } + } + + case "unlike": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(Like) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Unlike = append(s.Unlike, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Unlike); err != nil { + return err + } + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return err + } + + } + } + 
return nil } // NewMoreLikeThisQuery returns a MoreLikeThisQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mountedsnapshot.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mountedsnapshot.go index f73f4cbb7..b9f85fd77 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mountedsnapshot.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mountedsnapshot.go @@ -16,19 +16,72 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // MountedSnapshot type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/searchable_snapshots/mount/types.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/searchable_snapshots/mount/types.ts#L23-L27 type MountedSnapshot struct { Indices []string `json:"indices"` Shards ShardStatistics `json:"shards"` Snapshot string `json:"snapshot"` } +func (s *MountedSnapshot) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return err + } + } + + case "shards": + if err := 
dec.Decode(&s.Shards); err != nil { + return err + } + + case "snapshot": + if err := dec.Decode(&s.Snapshot); err != nil { + return err + } + + } + } + return nil +} + // NewMountedSnapshot returns a MountedSnapshot. func NewMountedSnapshot() *MountedSnapshot { r := &MountedSnapshot{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/movingaverageaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/movingaverageaggregation.go index 31c32276a..5d36cbbba 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/movingaverageaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/movingaverageaggregation.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -28,5 +28,5 @@ package types // HoltMovingAverageAggregation // HoltWintersMovingAverageAggregation // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L188-L194 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L228-L234 type MovingAverageAggregation interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/movingfunctionaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/movingfunctionaggregation.go index b1b36e0d5..cfcabd356 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/movingfunctionaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/movingfunctionaggregation.go @@ -16,36 +16,47 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // MovingFunctionAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L250-L254 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L290-L305 type MovingFunctionAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Script *string `json:"script,omitempty"` - Shift *int `json:"shift,omitempty"` - Window *int `json:"window,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Script The script that should be executed on each window of data. 
+ Script *string `json:"script,omitempty"` + // Shift By default, the window consists of the last n values excluding the current + // bucket. + // Increasing `shift` by 1, moves the starting window position by 1 to the + // right. + Shift *int `json:"shift,omitempty"` + // Window The size of window to "slide" across the histogram. + Window *int `json:"window,omitempty"` } func (s *MovingFunctionAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -65,9 +76,16 @@ func (s *MovingFunctionAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -80,23 +98,59 @@ func (s *MovingFunctionAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o case "script": - if err := dec.Decode(&s.Script); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Script = &o case "shift": - if err := dec.Decode(&s.Shift); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Shift = &value + case float64: + f := int(v) + s.Shift = &f } case "window": - if err := dec.Decode(&s.Window); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) 
+ if err != nil { + return err + } + s.Window = &value + case float64: + f := int(v) + s.Window = &f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/movingpercentilesaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/movingpercentilesaggregation.go index 308993988..9ab2dd76d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/movingpercentilesaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/movingpercentilesaggregation.go @@ -16,36 +16,46 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // MovingPercentilesAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L256-L260 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L307-L319 type MovingPercentilesAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
- BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Keyed *bool `json:"keyed,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Shift *int `json:"shift,omitempty"` - Window *int `json:"window,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Keyed *bool `json:"keyed,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Shift By default, the window consists of the last n values excluding the current + // bucket. + // Increasing `shift` by 1, moves the starting window position by 1 to the + // right. + Shift *int `json:"shift,omitempty"` + // Window The size of window to "slide" across the histogram. 
+ Window *int `json:"window,omitempty"` } func (s *MovingPercentilesAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -65,9 +75,16 @@ func (s *MovingPercentilesAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -75,8 +92,17 @@ func (s *MovingPercentilesAggregation) UnmarshalJSON(data []byte) error { } case "keyed": - if err := dec.Decode(&s.Keyed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Keyed = &value + case bool: + s.Keyed = &v } case "meta": @@ -85,18 +111,47 @@ func (s *MovingPercentilesAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o case "shift": - if err := dec.Decode(&s.Shift); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Shift = &value + case float64: + f := int(v) + s.Shift = &f } case "window": - if err := dec.Decode(&s.Window); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Window = &value + case float64: + f := int(v) + s.Window = &f } } diff --git 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/msearchrequestitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/msearchrequestitem.go new file mode 100644 index 000000000..9b061db58 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/msearchrequestitem.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +// MsearchRequestItem holds the union for the following types: +// +// MultisearchHeader +// MultisearchBody +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/msearch/types.ts#L48-L51 +type MsearchRequestItem interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/msearchresponseitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/msearchresponseitem.go new file mode 100644 index 000000000..595c8c72d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/msearchresponseitem.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +// MsearchResponseItem holds the union for the following types: +// +// MultiSearchItem +// ErrorResponseBase +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/msearch/types.ts#L209-L212 +type MsearchResponseItem interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mtermvectorsoperation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mtermvectorsoperation.go index 79c7a107f..64ade03f0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mtermvectorsoperation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mtermvectorsoperation.go @@ -16,33 +16,196 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" ) // MTermVectorsOperation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/mtermvectors/types.ts#L35-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/mtermvectors/types.ts#L35-L94 type MTermVectorsOperation struct { - Doc json.RawMessage `json:"doc,omitempty"` - FieldStatistics *bool `json:"field_statistics,omitempty"` - Fields []string `json:"fields,omitempty"` - Filter *TermVectorsFilter `json:"filter,omitempty"` - Id_ string `json:"_id"` - Index_ *string `json:"_index,omitempty"` - Offsets *bool `json:"offsets,omitempty"` - Payloads *bool `json:"payloads,omitempty"` - Positions *bool `json:"positions,omitempty"` - Routing *string `json:"routing,omitempty"` - TermStatistics *bool `json:"term_statistics,omitempty"` - Version *int64 `json:"version,omitempty"` - VersionType *versiontype.VersionType `json:"version_type,omitempty"` + // Doc An artificial document (a document not present in the index) for which you + // want to retrieve term vectors. + Doc json.RawMessage `json:"doc,omitempty"` + // FieldStatistics If `true`, the response includes the document count, sum of document + // frequencies, and sum of total term frequencies. + FieldStatistics *bool `json:"field_statistics,omitempty"` + // Fields Comma-separated list or wildcard expressions of fields to include in the + // statistics. + // Used as the default list unless a specific field list is provided in the + // `completion_fields` or `fielddata_fields` parameters. + Fields []string `json:"fields,omitempty"` + // Filter Filter terms based on their tf-idf scores. + Filter *TermVectorsFilter `json:"filter,omitempty"` + // Id_ The ID of the document. + Id_ string `json:"_id"` + // Index_ The index of the document. + Index_ *string `json:"_index,omitempty"` + // Offsets If `true`, the response includes term offsets. 
+ Offsets *bool `json:"offsets,omitempty"` + // Payloads If `true`, the response includes term payloads. + Payloads *bool `json:"payloads,omitempty"` + // Positions If `true`, the response includes term positions. + Positions *bool `json:"positions,omitempty"` + // Routing Custom value used to route operations to a specific shard. + Routing *string `json:"routing,omitempty"` + // TermStatistics If true, the response includes term frequency and document frequency. + TermStatistics *bool `json:"term_statistics,omitempty"` + // Version If `true`, returns the document version as part of a hit. + Version *int64 `json:"version,omitempty"` + // VersionType Specific version type. + VersionType *versiontype.VersionType `json:"version_type,omitempty"` +} + +func (s *MTermVectorsOperation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc": + if err := dec.Decode(&s.Doc); err != nil { + return err + } + + case "field_statistics": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.FieldStatistics = &value + case bool: + s.FieldStatistics = &v + } + + case "fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Fields = append(s.Fields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { + return err + } + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return err + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "offsets": + var tmp 
interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Offsets = &value + case bool: + s.Offsets = &v + } + + case "payloads": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Payloads = &value + case bool: + s.Payloads = &v + } + + case "positions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Positions = &value + case bool: + s.Positions = &v + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + case "term_statistics": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TermStatistics = &value + case bool: + s.TermStatistics = &v + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return err + } + + } + } + return nil } // NewMTermVectorsOperation returns a MTermVectorsOperation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaseadjacencymatrixbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaseadjacencymatrixbucket.go index e16033457..905fe9d2f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaseadjacencymatrixbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaseadjacencymatrixbucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseAdjacencyMatrixBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseAdjacencyMatrixBucket struct { Buckets BucketsAdjacencyMatrixBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseAdjacencyMatrixBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseAdjacencyMatrixBucket) UnmarshalJSON(data []byt source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]AdjacencyMatrixBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []AdjacencyMatrixBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasecompositebucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasecompositebucket.go index 1651f6f85..670f6e012 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasecompositebucket.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasecompositebucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseCompositeBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseCompositeBucket struct { - Buckets BucketsCompositeBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsCompositeBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseCompositeBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseCompositeBucket) UnmarshalJSON(data []byte) err source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]CompositeBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []CompositeBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasedatehistogrambucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasedatehistogrambucket.go index 
b94a0c2e6..876b1ff8a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasedatehistogrambucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasedatehistogrambucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseDateHistogramBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseDateHistogramBucket struct { Buckets BucketsDateHistogramBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseDateHistogramBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseDateHistogramBucket) UnmarshalJSON(data []byte) source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]DateHistogramBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []DateHistogramBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasedoubletermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasedoubletermsbucket.go index d4f2e479d..0e000ff8c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasedoubletermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasedoubletermsbucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseDoubleTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseDoubleTermsBucket struct { - Buckets BucketsDoubleTermsBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsDoubleTermsBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseDoubleTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseDoubleTermsBucket) UnmarshalJSON(data []byte) e source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]DoubleTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = 
o - case '[': o := []DoubleTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasefiltersbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasefiltersbucket.go index 30081ece7..c9530cd69 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasefiltersbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasefiltersbucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseFiltersBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseFiltersBucket struct { - Buckets BucketsFiltersBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsFiltersBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseFiltersBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseFiltersBucket) UnmarshalJSON(data []byte) error source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]FiltersBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []FiltersBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasefrequentitemsetsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasefrequentitemsetsbucket.go new file mode 100644 index 000000000..cfbf3448d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasefrequentitemsetsbucket.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + +// MultiBucketAggregateBaseFrequentItemSetsBucket type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 +type MultiBucketAggregateBaseFrequentItemSetsBucket struct { + Buckets BucketsFrequentItemSetsBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *MultiBucketAggregateBaseFrequentItemSetsBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(map[string]FrequentItemSetsBucket, 0) + if err := localDec.Decode(&o); err != nil { + return err + } + s.Buckets = o + case '[': + o := []FrequentItemSetsBucket{} + if err := localDec.Decode(&o); err != nil { + return err + } + s.Buckets = o + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + } + } + return nil +} + +// NewMultiBucketAggregateBaseFrequentItemSetsBucket returns a MultiBucketAggregateBaseFrequentItemSetsBucket. 
+func NewMultiBucketAggregateBaseFrequentItemSetsBucket() *MultiBucketAggregateBaseFrequentItemSetsBucket { + r := &MultiBucketAggregateBaseFrequentItemSetsBucket{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasegeohashgridbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasegeohashgridbucket.go index 469e476a3..cbca5115c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasegeohashgridbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasegeohashgridbucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseGeoHashGridBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseGeoHashGridBucket struct { - Buckets BucketsGeoHashGridBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsGeoHashGridBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseGeoHashGridBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseGeoHashGridBucket) UnmarshalJSON(data []byte) e source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]GeoHashGridBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []GeoHashGridBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasegeohexgridbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasegeohexgridbucket.go index 360cef4bc..a07008558 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasegeohexgridbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasegeohexgridbucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseGeoHexGridBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseGeoHexGridBucket struct { - Buckets BucketsGeoHexGridBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsGeoHexGridBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseGeoHexGridBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseGeoHexGridBucket) UnmarshalJSON(data []byte) er source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]GeoHexGridBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []GeoHexGridBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasegeotilegridbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasegeotilegridbucket.go index 7ba59b682..7f1947a19 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasegeotilegridbucket.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasegeotilegridbucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseGeoTileGridBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseGeoTileGridBucket struct { - Buckets BucketsGeoTileGridBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsGeoTileGridBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseGeoTileGridBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseGeoTileGridBucket) UnmarshalJSON(data []byte) e source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]GeoTileGridBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []GeoTileGridBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasehistogrambucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasehistogrambucket.go 
index 81295ee4a..b4d79204a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasehistogrambucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasehistogrambucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseHistogramBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseHistogramBucket struct { - Buckets BucketsHistogramBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsHistogramBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseHistogramBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseHistogramBucket) UnmarshalJSON(data []byte) err source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]HistogramBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []HistogramBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaseipprefixbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaseipprefixbucket.go index 69cda7bab..f0129350f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaseipprefixbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaseipprefixbucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseIpPrefixBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseIpPrefixBucket struct { - Buckets BucketsIpPrefixBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsIpPrefixBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseIpPrefixBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseIpPrefixBucket) UnmarshalJSON(data []byte) erro source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]IpPrefixBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := 
[]IpPrefixBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaseiprangebucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaseiprangebucket.go index 18684a270..b0139061a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaseiprangebucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaseiprangebucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseIpRangeBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseIpRangeBucket struct { - Buckets BucketsIpRangeBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsIpRangeBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseIpRangeBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseIpRangeBucket) UnmarshalJSON(data []byte) error source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]IpRangeBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []IpRangeBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaselongraretermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaselongraretermsbucket.go index f4da711c8..98f0af7d4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaselongraretermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaselongraretermsbucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseLongRareTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseLongRareTermsBucket struct { Buckets BucketsLongRareTermsBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseLongRareTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseLongRareTermsBucket) UnmarshalJSON(data []byte) source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]LongRareTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []LongRareTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaselongtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaselongtermsbucket.go index 71346db34..b93479d85 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaselongtermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaselongtermsbucket.go @@ 
-16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseLongTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseLongTermsBucket struct { - Buckets BucketsLongTermsBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsLongTermsBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseLongTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseLongTermsBucket) UnmarshalJSON(data []byte) err source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]LongTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []LongTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasemultitermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasemultitermsbucket.go index 21d021207..c7c5a934a 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasemultitermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasemultitermsbucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseMultiTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseMultiTermsBucket struct { - Buckets BucketsMultiTermsBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsMultiTermsBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseMultiTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseMultiTermsBucket) UnmarshalJSON(data []byte) er source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]MultiTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []MultiTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaserangebucket.go 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaserangebucket.go index 6d3ab4a41..7eb1c40a7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaserangebucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebaserangebucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseRangeBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseRangeBucket struct { - Buckets BucketsRangeBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsRangeBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseRangeBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseRangeBucket) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]RangeBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []RangeBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasesignificantlongtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasesignificantlongtermsbucket.go index 8b831ab8d..931acdc36 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasesignificantlongtermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasesignificantlongtermsbucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseSignificantLongTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseSignificantLongTermsBucket struct { Buckets BucketsSignificantLongTermsBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseSignificantLongTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseSignificantLongTermsBucket) UnmarshalJSON(data source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]SignificantLongTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + 
return err + } s.Buckets = o - case '[': o := []SignificantLongTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasesignificantstringtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasesignificantstringtermsbucket.go index 10c18ddbe..0f3b389f7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasesignificantstringtermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasesignificantstringtermsbucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseSignificantStringTermsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseSignificantStringTermsBucket struct { Buckets BucketsSignificantStringTermsBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseSignificantStringTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseSignificantStringTermsBucket) UnmarshalJSON(dat source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]SignificantStringTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []SignificantStringTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasestringraretermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasestringraretermsbucket.go index f923170b6..9262619ae 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasestringraretermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasestringraretermsbucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseStringRareTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseStringRareTermsBucket struct { Buckets BucketsStringRareTermsBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseStringRareTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseStringRareTermsBucket) UnmarshalJSON(data []byt source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]StringRareTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []StringRareTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasestringtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasestringtermsbucket.go index c3d5b4c92..0a66b749b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasestringtermsbucket.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasestringtermsbucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseStringTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseStringTermsBucket struct { - Buckets BucketsStringTermsBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsStringTermsBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseStringTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseStringTermsBucket) UnmarshalJSON(data []byte) e source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]StringTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []StringTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasevariablewidthhistogrambucket.go 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasevariablewidthhistogrambucket.go index 992018116..1980d46da 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasevariablewidthhistogrambucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasevariablewidthhistogrambucket.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseVariableWidthHistogramBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseVariableWidthHistogramBucket struct { Buckets BucketsVariableWidthHistogramBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseVariableWidthHistogramBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseVariableWidthHistogramBucket) UnmarshalJSON(dat source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]VariableWidthHistogramBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []VariableWidthHistogramBucket{} - localDec.Decode(&o) 
+ if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasevoid.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasevoid.go index 78493072f..1f6e6273f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasevoid.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multibucketaggregatebasevoid.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // MultiBucketAggregateBaseVoid type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L326-L328 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L327-L329 type MultiBucketAggregateBaseVoid struct { - Buckets BucketsVoid `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsVoid `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *MultiBucketAggregateBaseVoid) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *MultiBucketAggregateBaseVoid) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': - o := make(map[string]struct{}, 0) - localDec.Decode(&o) + o := make(map[string]interface{}, 0) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o 
- case '[': - o := []struct{}{} - localDec.Decode(&o) + o := []interface{}{} + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multigeterror.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multigeterror.go index 3071d5692..e8fd27cd7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multigeterror.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multigeterror.go @@ -16,19 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // MultiGetError type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/mget/types.ts#L62-L66 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/mget/types.ts#L62-L66 type MultiGetError struct { Error ErrorCause `json:"error"` Id_ string `json:"_id"` Index_ string `json:"_index"` } +func (s *MultiGetError) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + if err := dec.Decode(&s.Error); err != nil { + return err + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + } + } + return nil +} + // NewMultiGetError returns a MultiGetError. 
func NewMultiGetError() *MultiGetError { r := &MultiGetError{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multimatchquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multimatchquery.go index 2ed1dad45..3e18b0895 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multimatchquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multimatchquery.go @@ -16,11 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/textquerytype" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/zerotermsquery" @@ -28,27 +34,297 @@ import ( // MultiMatchQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L191-L217 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L456-L539 type MultiMatchQuery struct { - Analyzer *string `json:"analyzer,omitempty"` - AutoGenerateSynonymsPhraseQuery *bool `json:"auto_generate_synonyms_phrase_query,omitempty"` - Boost *float32 `json:"boost,omitempty"` - CutoffFrequency *Float64 `json:"cutoff_frequency,omitempty"` - Fields []string `json:"fields,omitempty"` - Fuzziness Fuzziness `json:"fuzziness,omitempty"` - FuzzyRewrite *string `json:"fuzzy_rewrite,omitempty"` - FuzzyTranspositions *bool `json:"fuzzy_transpositions,omitempty"` - Lenient *bool `json:"lenient,omitempty"` - MaxExpansions *int `json:"max_expansions,omitempty"` - MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` - Operator *operator.Operator `json:"operator,omitempty"` - PrefixLength *int `json:"prefix_length,omitempty"` - Query string `json:"query"` - QueryName_ *string `json:"_name,omitempty"` - Slop *int `json:"slop,omitempty"` - TieBreaker *Float64 `json:"tie_breaker,omitempty"` - Type *textquerytype.TextQueryType `json:"type,omitempty"` - ZeroTermsQuery *zerotermsquery.ZeroTermsQuery `json:"zero_terms_query,omitempty"` + // Analyzer Analyzer used to convert the text in the query value into tokens. + Analyzer *string `json:"analyzer,omitempty"` + // AutoGenerateSynonymsPhraseQuery If `true`, match phrase queries are automatically created for multi-term + // synonyms. + AutoGenerateSynonymsPhraseQuery *bool `json:"auto_generate_synonyms_phrase_query,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. 
+ // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + CutoffFrequency *Float64 `json:"cutoff_frequency,omitempty"` + // Fields The fields to be queried. + // Defaults to the `index.query.default_field` index settings, which in turn + // defaults to `*`. + Fields []string `json:"fields,omitempty"` + // Fuzziness Maximum edit distance allowed for matching. + Fuzziness Fuzziness `json:"fuzziness,omitempty"` + // FuzzyRewrite Method used to rewrite the query. + FuzzyRewrite *string `json:"fuzzy_rewrite,omitempty"` + // FuzzyTranspositions If `true`, edits for fuzzy matching include transpositions of two adjacent + // characters (for example, `ab` to `ba`). + // Can be applied to the term subqueries constructed for all terms but the final + // term. + FuzzyTranspositions *bool `json:"fuzzy_transpositions,omitempty"` + // Lenient If `true`, format-based errors, such as providing a text query value for a + // numeric field, are ignored. + Lenient *bool `json:"lenient,omitempty"` + // MaxExpansions Maximum number of terms to which the query will expand. + MaxExpansions *int `json:"max_expansions,omitempty"` + // MinimumShouldMatch Minimum number of clauses that must match for a document to be returned. + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` + // Operator Boolean logic used to interpret text in the query value. + Operator *operator.Operator `json:"operator,omitempty"` + // PrefixLength Number of beginning characters left unchanged for fuzzy matching. + PrefixLength *int `json:"prefix_length,omitempty"` + // Query Text, number, boolean value or date you wish to find in the provided field. + Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // Slop Maximum number of positions allowed between matching tokens. + Slop *int `json:"slop,omitempty"` + // TieBreaker Determines how scores for each per-term blended query and scores across + // groups are combined. 
+ TieBreaker *Float64 `json:"tie_breaker,omitempty"` + // Type How `the` multi_match query is executed internally. + Type *textquerytype.TextQueryType `json:"type,omitempty"` + // ZeroTermsQuery Indicates whether no documents are returned if the `analyzer` removes all + // tokens, such as when using a `stop` filter. + ZeroTermsQuery *zerotermsquery.ZeroTermsQuery `json:"zero_terms_query,omitempty"` +} + +func (s *MultiMatchQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "auto_generate_synonyms_phrase_query": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AutoGenerateSynonymsPhraseQuery = &value + case bool: + s.AutoGenerateSynonymsPhraseQuery = &v + } + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "cutoff_frequency": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.CutoffFrequency = &f + case float64: + f := Float64(v) + s.CutoffFrequency = &f + } + + case "fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Fields = append(s.Fields, 
*o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { + return err + } + } + + case "fuzziness": + if err := dec.Decode(&s.Fuzziness); err != nil { + return err + } + + case "fuzzy_rewrite": + if err := dec.Decode(&s.FuzzyRewrite); err != nil { + return err + } + + case "fuzzy_transpositions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.FuzzyTranspositions = &value + case bool: + s.FuzzyTranspositions = &v + } + + case "lenient": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Lenient = &value + case bool: + s.Lenient = &v + } + + case "max_expansions": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxExpansions = &value + case float64: + f := int(v) + s.MaxExpansions = &f + } + + case "minimum_should_match": + if err := dec.Decode(&s.MinimumShouldMatch); err != nil { + return err + } + + case "operator": + if err := dec.Decode(&s.Operator); err != nil { + return err + } + + case "prefix_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PrefixLength = &value + case float64: + f := int(v) + s.PrefixLength = &f + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "slop": + + var tmp interface{} 
+ dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Slop = &value + case float64: + f := int(v) + s.Slop = &f + } + + case "tie_breaker": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.TieBreaker = &f + case float64: + f := Float64(v) + s.TieBreaker = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "zero_terms_query": + if err := dec.Decode(&s.ZeroTermsQuery); err != nil { + return err + } + + } + } + return nil } // NewMultiMatchQuery returns a MultiMatchQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multiplexertokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multiplexertokenfilter.go index f3a9790f6..2c022f5d4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multiplexertokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multiplexertokenfilter.go @@ -16,25 +16,85 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // MultiplexerTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L259-L263 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L260-L264 type MultiplexerTokenFilter struct { - Filters []string `json:"filters"` - PreserveOriginal *bool `json:"preserve_original,omitempty"` - Type string `json:"type,omitempty"` - Version *string `json:"version,omitempty"` + Filters []string `json:"filters"` + PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *MultiplexerTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filters": + if err := dec.Decode(&s.Filters); err != nil { + return err + } + + case "preserve_original": + if err := dec.Decode(&s.PreserveOriginal); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s MultiplexerTokenFilter) MarshalJSON() ([]byte, error) { + type innerMultiplexerTokenFilter MultiplexerTokenFilter + tmp := innerMultiplexerTokenFilter{ + Filters: s.Filters, + PreserveOriginal: s.PreserveOriginal, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "multiplexer" + + return json.Marshal(tmp) } // NewMultiplexerTokenFilter returns a MultiplexerTokenFilter. 
func NewMultiplexerTokenFilter() *MultiplexerTokenFilter { r := &MultiplexerTokenFilter{} - r.Type = "multiplexer" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multisearchbody.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multisearchbody.go new file mode 100644 index 000000000..0ba35ee03 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multisearchbody.go @@ -0,0 +1,455 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// MultisearchBody type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/msearch/types.ts#L71-L202 +type MultisearchBody struct { + Aggregations map[string]Aggregations `json:"aggregations,omitempty"` + Collapse *FieldCollapse `json:"collapse,omitempty"` + // DocvalueFields Array of wildcard (*) patterns. 
The request returns doc values for field + // names matching these patterns in the hits.fields property of the response. + DocvalueFields []FieldAndFormat `json:"docvalue_fields,omitempty"` + // Explain If true, returns detailed information about score computation as part of a + // hit. + Explain *bool `json:"explain,omitempty"` + // Ext Configuration of search extensions defined by Elasticsearch plugins. + Ext map[string]json.RawMessage `json:"ext,omitempty"` + // Fields Array of wildcard (*) patterns. The request returns values for field names + // matching these patterns in the hits.fields property of the response. + Fields []FieldAndFormat `json:"fields,omitempty"` + // From Starting document offset. By default, you cannot page through more than + // 10,000 + // hits using the from and size parameters. To page through more hits, use the + // search_after parameter. + From *int `json:"from,omitempty"` + Highlight *Highlight `json:"highlight,omitempty"` + // IndicesBoost Boosts the _score of documents from specified indices. + IndicesBoost []map[string]Float64 `json:"indices_boost,omitempty"` + // Knn Defines the approximate kNN search to run. + Knn []KnnQuery `json:"knn,omitempty"` + // MinScore Minimum _score for matching documents. Documents with a lower _score are + // not included in the search results. + MinScore *Float64 `json:"min_score,omitempty"` + // Pit Limits the search to a point in time (PIT). If you provide a PIT, you + // cannot specify an in the request path. + Pit *PointInTimeReference `json:"pit,omitempty"` + PostFilter *Query `json:"post_filter,omitempty"` + Profile *bool `json:"profile,omitempty"` + // Query Defines the search definition using the Query DSL. + Query *Query `json:"query,omitempty"` + Rescore []Rescore `json:"rescore,omitempty"` + // RuntimeMappings Defines one or more runtime fields in the search request. These fields take + // precedence over mapped fields with the same name. 
+ RuntimeMappings RuntimeFields `json:"runtime_mappings,omitempty"` + // ScriptFields Retrieve a script evaluation (based on different fields) for each hit. + ScriptFields map[string]ScriptField `json:"script_fields,omitempty"` + SearchAfter []FieldValue `json:"search_after,omitempty"` + // SeqNoPrimaryTerm If true, returns sequence number and primary term of the last modification + // of each hit. See Optimistic concurrency control. + SeqNoPrimaryTerm *bool `json:"seq_no_primary_term,omitempty"` + // Size The number of hits to return. By default, you cannot page through more + // than 10,000 hits using the from and size parameters. To page through more + // hits, use the search_after parameter. + Size *int `json:"size,omitempty"` + Sort []SortCombinations `json:"sort,omitempty"` + // Source_ Indicates which source fields are returned for matching documents. These + // fields are returned in the hits._source property of the search response. + Source_ SourceConfig `json:"_source,omitempty"` + // Stats Stats groups to associate with the search. Each group maintains a statistics + // aggregation for its associated searches. You can retrieve these stats using + // the indices stats API. + Stats []string `json:"stats,omitempty"` + // StoredFields List of stored fields to return as part of a hit. If no fields are specified, + // no stored fields are included in the response. If this field is specified, + // the _source + // parameter defaults to false. You can pass _source: true to return both source + // fields + // and stored fields in the search response. + StoredFields []string `json:"stored_fields,omitempty"` + Suggest *Suggester `json:"suggest,omitempty"` + // TerminateAfter Maximum number of documents to collect for each shard. If a query reaches + // this + // limit, Elasticsearch terminates the query early. Elasticsearch collects + // documents + // before sorting. Defaults to 0, which does not terminate query execution + // early. 
+ TerminateAfter *int64 `json:"terminate_after,omitempty"` + // Timeout Specifies the period of time to wait for a response from each shard. If no + // response + // is received before the timeout expires, the request fails and returns an + // error. + // Defaults to no timeout. + Timeout *string `json:"timeout,omitempty"` + // TrackScores If true, calculate and return document scores, even if the scores are not + // used for sorting. + TrackScores *bool `json:"track_scores,omitempty"` + // TrackTotalHits Number of hits matching the query to count accurately. If true, the exact + // number of hits is returned at the cost of some performance. If false, the + // response does not include the total number of hits matching the query. + // Defaults to 10,000 hits. + TrackTotalHits TrackHits `json:"track_total_hits,omitempty"` + // Version If true, returns document version as part of a hit. + Version *bool `json:"version,omitempty"` +} + +func (s *MultisearchBody) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations", "aggs": + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregations, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return err + } + + case "collapse": + if err := dec.Decode(&s.Collapse); err != nil { + return err + } + + case "docvalue_fields": + if err := dec.Decode(&s.DocvalueFields); err != nil { + return err + } + + case "explain": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Explain = &value + case bool: + s.Explain = &v + } + + case "ext": + if s.Ext == nil { + s.Ext = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Ext); err != nil { + return err + } + + case "fields": + if err := dec.Decode(&s.Fields); err 
!= nil { + return err + } + + case "from": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.From = &value + case float64: + f := int(v) + s.From = &f + } + + case "highlight": + if err := dec.Decode(&s.Highlight); err != nil { + return err + } + + case "indices_boost": + if err := dec.Decode(&s.IndicesBoost); err != nil { + return err + } + + case "knn": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewKnnQuery() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Knn = append(s.Knn, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Knn); err != nil { + return err + } + } + + case "min_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.MinScore = &f + case float64: + f := Float64(v) + s.MinScore = &f + } + + case "pit": + if err := dec.Decode(&s.Pit); err != nil { + return err + } + + case "post_filter": + if err := dec.Decode(&s.PostFilter); err != nil { + return err + } + + case "profile": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Profile = &value + case bool: + s.Profile = &v + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "rescore": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewRescore() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Rescore = append(s.Rescore, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Rescore); err != nil { + return err + } + } + + case "runtime_mappings": 
+ if err := dec.Decode(&s.RuntimeMappings); err != nil { + return err + } + + case "script_fields": + if s.ScriptFields == nil { + s.ScriptFields = make(map[string]ScriptField, 0) + } + if err := dec.Decode(&s.ScriptFields); err != nil { + return err + } + + case "search_after": + if err := dec.Decode(&s.SearchAfter); err != nil { + return err + } + + case "seq_no_primary_term": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.SeqNoPrimaryTerm = &value + case bool: + s.SeqNoPrimaryTerm = &v + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return err + } + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return err + } + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + return err + } + + case "stored_fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.StoredFields = append(s.StoredFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.StoredFields); err != nil { + return err + } + } + + case "suggest": + if err := dec.Decode(&s.Suggest); err != nil { + return err + } + + case "terminate_after": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case 
string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TerminateAfter = &value + case float64: + f := int64(v) + s.TerminateAfter = &f + } + + case "timeout": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Timeout = &o + + case "track_scores": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TrackScores = &value + case bool: + s.TrackScores = &v + } + + case "track_total_hits": + if err := dec.Decode(&s.TrackTotalHits); err != nil { + return err + } + + case "version": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Version = &value + case bool: + s.Version = &v + } + + } + } + return nil +} + +// NewMultisearchBody returns a MultisearchBody. +func NewMultisearchBody() *MultisearchBody { + r := &MultisearchBody{ + Aggregations: make(map[string]Aggregations, 0), + Ext: make(map[string]json.RawMessage, 0), + ScriptFields: make(map[string]ScriptField, 0), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multisearchheader.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multisearchheader.go new file mode 100644 index 000000000..b8f1a9030 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multisearchheader.go @@ -0,0 +1,214 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype" +) + +// MultisearchHeader type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/msearch/types.ts#L53-L68 +type MultisearchHeader struct { + AllowNoIndices *bool `json:"allow_no_indices,omitempty"` + AllowPartialSearchResults *bool `json:"allow_partial_search_results,omitempty"` + CcsMinimizeRoundtrips *bool `json:"ccs_minimize_roundtrips,omitempty"` + ExpandWildcards []expandwildcard.ExpandWildcard `json:"expand_wildcards,omitempty"` + IgnoreThrottled *bool `json:"ignore_throttled,omitempty"` + IgnoreUnavailable *bool `json:"ignore_unavailable,omitempty"` + Index []string `json:"index,omitempty"` + Preference *string `json:"preference,omitempty"` + RequestCache *bool `json:"request_cache,omitempty"` + Routing *string `json:"routing,omitempty"` + SearchType *searchtype.SearchType `json:"search_type,omitempty"` +} + +func (s *MultisearchHeader) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_no_indices": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowNoIndices = &value + case bool: + s.AllowNoIndices = &v + } + + case "allow_partial_search_results": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowPartialSearchResults = &value + case bool: + s.AllowPartialSearchResults = &v + } + + case "ccs_minimize_roundtrips": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.CcsMinimizeRoundtrips = &value + case bool: + s.CcsMinimizeRoundtrips = &v + } + + case "expand_wildcards": + rawMsg := json.RawMessage{} + 
dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := &expandwildcard.ExpandWildcard{} + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.ExpandWildcards = append(s.ExpandWildcards, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.ExpandWildcards); err != nil { + return err + } + } + + case "ignore_throttled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreThrottled = &value + case bool: + s.IgnoreThrottled = &v + } + + case "ignore_unavailable": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreUnavailable = &value + case bool: + s.IgnoreUnavailable = &v + } + + case "index": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Index = append(s.Index, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Index); err != nil { + return err + } + } + + case "preference": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Preference = &o + + case "request_cache": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.RequestCache = &value + case bool: + s.RequestCache = &v + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + case "search_type": + if err := dec.Decode(&s.SearchType); err != nil { + return err + } + + } + } + return nil +} + +// NewMultisearchHeader returns a 
MultisearchHeader. +func NewMultisearchHeader() *MultisearchHeader { + r := &MultisearchHeader{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multisearchitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multisearchitem.go new file mode 100644 index 000000000..b35e599e2 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multisearchitem.go @@ -0,0 +1,798 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "strings" +) + +// MultiSearchItem type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/msearch/types.ts#L214-L217 +type MultiSearchItem struct { + Aggregations map[string]Aggregate `json:"aggregations,omitempty"` + Clusters_ *ClusterStatistics `json:"_clusters,omitempty"` + Fields map[string]json.RawMessage `json:"fields,omitempty"` + Hits HitsMetadata `json:"hits"` + MaxScore *Float64 `json:"max_score,omitempty"` + NumReducePhases *int64 `json:"num_reduce_phases,omitempty"` + PitId *string `json:"pit_id,omitempty"` + Profile *Profile `json:"profile,omitempty"` + ScrollId_ *string `json:"_scroll_id,omitempty"` + Shards_ ShardStatistics `json:"_shards"` + Status *int `json:"status,omitempty"` + Suggest map[string][]Suggest `json:"suggest,omitempty"` + TerminatedEarly *bool `json:"terminated_early,omitempty"` + TimedOut bool `json:"timed_out"` + Took int64 `json:"took"` +} + +func (s *MultiSearchItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := 
NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { 
+ return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + 
o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err 
+ } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + 
} else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } + } + + case "_clusters": + if err := dec.Decode(&s.Clusters_); err != nil { + return err + } + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "hits": + if err := dec.Decode(&s.Hits); err != nil { + return err + } + + case "max_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.MaxScore = &f + case float64: + f := Float64(v) + s.MaxScore = &f + } + + case "num_reduce_phases": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumReducePhases = &value + case float64: + f := int64(v) + s.NumReducePhases = &f + } + + case "pit_id": + if err := dec.Decode(&s.PitId); err != nil { + return err + } + + case "profile": + if err := dec.Decode(&s.Profile); err != nil { + return err + } + + case "_scroll_id": + if err := dec.Decode(&s.ScrollId_); err != nil { + return err + } + + case "_shards": + if err := dec.Decode(&s.Shards_); err != nil { + return err + } + + case "status": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Status = &value + case float64: + f := int(v) + s.Status = &f + } + + case "suggest": + if s.Suggest == nil { + s.Suggest = make(map[string][]Suggest, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Suggest == nil { + 
s.Suggest = make(map[string][]Suggest, 0) + } + switch elems[0] { + + case "completion": + o := NewCompletionSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "phrase": + o := NewPhraseSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "term": + o := NewTermSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + } + } else { + return errors.New("cannot decode JSON for field Suggest") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[value] = append(s.Suggest[value], o) + } + } + } + + case "terminated_early": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TerminatedEarly = &value + case bool: + s.TerminatedEarly = &v + } + + case "timed_out": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimedOut = value + case bool: + s.TimedOut = v + } + + case "took": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Took = value + case float64: + f := int64(v) + s.Took = f + } + + } + } + return nil +} + +// NewMultiSearchItem returns a MultiSearchItem. 
+func NewMultiSearchItem() *MultiSearchItem { + r := &MultiSearchItem{ + Aggregations: make(map[string]Aggregate, 0), + Fields: make(map[string]json.RawMessage, 0), + Suggest: make(map[string][]Suggest, 0), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multisearchresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multisearchresult.go new file mode 100644 index 000000000..e4fe8198f --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multisearchresult.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// MultiSearchResult type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/msearch/types.ts#L204-L207 +type MultiSearchResult struct { + Responses []MsearchResponseItem `json:"responses"` + Took int64 `json:"took"` +} + +func (s *MultiSearchResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "responses": + if err := dec.Decode(&s.Responses); err != nil { + return err + } + + case "took": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Took = value + case float64: + f := int64(v) + s.Took = f + } + + } + } + return nil +} + +// NewMultiSearchResult returns a MultiSearchResult. +func NewMultiSearchResult() *MultiSearchResult { + r := &MultiSearchResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multitermlookup.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multitermlookup.go index 231c6f579..948a48c8f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multitermlookup.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multitermlookup.go @@ -16,18 +16,58 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // MultiTermLookup type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L276-L279 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L624-L634 type MultiTermLookup struct { - Field string `json:"field"` + // Field A fields from which to retrieve terms. + Field string `json:"field"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. Missing Missing `json:"missing,omitempty"` } +func (s *MultiTermLookup) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + } + } + return nil +} + // NewMultiTermLookup returns a MultiTermLookup. func NewMultiTermLookup() *MultiTermLookup { r := &MultiTermLookup{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multitermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multitermsaggregate.go index df058c974..271b160d3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multitermsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multitermsaggregate.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // MultiTermsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L460-L462 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L461-L463 type MultiTermsAggregate struct { - Buckets BucketsMultiTermsBucket `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` + Buckets BucketsMultiTermsBucket `json:"buckets"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Meta Metadata `json:"meta,omitempty"` + SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` } func (s *MultiTermsAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -59,21 +60,33 @@ func (s *MultiTermsAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]MultiTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []MultiTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } case "doc_count_error_upper_bound": - if err := dec.Decode(&s.DocCountErrorUpperBound); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f } case "meta": @@ -82,8 +95,18 @@ func (s *MultiTermsAggregate) UnmarshalJSON(data []byte) error { } case "sum_other_doc_count": - if err := dec.Decode(&s.SumOtherDocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SumOtherDocCount = &value + case float64: + f := int64(v) + s.SumOtherDocCount = &f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multitermsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multitermsaggregation.go index 93b1b5fa9..d5c568a01 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multitermsaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multitermsaggregation.go @@ -16,38 +16,51 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationcollectmode" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationcollectmode" ) // MultiTermsAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L265-L274 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L582-L622 type MultiTermsAggregation struct { - CollectMode *termsaggregationcollectmode.TermsAggregationCollectMode `json:"collect_mode,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - MinDocCount *int64 `json:"min_doc_count,omitempty"` - Name *string `json:"name,omitempty"` - Order AggregateOrder `json:"order,omitempty"` - ShardMinDocCount *int64 `json:"shard_min_doc_count,omitempty"` - ShardSize *int `json:"shard_size,omitempty"` - ShowTermDocCountError *bool `json:"show_term_doc_count_error,omitempty"` - Size *int `json:"size,omitempty"` - Terms []MultiTermLookup `json:"terms"` + // CollectMode Specifies the strategy for data collection. + CollectMode *termsaggregationcollectmode.TermsAggregationCollectMode `json:"collect_mode,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // MinDocCount The minimum number of documents in a bucket for it to be returned. + MinDocCount *int64 `json:"min_doc_count,omitempty"` + Name *string `json:"name,omitempty"` + // Order Specifies the sort order of the buckets. + // Defaults to sorting by descending document count. + Order AggregateOrder `json:"order,omitempty"` + // ShardMinDocCount The minimum number of documents in a bucket on each shard for it to be + // returned. + ShardMinDocCount *int64 `json:"shard_min_doc_count,omitempty"` + // ShardSize The number of candidate terms produced by each shard. + // By default, `shard_size` will be automatically estimated based on the number + // of shards and the `size` parameter. + ShardSize *int `json:"shard_size,omitempty"` + // ShowTermDocCountError Calculates the doc count error on per term basis. 
+ ShowTermDocCountError *bool `json:"show_term_doc_count_error,omitempty"` + // Size The number of term buckets should be returned out of the overall terms list. + Size *int `json:"size,omitempty"` + // Terms The field from which to generate sets of terms. + Terms []MultiTermLookup `json:"terms"` } func (s *MultiTermsAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -72,14 +85,31 @@ func (s *MultiTermsAggregation) UnmarshalJSON(data []byte) error { } case "min_doc_count": - if err := dec.Decode(&s.MinDocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MinDocCount = &value + case float64: + f := int64(v) + s.MinDocCount = &f } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o case "order": @@ -88,36 +118,79 @@ func (s *MultiTermsAggregation) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]sortorder.SortOrder, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Order = o - case '[': o := make([]map[string]sortorder.SortOrder, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Order = o } case "shard_min_doc_count": - if err := dec.Decode(&s.ShardMinDocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ShardMinDocCount = &value + case float64: + f := int64(v) + s.ShardMinDocCount = &f } case "shard_size": - if err := 
dec.Decode(&s.ShardSize); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f } case "show_term_doc_count_error": - if err := dec.Decode(&s.ShowTermDocCountError); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.ShowTermDocCountError = &value + case bool: + s.ShowTermDocCountError = &v } case "size": - if err := dec.Decode(&s.Size); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f } case "terms": diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multitermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multitermsbucket.go index ecf2a0112..88183a638 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multitermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/multitermsbucket.go @@ -16,25 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // MultiTermsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L464-L468 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L465-L469 type MultiTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -44,6 +42,7 @@ type MultiTermsBucket struct { } func (s *MultiTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,456 +56,34 @@ func (s *MultiTermsBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := 
NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != 
nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); 
err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "doc_count_error_upper_bound": - if err := dec.Decode(&s.DocCountErrorUpperBound); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f } case "key": @@ -515,9 +92,529 @@ func (s *MultiTermsBucket) UnmarshalJSON(data []byte) error { } case "key_as_string": - if err := dec.Decode(&s.KeyAsString); err != nil { + var tmp 
json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeyAsString = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err 
:= dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = 
o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != 
nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + 
return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := 
NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } } } @@ -543,6 +640,7 @@ func (s MultiTermsBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/murmur3hashproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/murmur3hashproperty.go index 6aeed8e2c..1cdb8a88d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/murmur3hashproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/murmur3hashproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // Murmur3HashProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/specialized.ts#L74-L76 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/specialized.ts#L75-L77 type Murmur3HashProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -48,6 +48,7 @@ type Murmur3HashProperty struct { } func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -62,13 +63,33 @@ func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { switch t { case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -77,6 +98,9 @@ func (s 
*Murmur3HashProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -85,7 +109,9 @@ func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -364,23 +390,42 @@ func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -389,7 +434,9 @@ func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -668,20 +715,38 @@ func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + 
s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -694,6 +759,27 @@ func (s *Murmur3HashProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s Murmur3HashProperty) MarshalJSON() ([]byte, error) { + type innerMurmur3HashProperty Murmur3HashProperty + tmp := innerMurmur3HashProperty{ + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + tmp.Type = "murmur3" + + return json.Marshal(tmp) +} + // NewMurmur3HashProperty returns a Murmur3HashProperty. func NewMurmur3HashProperty() *Murmur3HashProperty { r := &Murmur3HashProperty{ @@ -702,7 +788,5 @@ func NewMurmur3HashProperty() *Murmur3HashProperty { Properties: make(map[string]Property, 0), } - r.Type = "murmur3" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mutualinformationheuristic.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mutualinformationheuristic.go index 79f535283..1b150c7ef 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mutualinformationheuristic.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/mutualinformationheuristic.go @@ -16,16 +16,76 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // MutualInformationHeuristic type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L331-L334 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L753-L762 type MutualInformationHeuristic struct { + // BackgroundIsSuperset Set to `false` if you defined a custom background filter that represents a + // different set of documents that you want to compare to. BackgroundIsSuperset *bool `json:"background_is_superset,omitempty"` - IncludeNegatives *bool `json:"include_negatives,omitempty"` + // IncludeNegatives Set to `false` to filter out the terms that appear less often in the subset + // than in documents outside the subset. 
+ IncludeNegatives *bool `json:"include_negatives,omitempty"` +} + +func (s *MutualInformationHeuristic) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "background_is_superset": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.BackgroundIsSuperset = &value + case bool: + s.BackgroundIsSuperset = &v + } + + case "include_negatives": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IncludeNegatives = &value + case bool: + s.IncludeNegatives = &v + } + + } + } + return nil } // NewMutualInformationHeuristic returns a MutualInformationHeuristic. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/names.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/names.go index 25b0634a4..c12c374d0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/names.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/names.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Names type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L73-L73 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L75-L75 type Names []string diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nativecode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nativecode.go index de74aeb1a..abd8ae203 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nativecode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nativecode.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NativeCode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/info/types.ts#L29-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/info/types.ts#L29-L32 type NativeCode struct { BuildHash string `json:"build_hash"` Version string `json:"version"` } +func (s *NativeCode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "build_hash": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildHash = o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewNativeCode returns a NativeCode. func NewNativeCode() *NativeCode { r := &NativeCode{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nativecodeinformation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nativecodeinformation.go index 9187c074a..670cd21df 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nativecodeinformation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nativecodeinformation.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NativeCodeInformation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/info/types.ts#L29-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/info/types.ts#L29-L32 type NativeCodeInformation struct { BuildHash string `json:"build_hash"` Version string `json:"version"` } +func (s *NativeCodeInformation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "build_hash": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildHash = o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewNativeCodeInformation returns a NativeCodeInformation. func NewNativeCodeInformation() *NativeCodeInformation { r := &NativeCodeInformation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nerinferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nerinferenceoptions.go index 2b1ff88b9..eaa0a06e4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nerinferenceoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nerinferenceoptions.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NerInferenceOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L230-L239 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L255-L264 type NerInferenceOptions struct { // ClassificationLabels The token classification labels. Must be IOB formatted tags ClassificationLabels []string `json:"classification_labels,omitempty"` @@ -34,6 +42,53 @@ type NerInferenceOptions struct { Vocabulary *Vocabulary `json:"vocabulary,omitempty"` } +func (s *NerInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classification_labels": + if err := dec.Decode(&s.ClassificationLabels); err != nil { + return err + } + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return err + } + + case "vocabulary": + if err := dec.Decode(&s.Vocabulary); err != nil { + return err + } + + } + } + return nil +} + // NewNerInferenceOptions returns a NerInferenceOptions. 
func NewNerInferenceOptions() *NerInferenceOptions { r := &NerInferenceOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nerinferenceupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nerinferenceupdateoptions.go index fa73872b4..8f30b6b20 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nerinferenceupdateoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nerinferenceupdateoptions.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NerInferenceUpdateOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L363-L368 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L404-L409 type NerInferenceUpdateOptions struct { // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. 
@@ -31,6 +39,43 @@ type NerInferenceUpdateOptions struct { Tokenization *NlpTokenizationUpdateOptions `json:"tokenization,omitempty"` } +func (s *NerInferenceUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return err + } + + } + } + return nil +} + // NewNerInferenceUpdateOptions returns a NerInferenceUpdateOptions. func NewNerInferenceUpdateOptions() *NerInferenceUpdateOptions { r := &NerInferenceUpdateOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedaggregate.go index 57d79cc5f..581c6ee9d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedaggregate.go @@ -16,32 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // NestedAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L485-L486 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L486-L487 type NestedAggregate struct { - Aggregations map[string]Aggregate `json:"-"` - DocCount int64 `json:"doc_count"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` } func (s *NestedAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -55,451 +54,19 @@ func (s *NestedAggregate) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - 
return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } 
- s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err 
:= dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return 
err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "meta": @@ -507,6 +74,519 @@ func (s *NestedAggregate) UnmarshalJSON(data []byte) error { return err } + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + 
return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if 
err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := 
dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": 
+ o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } + } } return nil @@ -531,6 +611,7 @@ func (s NestedAggregate) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedaggregation.go index ae481da9f..7d4ea4bbe 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedaggregation.go @@ -16,21 +16,68 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // NestedAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L281-L283 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L636-L641 type NestedAggregation struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Path *string `json:"path,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Path The path to the field of type `nested`. + Path *string `json:"path,omitempty"` +} + +func (s *NestedAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "path": + if err := dec.Decode(&s.Path); err != nil { + return err + } + + } + } + return nil } // NewNestedAggregation returns a NestedAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedidentity.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedidentity.go index 35a189d29..a2a7c6ab5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedidentity.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedidentity.go @@ -16,19 +16,73 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NestedIdentity type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/hits.ts#L88-L92 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/hits.ts#L88-L92 type NestedIdentity struct { Field string `json:"field"` Nested_ *NestedIdentity `json:"_nested,omitempty"` Offset int `json:"offset"` } +func (s *NestedIdentity) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "_nested": + if err := dec.Decode(&s.Nested_); err != nil { + return err + } + + case "offset": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Offset = value + case float64: + f := int(v) + s.Offset = f + } + + } + } + return nil +} + // NewNestedIdentity returns a NestedIdentity. func NewNestedIdentity() *NestedIdentity { r := &NestedIdentity{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedproperty.go index 7f2fc66b3..7350fbdc4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedproperty.go @@ -16,23 +16,23 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // NestedProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/complex.ts#L39-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/complex.ts#L39-L44 type NestedProperty struct { CopyTo []string `json:"copy_to,omitempty"` Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` @@ -50,6 +50,7 @@ type NestedProperty struct { } func (s *NestedProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -64,8 +65,19 @@ func (s *NestedProperty) UnmarshalJSON(data []byte) error { switch t { case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "dynamic": @@ -74,11 +86,23 @@ func (s *NestedProperty) UnmarshalJSON(data []byte) error { } case "enabled": - if err := dec.Decode(&s.Enabled); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if 
err != nil { + return err + } + s.Enabled = &value + case bool: + s.Enabled = &v } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -87,7 +111,9 @@ func (s *NestedProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -366,33 +392,70 @@ func (s *NestedProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "include_in_parent": - if err := dec.Decode(&s.IncludeInParent); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IncludeInParent = &value + case bool: + s.IncludeInParent = &v } case "include_in_root": - if err := dec.Decode(&s.IncludeInRoot); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IncludeInRoot = &value + case bool: + s.IncludeInRoot = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } 
refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -401,7 +464,9 @@ func (s *NestedProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -680,20 +745,38 @@ func (s *NestedProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -706,6 +789,29 @@ func (s *NestedProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s NestedProperty) MarshalJSON() ([]byte, error) { + type innerNestedProperty NestedProperty + tmp := innerNestedProperty{ + CopyTo: s.CopyTo, + Dynamic: s.Dynamic, + Enabled: s.Enabled, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IncludeInParent: s.IncludeInParent, + IncludeInRoot: s.IncludeInRoot, + Meta: s.Meta, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + tmp.Type = "nested" + + return json.Marshal(tmp) +} + // NewNestedProperty returns a NestedProperty. 
func NewNestedProperty() *NestedProperty { r := &NestedProperty{ @@ -714,7 +820,5 @@ func NewNestedProperty() *NestedProperty { Properties: make(map[string]Property, 0), } - r.Type = "nested" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedquery.go index 784a2b5c0..d09240542 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedquery.go @@ -16,25 +16,125 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/childscoremode" ) // NestedQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/joining.ts#L63-L71 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/joining.ts#L106-L130 type NestedQuery struct { - Boost *float32 `json:"boost,omitempty"` - IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` - InnerHits *InnerHits `json:"inner_hits,omitempty"` - Path string `json:"path"` - Query *Query `json:"query,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - ScoreMode *childscoremode.ChildScoreMode `json:"score_mode,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. 
+ // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // IgnoreUnmapped Indicates whether to ignore an unmapped path and not return any documents + // instead of an error. + IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + // InnerHits If defined, each search hit will contain inner hits. + InnerHits *InnerHits `json:"inner_hits,omitempty"` + // Path Path to the nested object you wish to search. + Path string `json:"path"` + // Query Query you wish to run on nested objects in the path. + Query *Query `json:"query,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // ScoreMode How scores for matching child objects affect the root parent document’s + // relevance score. + ScoreMode *childscoremode.ChildScoreMode `json:"score_mode,omitempty"` +} + +func (s *NestedQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "ignore_unmapped": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "inner_hits": + if err := dec.Decode(&s.InnerHits); err != nil { + return err + } + + case "path": + if err := dec.Decode(&s.Path); err != nil { + return err + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != 
nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "score_mode": + if err := dec.Decode(&s.ScoreMode); err != nil { + return err + } + + } + } + return nil } // NewNestedQuery returns a NestedQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedsortvalue.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedsortvalue.go index aa7ef1326..8b2110e9c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedsortvalue.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nestedsortvalue.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NestedSortValue type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/sort.ts#L30-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/sort.ts#L30-L35 type NestedSortValue struct { Filter *Query `json:"filter,omitempty"` MaxChildren *int `json:"max_children,omitempty"` @@ -30,6 +38,57 @@ type NestedSortValue struct { Path string `json:"path"` } +func (s *NestedSortValue) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return err + } + + case "max_children": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxChildren = &value + case float64: + f := int(v) + s.MaxChildren = &f + } + + case "nested": + if err := dec.Decode(&s.Nested); err != nil { + return err + } + + case "path": + if err := dec.Decode(&s.Path); err != nil { + return err + } + + } + } + return nil +} + // NewNestedSortValue returns a NestedSortValue. func NewNestedSortValue() *NestedSortValue { r := &NestedSortValue{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nevercondition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nevercondition.go index e454b15b2..459937958 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nevercondition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nevercondition.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NeverCondition type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Conditions.ts#L69-L69 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Conditions.ts#L69-L69 type NeverCondition struct { } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ngramtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ngramtokenfilter.go index c719889ff..86c46f4dd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ngramtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ngramtokenfilter.go @@ -16,26 +16,115 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NGramTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L265-L270 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L266-L271 type NGramTokenFilter struct { - MaxGram *int `json:"max_gram,omitempty"` - MinGram *int `json:"min_gram,omitempty"` - PreserveOriginal *bool `json:"preserve_original,omitempty"` - Type string `json:"type,omitempty"` - Version *string `json:"version,omitempty"` + MaxGram *int `json:"max_gram,omitempty"` + MinGram *int `json:"min_gram,omitempty"` + PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *NGramTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_gram": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxGram = &value + case float64: + f := int(v) + s.MaxGram = &f + } + + case "min_gram": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinGram = &value + case float64: + f := int(v) + s.MinGram = &f + } + + case "preserve_original": + if err := dec.Decode(&s.PreserveOriginal); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s NGramTokenFilter) MarshalJSON() ([]byte, error) { + type 
innerNGramTokenFilter NGramTokenFilter + tmp := innerNGramTokenFilter{ + MaxGram: s.MaxGram, + MinGram: s.MinGram, + PreserveOriginal: s.PreserveOriginal, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "ngram" + + return json.Marshal(tmp) } // NewNGramTokenFilter returns a NGramTokenFilter. func NewNGramTokenFilter() *NGramTokenFilter { r := &NGramTokenFilter{} - r.Type = "ngram" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ngramtokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ngramtokenizer.go index b54b5417e..7a5ecb436 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ngramtokenizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ngramtokenizer.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenchar" ) // NGramTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/tokenizers.ts#L38-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/tokenizers.ts#L39-L45 type NGramTokenizer struct { CustomTokenChars *string `json:"custom_token_chars,omitempty"` MaxGram int `json:"max_gram"` @@ -36,11 +42,105 @@ type NGramTokenizer struct { Version *string `json:"version,omitempty"` } +func (s *NGramTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "custom_token_chars": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CustomTokenChars = &o + + case "max_gram": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxGram = value + case float64: + f := int(v) + s.MaxGram = f + } + + case "min_gram": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinGram = value + case float64: + f := int(v) + s.MinGram = f + } + + case "token_chars": + if err := dec.Decode(&s.TokenChars); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s NGramTokenizer) MarshalJSON() ([]byte, error) { + type innerNGramTokenizer NGramTokenizer + tmp := innerNGramTokenizer{ + CustomTokenChars: 
s.CustomTokenChars, + MaxGram: s.MaxGram, + MinGram: s.MinGram, + TokenChars: s.TokenChars, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "ngram" + + return json.Marshal(tmp) +} + // NewNGramTokenizer returns a NGramTokenizer. func NewNGramTokenizer() *NGramTokenizer { r := &NGramTokenizer{} - r.Type = "ngram" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nlpberttokenizationconfig.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nlpberttokenizationconfig.go index bfecb1ae8..ed16601ee 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nlpberttokenizationconfig.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nlpberttokenizationconfig.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenizationtruncate" ) // NlpBertTokenizationConfig type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L116-L143 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L131-L158 type NlpBertTokenizationConfig struct { // DoLowerCase Should the tokenizer lower case the text DoLowerCase *bool `json:"do_lower_case,omitempty"` @@ -42,6 +48,91 @@ type NlpBertTokenizationConfig struct { WithSpecialTokens *bool `json:"with_special_tokens,omitempty"` } +func (s *NlpBertTokenizationConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "do_lower_case": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DoLowerCase = &value + case bool: + s.DoLowerCase = &v + } + + case "max_sequence_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxSequenceLength = &value + case float64: + f := int(v) + s.MaxSequenceLength = &f + } + + case "span": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Span = &value + case float64: + f := int(v) + s.Span = &f + } + + case "truncate": + if err := dec.Decode(&s.Truncate); err != nil { + return err + } + + case "with_special_tokens": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.WithSpecialTokens = &value + case bool: + s.WithSpecialTokens = &v + } + + } + } + return nil +} + // NewNlpBertTokenizationConfig returns 
a NlpBertTokenizationConfig. func NewNlpBertTokenizationConfig() *NlpBertTokenizationConfig { r := &NlpBertTokenizationConfig{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nlprobertatokenizationconfig.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nlprobertatokenizationconfig.go index a41da7877..da168667b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nlprobertatokenizationconfig.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nlprobertatokenizationconfig.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenizationtruncate" ) // NlpRobertaTokenizationConfig type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L145-L172 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L160-L187 type NlpRobertaTokenizationConfig struct { // AddPrefixSpace Should the tokenizer prefix input with a space character AddPrefixSpace *bool `json:"add_prefix_space,omitempty"` @@ -42,6 +48,91 @@ type NlpRobertaTokenizationConfig struct { WithSpecialTokens *bool `json:"with_special_tokens,omitempty"` } +func (s *NlpRobertaTokenizationConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "add_prefix_space": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AddPrefixSpace = &value + case bool: + s.AddPrefixSpace = &v + } + + case "max_sequence_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxSequenceLength = &value + case float64: + f := int(v) + s.MaxSequenceLength = &f + } + + case "span": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Span = &value + case float64: + f := int(v) + s.Span = &f + } + + case "truncate": + if err := dec.Decode(&s.Truncate); err != nil { + return err + } + + case "with_special_tokens": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.WithSpecialTokens = &value + case bool: + s.WithSpecialTokens = &v + } + + } + } + return nil +} 
+ // NewNlpRobertaTokenizationConfig returns a NlpRobertaTokenizationConfig. func NewNlpRobertaTokenizationConfig() *NlpRobertaTokenizationConfig { r := &NlpRobertaTokenizationConfig{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nlptokenizationupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nlptokenizationupdateoptions.go index 50ba6ec8e..46e03377b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nlptokenizationupdateoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nlptokenizationupdateoptions.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/tokenizationtruncate" ) // NlpTokenizationUpdateOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L321-L326 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L356-L361 type NlpTokenizationUpdateOptions struct { // Span Span options to apply Span *int `json:"span,omitempty"` @@ -34,6 +40,47 @@ type NlpTokenizationUpdateOptions struct { Truncate *tokenizationtruncate.TokenizationTruncate `json:"truncate,omitempty"` } +func (s *NlpTokenizationUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "span": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Span = &value + case float64: + f := int(v) + s.Span = &f + } + + case "truncate": + if err := dec.Decode(&s.Truncate); err != nil { + return err + } + + } + } + return nil +} + // NewNlpTokenizationUpdateOptions returns a NlpTokenizationUpdateOptions. func NewNlpTokenizationUpdateOptions() *NlpTokenizationUpdateOptions { r := &NlpTokenizationUpdateOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/node.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/node.go index 952c32c83..147d3e899 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/node.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/node.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Node type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/searchable_snapshots/cache_stats/Response.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/searchable_snapshots/cache_stats/Response.ts#L30-L32 type Node struct { SharedCache Shared `json:"shared_cache"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeallocationexplanation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeallocationexplanation.go index 98e07fe6d..d0551445c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeallocationexplanation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeallocationexplanation.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/decision" ) // NodeAllocationExplanation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/allocation_explain/types.ts#L97-L106 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/allocation_explain/types.ts#L97-L106 type NodeAllocationExplanation struct { Deciders []AllocationDecision `json:"deciders"` NodeAttributes map[string]string `json:"node_attributes"` @@ -38,6 +44,80 @@ type NodeAllocationExplanation struct { WeightRanking int `json:"weight_ranking"` } +func (s *NodeAllocationExplanation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "deciders": + if err := dec.Decode(&s.Deciders); err != nil { + return err + } + + case "node_attributes": + if s.NodeAttributes == nil { + s.NodeAttributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.NodeAttributes); err != nil { + return err + } + + case "node_decision": + if err := dec.Decode(&s.NodeDecision); err != nil { + return err + } + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return err + } + + case "node_name": + if err := dec.Decode(&s.NodeName); err != nil { + return err + } + + case "store": + if err := dec.Decode(&s.Store); err != nil { + return err + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return err + } + + case "weight_ranking": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.WeightRanking = value + case float64: + f := int(v) + s.WeightRanking = f + } + + } + } + return nil +} + // NewNodeAllocationExplanation returns a NodeAllocationExplanation. 
func NewNodeAllocationExplanation() *NodeAllocationExplanation { r := &NodeAllocationExplanation{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeattributes.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeattributes.go index 18e6cf39d..06c868d5a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeattributes.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeattributes.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole" ) // NodeAttributes type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Node.ts#L41-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Node.ts#L41-L58 type NodeAttributes struct { // Attributes Lists node attributes. 
Attributes map[string]string `json:"attributes"` @@ -42,6 +48,71 @@ type NodeAttributes struct { TransportAddress string `json:"transport_address"` } +func (s *NodeAttributes) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return err + } + + case "ephemeral_id": + if err := dec.Decode(&s.EphemeralId); err != nil { + return err + } + + case "external_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ExternalId = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return err + } + + } + } + return nil +} + // NewNodeAttributes returns a NodeAttributes. func NewNodeAttributes() *NodeAttributes { r := &NodeAttributes{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeattributesrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeattributesrecord.go index a0b3d4d96..a61040743 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeattributesrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeattributesrecord.go @@ -16,32 +16,156 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeAttributesRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/nodeattrs/types.ts#L20-L55 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/nodeattrs/types.ts#L20-L55 type NodeAttributesRecord struct { - // Attr attribute description + // Attr The attribute name. Attr *string `json:"attr,omitempty"` - // Host host name + // Host The host name. Host *string `json:"host,omitempty"` - // Id unique node id + // Id The unique node identifier. Id *string `json:"id,omitempty"` - // Ip ip address + // Ip The IP address. Ip *string `json:"ip,omitempty"` - // Node node name + // Node The node name. Node *string `json:"node,omitempty"` - // Pid process id + // Pid The process identifier. Pid *string `json:"pid,omitempty"` - // Port bound transport port + // Port The bound transport port. Port *string `json:"port,omitempty"` - // Value attribute value + // Value The attribute value. 
Value *string `json:"value,omitempty"` } +func (s *NodeAttributesRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attr": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Attr = &o + + case "host", "h": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Host = &o + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = &o + + case "ip", "i": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Ip = &o + + case "node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = &o + + case "pid": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pid = &o + + case "port": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Port = &o + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Value = &o + + } + } + 
return nil +} + // NewNodeAttributesRecord returns a NodeAttributesRecord. func NewNodeAttributesRecord() *NodeAttributesRecord { r := &NodeAttributesRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodebufferpool.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodebufferpool.go index a3660b944..a74875175 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodebufferpool.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodebufferpool.go @@ -16,19 +16,121 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeBufferPool type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L316-L322 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L788-L809 type NodeBufferPool struct { - Count *int64 `json:"count,omitempty"` - TotalCapacity *string `json:"total_capacity,omitempty"` - TotalCapacityInBytes *int64 `json:"total_capacity_in_bytes,omitempty"` - Used *string `json:"used,omitempty"` - UsedInBytes *int64 `json:"used_in_bytes,omitempty"` + // Count Number of buffer pools. + Count *int64 `json:"count,omitempty"` + // TotalCapacity Total capacity of buffer pools. + TotalCapacity *string `json:"total_capacity,omitempty"` + // TotalCapacityInBytes Total capacity of buffer pools in bytes. + TotalCapacityInBytes *int64 `json:"total_capacity_in_bytes,omitempty"` + // Used Size of buffer pools. 
+ Used *string `json:"used,omitempty"` + // UsedInBytes Size of buffer pools in bytes. + UsedInBytes *int64 `json:"used_in_bytes,omitempty"` +} + +func (s *NodeBufferPool) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = &value + case float64: + f := int64(v) + s.Count = &f + } + + case "total_capacity": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TotalCapacity = &o + + case "total_capacity_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalCapacityInBytes = &value + case float64: + f := int64(v) + s.TotalCapacityInBytes = &f + } + + case "used": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Used = &o + + case "used_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.UsedInBytes = &value + case float64: + f := int64(v) + s.UsedInBytes = &f + } + + } + } + return nil } // NewNodeBufferPool returns a NodeBufferPool. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodediskusage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodediskusage.go index 2afbc1d1b..027117859 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodediskusage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodediskusage.go @@ -16,19 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // NodeDiskUsage type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/allocation_explain/types.ts#L56-L60 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/allocation_explain/types.ts#L56-L60 type NodeDiskUsage struct { LeastAvailable DiskUsage `json:"least_available"` MostAvailable DiskUsage `json:"most_available"` NodeName string `json:"node_name"` } +func (s *NodeDiskUsage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "least_available": + if err := dec.Decode(&s.LeastAvailable); err != nil { + return err + } + + case "most_available": + if err := dec.Decode(&s.MostAvailable); err != nil { + return err + } + + case "node_name": + if err := dec.Decode(&s.NodeName); err != nil { + return err + } + + } + } + return nil +} + // NewNodeDiskUsage returns a NodeDiskUsage. 
func NewNodeDiskUsage() *NodeDiskUsage { r := &NodeDiskUsage{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeids.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeids.go index 87f5b0d45..779bc042b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeids.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeids.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NodeIds type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L58-L58 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L58-L58 type NodeIds []string diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfo.go index ebcee442d..f5527a4e3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfo.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole" ) // NodeInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L30-L66 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L30-L66 type NodeInfo struct { Aggregations map[string]NodeInfoAggregation `json:"aggregations,omitempty"` Attributes map[string]string `json:"attributes"` @@ -64,6 +70,186 @@ type NodeInfo struct { Version string `json:"version"` } +func (s *NodeInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]NodeInfoAggregation, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return err + } + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return err + } + + case "build_flavor": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildFlavor = o + + case "build_hash": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildHash = o + + case "build_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BuildType = o + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return err + } + + case "http": + if err := dec.Decode(&s.Http); err != nil { + return err + } + + case "ingest": + if err := dec.Decode(&s.Ingest); err != 
nil { + return err + } + + case "ip": + if err := dec.Decode(&s.Ip); err != nil { + return err + } + + case "jvm": + if err := dec.Decode(&s.Jvm); err != nil { + return err + } + + case "modules": + if err := dec.Decode(&s.Modules); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "network": + if err := dec.Decode(&s.Network); err != nil { + return err + } + + case "os": + if err := dec.Decode(&s.Os); err != nil { + return err + } + + case "plugins": + if err := dec.Decode(&s.Plugins); err != nil { + return err + } + + case "process": + if err := dec.Decode(&s.Process); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return err + } + + case "thread_pool": + if s.ThreadPool == nil { + s.ThreadPool = make(map[string]NodeThreadPoolInfo, 0) + } + if err := dec.Decode(&s.ThreadPool); err != nil { + return err + } + + case "total_indexing_buffer": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalIndexingBuffer = &value + case float64: + f := int64(v) + s.TotalIndexingBuffer = &f + } + + case "total_indexing_buffer_in_bytes": + if err := dec.Decode(&s.TotalIndexingBufferInBytes); err != nil { + return err + } + + case "transport": + if err := dec.Decode(&s.Transport); err != nil { + return err + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewNodeInfo returns a NodeInfo. 
func NewNodeInfo() *NodeInfo { r := &NodeInfo{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoaction.go index b78ef1c12..2da482d8a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoaction.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L173-L175 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L176-L178 type NodeInfoAction struct { DestructiveRequiresName string `json:"destructive_requires_name"` } +func (s *NodeInfoAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "destructive_requires_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DestructiveRequiresName = o + + } + } + return nil +} + // NewNodeInfoAction returns a NodeInfoAction. 
func NewNodeInfoAction() *NodeInfoAction { r := &NodeInfoAction{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoaggregation.go index cc9490d26..bcfa84882 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoaggregation.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NodeInfoAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L224-L226 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L227-L229 type NodeInfoAggregation struct { Types []string `json:"types"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfobootstrap.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfobootstrap.go index 678be8a48..80f5b8fac 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfobootstrap.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfobootstrap.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoBootstrap type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L193-L195 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L196-L198 type NodeInfoBootstrap struct { MemoryLock string `json:"memory_lock"` } +func (s *NodeInfoBootstrap) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "memory_lock": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MemoryLock = o + + } + } + return nil +} + // NewNodeInfoBootstrap returns a NodeInfoBootstrap. func NewNodeInfoBootstrap() *NodeInfoBootstrap { r := &NodeInfoBootstrap{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoclient.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoclient.go index dd90bfd3c..34b2d91ac 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoclient.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoclient.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoClient type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L177-L179 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L180-L182 type NodeInfoClient struct { Type string `json:"type"` } +func (s *NodeInfoClient) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewNodeInfoClient returns a NodeInfoClient. func NewNodeInfoClient() *NodeInfoClient { r := &NodeInfoClient{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfodiscover.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfodiscover.go index 3b8827fe2..0d971238b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfodiscover.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfodiscover.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoDiscover type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L169-L171 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L172-L174 type NodeInfoDiscover struct { SeedHosts string `json:"seed_hosts"` } +func (s *NodeInfoDiscover) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "seed_hosts": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SeedHosts = o + + } + } + return nil +} + // NewNodeInfoDiscover returns a NodeInfoDiscover. func NewNodeInfoDiscover() *NodeInfoDiscover { r := &NodeInfoDiscover{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfohttp.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfohttp.go index d07a44536..82048350f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfohttp.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfohttp.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoHttp type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L295-L300 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L298-L303 type NodeInfoHttp struct { BoundAddress []string `json:"bound_address"` MaxContentLength ByteSize `json:"max_content_length,omitempty"` @@ -30,6 +38,63 @@ type NodeInfoHttp struct { PublishAddress string `json:"publish_address"` } +func (s *NodeInfoHttp) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bound_address": + if err := dec.Decode(&s.BoundAddress); err != nil { + return err + } + + case "max_content_length": + if err := dec.Decode(&s.MaxContentLength); err != nil { + return err + } + + case "max_content_length_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MaxContentLengthInBytes = value + case float64: + f := int64(v) + s.MaxContentLengthInBytes = f + } + + case "publish_address": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PublishAddress = o + + } + } + return nil +} + // NewNodeInfoHttp returns a NodeInfoHttp. 
func NewNodeInfoHttp() *NodeInfoHttp { r := &NodeInfoHttp{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoingest.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoingest.go index 21e6fd785..777456a6a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoingest.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoingest.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NodeInfoIngest type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L216-L218 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L219-L221 type NodeInfoIngest struct { Processors []NodeInfoIngestProcessor `json:"processors"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoingestdownloader.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoingestdownloader.go index 6c437237c..da6a9c2e0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoingestdownloader.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoingestdownloader.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoIngestDownloader type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L127-L129 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L127-L129 type NodeInfoIngestDownloader struct { Enabled string `json:"enabled"` } +func (s *NodeInfoIngestDownloader) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Enabled = o + + } + } + return nil +} + // NewNodeInfoIngestDownloader returns a NodeInfoIngestDownloader. func NewNodeInfoIngestDownloader() *NodeInfoIngestDownloader { r := &NodeInfoIngestDownloader{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoingestinfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoingestinfo.go index ad6fffe26..8d64b4490 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoingestinfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoingestinfo.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NodeInfoIngestInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L123-L125 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L123-L125 type NodeInfoIngestInfo struct { Downloader NodeInfoIngestDownloader `json:"downloader"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoingestprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoingestprocessor.go index 264a9da36..02a7dd39d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoingestprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoingestprocessor.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoIngestProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L220-L222 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L223-L225 type NodeInfoIngestProcessor struct { Type string `json:"type"` } +func (s *NodeInfoIngestProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewNodeInfoIngestProcessor returns a NodeInfoIngestProcessor. func NewNodeInfoIngestProcessor() *NodeInfoIngestProcessor { r := &NodeInfoIngestProcessor{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfojvmmemory.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfojvmmemory.go index 480d8caf0..461852867 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfojvmmemory.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfojvmmemory.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoJvmMemory type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L302-L313 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L305-L316 type NodeInfoJvmMemory struct { DirectMax ByteSize `json:"direct_max,omitempty"` DirectMaxInBytes int64 `json:"direct_max_in_bytes"` @@ -36,6 +44,126 @@ type NodeInfoJvmMemory struct { NonHeapMaxInBytes int64 `json:"non_heap_max_in_bytes"` } +func (s *NodeInfoJvmMemory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "direct_max": + if err := dec.Decode(&s.DirectMax); err != nil { + return err + } + + case "direct_max_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DirectMaxInBytes = value + case float64: + f := int64(v) + s.DirectMaxInBytes = f + } + + case "heap_init": + if err := dec.Decode(&s.HeapInit); err != nil { + return err + } + + case "heap_init_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.HeapInitInBytes = value + case float64: + f := int64(v) + s.HeapInitInBytes = f + } + + case "heap_max": + if err := dec.Decode(&s.HeapMax); err != nil { + return err + } + + case "heap_max_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.HeapMaxInBytes = value + case float64: + f := int64(v) + s.HeapMaxInBytes = f + } + + case "non_heap_init": + if err := dec.Decode(&s.NonHeapInit); err != nil { + return err + } + + case 
"non_heap_init_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NonHeapInitInBytes = value + case float64: + f := int64(v) + s.NonHeapInitInBytes = f + } + + case "non_heap_max": + if err := dec.Decode(&s.NonHeapMax); err != nil { + return err + } + + case "non_heap_max_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NonHeapMaxInBytes = value + case float64: + f := int64(v) + s.NonHeapMaxInBytes = f + } + + } + } + return nil +} + // NewNodeInfoJvmMemory returns a NodeInfoJvmMemory. func NewNodeInfoJvmMemory() *NodeInfoJvmMemory { r := &NodeInfoJvmMemory{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfomemory.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfomemory.go index 22aae5862..01686db42 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfomemory.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfomemory.go @@ -16,18 +16,73 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoMemory type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L315-L318 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L318-L321 type NodeInfoMemory struct { Total string `json:"total"` TotalInBytes int64 `json:"total_in_bytes"` } +func (s *NodeInfoMemory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Total = o + + case "total_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalInBytes = value + case float64: + f := int64(v) + s.TotalInBytes = f + } + + } + } + return nil +} + // NewNodeInfoMemory returns a NodeInfoMemory. func NewNodeInfoMemory() *NodeInfoMemory { r := &NodeInfoMemory{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfonetwork.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfonetwork.go index daf7a39f7..caa86f10f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfonetwork.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfonetwork.go @@ -16,18 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoNetwork type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L320-L323 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L323-L326 type NodeInfoNetwork struct { PrimaryInterface NodeInfoNetworkInterface `json:"primary_interface"` RefreshInterval int `json:"refresh_interval"` } +func (s *NodeInfoNetwork) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "primary_interface": + if err := dec.Decode(&s.PrimaryInterface); err != nil { + return err + } + + case "refresh_interval": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.RefreshInterval = value + case float64: + f := int(v) + s.RefreshInterval = f + } + + } + } + return nil +} + // NewNodeInfoNetwork returns a NodeInfoNetwork. func NewNodeInfoNetwork() *NodeInfoNetwork { r := &NodeInfoNetwork{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfonetworkinterface.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfonetworkinterface.go index af46fb52f..f6c6ba5a5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfonetworkinterface.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfonetworkinterface.go @@ -16,19 +16,76 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoNetworkInterface type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L325-L329 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L328-L332 type NodeInfoNetworkInterface struct { Address string `json:"address"` MacAddress string `json:"mac_address"` Name string `json:"name"` } +func (s *NodeInfoNetworkInterface) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "address": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Address = o + + case "mac_address": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MacAddress = o + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + // NewNodeInfoNetworkInterface returns a NodeInfoNetworkInterface. 
func NewNodeInfoNetworkInterface() *NodeInfoNetworkInterface { r := &NodeInfoNetworkInterface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfooscpu.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfooscpu.go index 216b9c6fb..a3bb8806d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfooscpu.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfooscpu.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoOSCPU type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L331-L340 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L334-L343 type NodeInfoOSCPU struct { CacheSize string `json:"cache_size"` CacheSizeInBytes int `json:"cache_size_in_bytes"` @@ -34,6 +42,142 @@ type NodeInfoOSCPU struct { Vendor string `json:"vendor"` } +func (s *NodeInfoOSCPU) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cache_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CacheSize = o + + case "cache_size_in_bytes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.Atoi(v) + if err != nil { + return err + } + s.CacheSizeInBytes = value + case float64: + f := int(v) + s.CacheSizeInBytes = f + } + + case "cores_per_socket": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CoresPerSocket = value + case float64: + f := int(v) + s.CoresPerSocket = f + } + + case "mhz": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Mhz = value + case float64: + f := int(v) + s.Mhz = f + } + + case "model": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Model = o + + case "total_cores": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TotalCores = value + case float64: + f := int(v) + s.TotalCores = f + } + + case "total_sockets": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TotalSockets = value + case float64: + f := int(v) + s.TotalSockets = f + } + + case "vendor": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Vendor = o + + } + } + return nil +} + // NewNodeInfoOSCPU returns a NodeInfoOSCPU. 
func NewNodeInfoOSCPU() *NodeInfoOSCPU { r := &NodeInfoOSCPU{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfopath.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfopath.go index d0000f4eb..4d4888d18 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfopath.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfopath.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoPath type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L154-L159 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L157-L162 type NodeInfoPath struct { Data []string `json:"data,omitempty"` Home string `json:"home"` @@ -30,6 +38,60 @@ type NodeInfoPath struct { Repo []string `json:"repo"` } +func (s *NodeInfoPath) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data": + if err := dec.Decode(&s.Data); err != nil { + return err + } + + case "home": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Home = o + + case "logs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Logs = o + + case "repo": + if err := dec.Decode(&s.Repo); err != nil { + return err + } + + } + } + return nil +} + // NewNodeInfoPath returns a NodeInfoPath. func NewNodeInfoPath() *NodeInfoPath { r := &NodeInfoPath{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinforepositories.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinforepositories.go index 2a27f96ed..2434d6d7a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinforepositories.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinforepositories.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NodeInfoRepositories type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L161-L163 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L164-L166 type NodeInfoRepositories struct { Url NodeInfoRepositoriesUrl `json:"url"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinforepositoriesurl.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinforepositoriesurl.go index 1f18b3c48..f3b38d383 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinforepositoriesurl.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinforepositoriesurl.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoRepositoriesUrl type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L165-L167 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L168-L170 type NodeInfoRepositoriesUrl struct { AllowedUrls string `json:"allowed_urls"` } +func (s *NodeInfoRepositoriesUrl) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allowed_urls": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AllowedUrls = o + + } + } + return nil +} + // NewNodeInfoRepositoriesUrl returns a NodeInfoRepositoriesUrl. func NewNodeInfoRepositoriesUrl() *NodeInfoRepositoriesUrl { r := &NodeInfoRepositoriesUrl{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoscript.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoscript.go index 36f38f545..285bf41d6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoscript.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoscript.go @@ -16,18 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoScript type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L273-L276 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L276-L279 type NodeInfoScript struct { AllowedTypes string `json:"allowed_types"` DisableMaxCompilationsRate string `json:"disable_max_compilations_rate"` } +func (s *NodeInfoScript) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allowed_types": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AllowedTypes = o + + case "disable_max_compilations_rate": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DisableMaxCompilationsRate = o + + } + } + return nil +} + // NewNodeInfoScript returns a NodeInfoScript. 
func NewNodeInfoScript() *NodeInfoScript { r := &NodeInfoScript{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosearch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosearch.go index a3042c0f7..0ba784d06 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosearch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosearch.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NodeInfoSearch type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L278-L280 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L281-L283 type NodeInfoSearch struct { Remote NodeInfoSearchRemote `json:"remote"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosearchremote.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosearchremote.go index 47240941d..bef9f0cbe 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosearchremote.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosearchremote.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoSearchRemote type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L282-L284 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L285-L287 type NodeInfoSearchRemote struct { Connect string `json:"connect"` } +func (s *NodeInfoSearchRemote) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "connect": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Connect = o + + } + } + return nil +} + // NewNodeInfoSearchRemote returns a NodeInfoSearchRemote. func NewNodeInfoSearchRemote() *NodeInfoSearchRemote { r := &NodeInfoSearchRemote{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettings.go index a917b7b93..40daffbf6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettings.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NodeInfoSettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L68-L84 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L68-L84 type NodeInfoSettings struct { Action *NodeInfoAction `json:"action,omitempty"` Bootstrap *NodeInfoBootstrap `json:"bootstrap,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingscluster.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingscluster.go index 103096aef..d5fca6f1f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingscluster.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingscluster.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoSettingsCluster type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L131-L138 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L131-L141 type NodeInfoSettingsCluster struct { DeprecationIndexing *DeprecationIndexing `json:"deprecation_indexing,omitempty"` Election NodeInfoSettingsClusterElection `json:"election"` @@ -31,6 +39,58 @@ type NodeInfoSettingsCluster struct { Routing *IndexRouting `json:"routing,omitempty"` } +func (s *NodeInfoSettingsCluster) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "deprecation_indexing": + if err := dec.Decode(&s.DeprecationIndexing); err != nil { + return err + } + + case "election": + if err := dec.Decode(&s.Election); err != nil { + return err + } + + case "initial_master_nodes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InitialMasterNodes = &o + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + } + } + return nil +} + // NewNodeInfoSettingsCluster returns a NodeInfoSettingsCluster. 
func NewNodeInfoSettingsCluster() *NodeInfoSettingsCluster { r := &NodeInfoSettingsCluster{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingsclusterelection.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingsclusterelection.go index 900746517..7641d3f07 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingsclusterelection.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingsclusterelection.go @@ -16,17 +16,49 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // NodeInfoSettingsClusterElection type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L144-L146 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L147-L149 type NodeInfoSettingsClusterElection struct { Strategy string `json:"strategy"` } +func (s *NodeInfoSettingsClusterElection) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "strategy": + if err := dec.Decode(&s.Strategy); err != nil { + return err + } + + } + } + return nil +} + // NewNodeInfoSettingsClusterElection returns a NodeInfoSettingsClusterElection. 
func NewNodeInfoSettingsClusterElection() *NodeInfoSettingsClusterElection { r := &NodeInfoSettingsClusterElection{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingshttp.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingshttp.go index a2969be29..5e71fdd53 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingshttp.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingshttp.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoSettingsHttp type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L181-L186 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L184-L189 type NodeInfoSettingsHttp struct { Compression string `json:"compression,omitempty"` Port string `json:"port,omitempty"` @@ -30,6 +38,67 @@ type NodeInfoSettingsHttp struct { TypeDefault *string `json:"type.default,omitempty"` } +func (s *NodeInfoSettingsHttp) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compression": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Compression = o + + case "port": + var tmp json.RawMessage + 
if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Port = o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "type.default": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TypeDefault = &o + + } + } + return nil +} + // NewNodeInfoSettingsHttp returns a NodeInfoSettingsHttp. func NewNodeInfoSettingsHttp() *NodeInfoSettingsHttp { r := &NodeInfoSettingsHttp{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingshttptype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingshttptype.go index 8194e4370..b27ab279f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingshttptype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingshttptype.go @@ -16,17 +16,62 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoSettingsHttpType type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L188-L191 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L191-L194 type NodeInfoSettingsHttpType struct { Default string `json:"default"` } +func (s *NodeInfoSettingsHttpType) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Default) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "default": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Default = o + + } + } + return nil +} + // NewNodeInfoSettingsHttpType returns a NodeInfoSettingsHttpType. func NewNodeInfoSettingsHttpType() *NodeInfoSettingsHttpType { r := &NodeInfoSettingsHttpType{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingsingest.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingsingest.go index c05e51408..4309a1f3d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingsingest.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingsingest.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NodeInfoSettingsIngest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L86-L121 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L86-L121 type NodeInfoSettingsIngest struct { Append *NodeInfoIngestInfo `json:"append,omitempty"` Attachment *NodeInfoIngestInfo `json:"attachment,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingsnetwork.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingsnetwork.go index 4c10476e8..56439efbb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingsnetwork.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingsnetwork.go @@ -16,17 +16,49 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // NodeInfoSettingsNetwork type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L212-L214 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L215-L217 type NodeInfoSettingsNetwork struct { Host string `json:"host"` } +func (s *NodeInfoSettingsNetwork) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return err + } + + } + } + return nil +} + // NewNodeInfoSettingsNetwork returns a NodeInfoSettingsNetwork. func NewNodeInfoSettingsNetwork() *NodeInfoSettingsNetwork { r := &NodeInfoSettingsNetwork{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingsnode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingsnode.go index a2b48b5cd..03c09fbc7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingsnode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingsnode.go @@ -16,23 +16,72 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // NodeInfoSettingsNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L148-L152 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L151-L155 type NodeInfoSettingsNode struct { Attr map[string]json.RawMessage `json:"attr"` MaxLocalStorageNodes *string `json:"max_local_storage_nodes,omitempty"` Name string `json:"name"` } +func (s *NodeInfoSettingsNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attr": + if s.Attr == nil { + s.Attr = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Attr); err != nil { + return err + } + + case "max_local_storage_nodes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxLocalStorageNodes = &o + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + // NewNodeInfoSettingsNode returns a NodeInfoSettingsNode. func NewNodeInfoSettingsNode() *NodeInfoSettingsNode { r := &NodeInfoSettingsNode{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingstransport.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingstransport.go index e3e32685c..374d15e8f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingstransport.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingstransport.go @@ -16,19 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoSettingsTransport type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L197-L201 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L200-L204 type NodeInfoSettingsTransport struct { Features *NodeInfoSettingsTransportFeatures `json:"features,omitempty"` Type NodeInfoSettingsTransportType `json:"type"` TypeDefault *string `json:"type.default,omitempty"` } +func (s *NodeInfoSettingsTransport) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "features": + if err := dec.Decode(&s.Features); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "type.default": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TypeDefault = &o + + } + } + return nil +} + // NewNodeInfoSettingsTransport returns a NodeInfoSettingsTransport. 
func NewNodeInfoSettingsTransport() *NodeInfoSettingsTransport { r := &NodeInfoSettingsTransport{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingstransportfeatures.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingstransportfeatures.go index 2e17704c8..e38c94631 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingstransportfeatures.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingstransportfeatures.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoSettingsTransportFeatures type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L208-L210 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L211-L213 type NodeInfoSettingsTransportFeatures struct { XPack string `json:"x-pack"` } +func (s *NodeInfoSettingsTransportFeatures) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "x-pack": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.XPack = o + + } + } + return nil +} + // NewNodeInfoSettingsTransportFeatures returns a NodeInfoSettingsTransportFeatures. 
func NewNodeInfoSettingsTransportFeatures() *NodeInfoSettingsTransportFeatures { r := &NodeInfoSettingsTransportFeatures{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingstransporttype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingstransporttype.go index eddf0514a..b0da5ee1b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingstransporttype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfosettingstransporttype.go @@ -16,17 +16,62 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoSettingsTransportType type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L203-L206 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L206-L209 type NodeInfoSettingsTransportType struct { Default string `json:"default"` } +func (s *NodeInfoSettingsTransportType) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Default) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "default": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Default = o + + } + } + return nil +} + // NewNodeInfoSettingsTransportType returns a NodeInfoSettingsTransportType. func NewNodeInfoSettingsTransportType() *NodeInfoSettingsTransportType { r := &NodeInfoSettingsTransportType{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfotransport.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfotransport.go index 99696f101..8e9239f8d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfotransport.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfotransport.go @@ -16,19 +16,72 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoTransport type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L342-L346 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L345-L349 type NodeInfoTransport struct { BoundAddress []string `json:"bound_address"` Profiles map[string]string `json:"profiles"` PublishAddress string `json:"publish_address"` } +func (s *NodeInfoTransport) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bound_address": + if err := dec.Decode(&s.BoundAddress); err != nil { + return err + } + + case "profiles": + if s.Profiles == nil { + s.Profiles = make(map[string]string, 0) + } + if err := dec.Decode(&s.Profiles); err != nil { + return err + } + + case "publish_address": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PublishAddress = o + + } + } + return nil +} + // NewNodeInfoTransport returns a NodeInfoTransport. 
func NewNodeInfoTransport() *NodeInfoTransport { r := &NodeInfoTransport{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpack.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpack.go index 0b574ca88..4fec38e70 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpack.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpack.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // NodeInfoXpack type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L228-L232 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L231-L235 type NodeInfoXpack struct { License *NodeInfoXpackLicense `json:"license,omitempty"` Notification map[string]json.RawMessage `json:"notification,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacklicense.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacklicense.go index 2f3c5a23d..4c1832ca7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacklicense.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacklicense.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NodeInfoXpackLicense type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L265-L267 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L268-L270 type NodeInfoXpackLicense struct { SelfGenerated NodeInfoXpackLicenseType `json:"self_generated"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacklicensetype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacklicensetype.go index 120339c9f..05f0cbb8d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacklicensetype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacklicensetype.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoXpackLicenseType type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L269-L271 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L272-L274 type NodeInfoXpackLicenseType struct { Type string `json:"type"` } +func (s *NodeInfoXpackLicenseType) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewNodeInfoXpackLicenseType returns a NodeInfoXpackLicenseType. func NewNodeInfoXpackLicenseType() *NodeInfoXpackLicenseType { r := &NodeInfoXpackLicenseType{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurity.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurity.go index e05ba0f68..9ac47dc71 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurity.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurity.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoXpackSecurity type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L234-L239 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L237-L242 type NodeInfoXpackSecurity struct { Authc *NodeInfoXpackSecurityAuthc `json:"authc,omitempty"` Enabled string `json:"enabled"` @@ -30,6 +38,53 @@ type NodeInfoXpackSecurity struct { Transport *NodeInfoXpackSecuritySsl `json:"transport,omitempty"` } +func (s *NodeInfoXpackSecurity) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "authc": + if err := dec.Decode(&s.Authc); err != nil { + return err + } + + case "enabled": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Enabled = o + + case "http": + if err := dec.Decode(&s.Http); err != nil { + return err + } + + case "transport": + if err := dec.Decode(&s.Transport); err != nil { + return err + } + + } + } + return nil +} + // NewNodeInfoXpackSecurity returns a NodeInfoXpackSecurity. func NewNodeInfoXpackSecurity() *NodeInfoXpackSecurity { r := &NodeInfoXpackSecurity{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityauthc.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityauthc.go index b2f9ffcba..63f4634c6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityauthc.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityauthc.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NodeInfoXpackSecurityAuthc type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L245-L248 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L248-L251 type NodeInfoXpackSecurityAuthc struct { Realms NodeInfoXpackSecurityAuthcRealms `json:"realms"` Token NodeInfoXpackSecurityAuthcToken `json:"token"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityauthcrealms.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityauthcrealms.go index a028aa707..66cfd9f40 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityauthcrealms.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityauthcrealms.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NodeInfoXpackSecurityAuthcRealms type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L250-L254 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L253-L257 type NodeInfoXpackSecurityAuthcRealms struct { File map[string]NodeInfoXpackSecurityAuthcRealmsStatus `json:"file,omitempty"` Native map[string]NodeInfoXpackSecurityAuthcRealmsStatus `json:"native,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityauthcrealmsstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityauthcrealmsstatus.go index acecc192f..706599dbb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityauthcrealmsstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityauthcrealmsstatus.go @@ -16,18 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoXpackSecurityAuthcRealmsStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L260-L263 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L263-L266 type NodeInfoXpackSecurityAuthcRealmsStatus struct { Enabled *string `json:"enabled,omitempty"` Order string `json:"order"` } +func (s *NodeInfoXpackSecurityAuthcRealmsStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Enabled = &o + + case "order": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Order = o + + } + } + return nil +} + // NewNodeInfoXpackSecurityAuthcRealmsStatus returns a NodeInfoXpackSecurityAuthcRealmsStatus. func NewNodeInfoXpackSecurityAuthcRealmsStatus() *NodeInfoXpackSecurityAuthcRealmsStatus { r := &NodeInfoXpackSecurityAuthcRealmsStatus{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityauthctoken.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityauthctoken.go index 963e3799f..62dd5dc98 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityauthctoken.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityauthctoken.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeInfoXpackSecurityAuthcToken type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L256-L258 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L259-L261 type NodeInfoXpackSecurityAuthcToken struct { Enabled string `json:"enabled"` } +func (s *NodeInfoXpackSecurityAuthcToken) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Enabled = o + + } + } + return nil +} + // NewNodeInfoXpackSecurityAuthcToken returns a NodeInfoXpackSecurityAuthcToken. func NewNodeInfoXpackSecurityAuthcToken() *NodeInfoXpackSecurityAuthcToken { r := &NodeInfoXpackSecurityAuthcToken{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityssl.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityssl.go index a6a242351..c379ae5ff 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityssl.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeinfoxpacksecurityssl.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NodeInfoXpackSecuritySsl type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L241-L243 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L244-L246 type NodeInfoXpackSecuritySsl struct { Ssl map[string]string `json:"ssl"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodejvminfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodejvminfo.go index 53abe326d..cc2587838 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodejvminfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodejvminfo.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeJvmInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L348-L362 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L351-L365 type NodeJvmInfo struct { GcCollectors []string `json:"gc_collectors"` InputArguments []string `json:"input_arguments"` @@ -38,6 +46,120 @@ type NodeJvmInfo struct { VmVersion string `json:"vm_version"` } +func (s *NodeJvmInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "gc_collectors": + if err := dec.Decode(&s.GcCollectors); err != nil { + return err + } + + case "input_arguments": + if err := dec.Decode(&s.InputArguments); err != nil { + return err + } + + case "mem": + if err := dec.Decode(&s.Mem); err != nil { + return err + } + + case "memory_pools": + if err := dec.Decode(&s.MemoryPools); err != nil { + return err + } + + case "pid": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Pid = value + case float64: + f := int(v) + s.Pid = f + } + + case "start_time_in_millis": + if err := dec.Decode(&s.StartTimeInMillis); err != nil { + return err + } + + case "using_bundled_jdk", "bundled_jdk": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.UsingBundledJdk = value + case bool: + s.UsingBundledJdk = v + } + + case "using_compressed_ordinary_object_pointers": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UsingCompressedOrdinaryObjectPointers = o 
+ + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + case "vm_name": + if err := dec.Decode(&s.VmName); err != nil { + return err + } + + case "vm_vendor": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.VmVendor = o + + case "vm_version": + if err := dec.Decode(&s.VmVersion); err != nil { + return err + } + + } + } + return nil +} + // NewNodeJvmInfo returns a NodeJvmInfo. func NewNodeJvmInfo() *NodeJvmInfo { r := &NodeJvmInfo{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeoperatingsysteminfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeoperatingsysteminfo.go index e17a9825e..f30a5a726 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeoperatingsysteminfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeoperatingsysteminfo.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeOperatingSystemInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L364-L381 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L367-L384 type NodeOperatingSystemInfo struct { // AllocatedProcessors The number of processors actually used to calculate thread pool size. 
This // number can be set with the node.processors setting of a node and defaults to @@ -44,6 +52,105 @@ type NodeOperatingSystemInfo struct { Version string `json:"version"` } +func (s *NodeOperatingSystemInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allocated_processors": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.AllocatedProcessors = &value + case float64: + f := int(v) + s.AllocatedProcessors = &f + } + + case "arch": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Arch = o + + case "available_processors": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.AvailableProcessors = value + case float64: + f := int(v) + s.AvailableProcessors = f + } + + case "cpu": + if err := dec.Decode(&s.Cpu); err != nil { + return err + } + + case "mem": + if err := dec.Decode(&s.Mem); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "pretty_name": + if err := dec.Decode(&s.PrettyName); err != nil { + return err + } + + case "refresh_interval_in_millis": + if err := dec.Decode(&s.RefreshIntervalInMillis); err != nil { + return err + } + + case "swap": + if err := dec.Decode(&s.Swap); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewNodeOperatingSystemInfo returns a NodeOperatingSystemInfo. 
func NewNodeOperatingSystemInfo() *NodeOperatingSystemInfo { r := &NodeOperatingSystemInfo{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodepackagingtype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodepackagingtype.go index 3a1574989..0bdb4829b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodepackagingtype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodepackagingtype.go @@ -16,17 +16,88 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodePackagingType type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L283-L287 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L526-L539 type NodePackagingType struct { - Count int `json:"count"` + // Count Number of selected nodes using the distribution flavor and file type. + Count int `json:"count"` + // Flavor Type of Elasticsearch distribution. This is always `default`. Flavor string `json:"flavor"` - Type string `json:"type"` + // Type File type (such as `tar` or `zip`) used for the distribution package. 
+ Type string `json:"type"` +} + +func (s *NodePackagingType) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "flavor": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Flavor = o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil } // NewNodePackagingType returns a NodePackagingType. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeprocessinfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeprocessinfo.go index bd7fed42f..75ace81a7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeprocessinfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeprocessinfo.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeProcessInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L383-L390 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L386-L393 type NodeProcessInfo struct { // Id Process identifier (PID) Id int64 `json:"id"` @@ -32,6 +40,60 @@ type NodeProcessInfo struct { RefreshIntervalInMillis int64 `json:"refresh_interval_in_millis"` } +func (s *NodeProcessInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Id = value + case float64: + f := int64(v) + s.Id = f + } + + case "mlockall": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Mlockall = value + case bool: + s.Mlockall = v + } + + case "refresh_interval_in_millis": + if err := dec.Decode(&s.RefreshIntervalInMillis); err != nil { + return err + } + + } + } + return nil +} + // NewNodeProcessInfo returns a NodeProcessInfo. func NewNodeProcessInfo() *NodeProcessInfo { r := &NodeProcessInfo{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodereloaderror.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodereloaderror.go index ca3523b4e..123685c54 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodereloaderror.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodereloaderror.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // NodeReloadError type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/NodeReloadResult.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/NodeReloadResult.ts#L24-L27 type NodeReloadError struct { Name string `json:"name"` ReloadException *ErrorCause `json:"reload_exception,omitempty"` } +func (s *NodeReloadError) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "reload_exception": + if err := dec.Decode(&s.ReloadException); err != nil { + return err + } + + } + } + return nil +} + // NewNodeReloadError returns a NodeReloadError. func NewNodeReloadError() *NodeReloadError { r := &NodeReloadError{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodereloadresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodereloadresult.go index 2077f413f..326c4950f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodereloadresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodereloadresult.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // Stats // NodeReloadError // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/NodeReloadResult.ts#L29-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/NodeReloadResult.ts#L29-L30 type NodeReloadResult interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodescontext.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodescontext.go index f3e98ba87..d8a3c5d44 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodescontext.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodescontext.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodesContext type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L397-L402 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L997-L1002 type NodesContext struct { CacheEvictions *int64 `json:"cache_evictions,omitempty"` CompilationLimitTriggered *int64 `json:"compilation_limit_triggered,omitempty"` @@ -30,6 +38,83 @@ type NodesContext struct { Context *string `json:"context,omitempty"` } +func (s *NodesContext) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cache_evictions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CacheEvictions = &value + case float64: + f := int64(v) + s.CacheEvictions = &f + } + + case "compilation_limit_triggered": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CompilationLimitTriggered = &value + case float64: + f := int64(v) + s.CompilationLimitTriggered = &f + } + + case "compilations": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Compilations = &value + case float64: + f := int64(v) + s.Compilations = &f + } + + case "context": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Context = &o + + } + } + return nil +} + // NewNodesContext returns a NodesContext. 
func NewNodesContext() *NodesContext { r := &NodesContext{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodescredentials.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodescredentials.go index 210e1efa8..b6a2f1b8f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodescredentials.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodescredentials.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NodesCredentials type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_service_credentials/types.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_service_credentials/types.ts#L23-L28 type NodesCredentials struct { // FileTokens File-backed tokens collected from all nodes FileTokens map[string]NodesCredentialsFileToken `json:"file_tokens"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodescredentialsfiletoken.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodescredentialsfiletoken.go index fa6cebde2..80a489b98 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodescredentialsfiletoken.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodescredentialsfiletoken.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NodesCredentialsFileToken type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_service_credentials/types.ts#L30-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_service_credentials/types.ts#L30-L32 type NodesCredentialsFileToken struct { Nodes []string `json:"nodes"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeshard.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeshard.go index ba6e1d407..3214b9ea2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeshard.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeshard.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardroutingstate" ) // NodeShard type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Node.ts#L59-L70 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Node.ts#L60-L71 type NodeShard struct { AllocationId map[string]string `json:"allocation_id,omitempty"` Index string `json:"index"` @@ -40,6 +46,102 @@ type NodeShard struct { UnassignedInfo *UnassignedInformation `json:"unassigned_info,omitempty"` } +func (s *NodeShard) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allocation_id": + if s.AllocationId == nil { + s.AllocationId = make(map[string]string, 0) + } + if err := dec.Decode(&s.AllocationId); err != nil { + return err + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return err + } + + case "primary": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Primary = value + case bool: + s.Primary = v + } + + case "recovery_source": + if s.RecoverySource == nil { + s.RecoverySource = make(map[string]string, 0) + } + if err := dec.Decode(&s.RecoverySource); err != nil { + return err + } + + case "relocating_node": + if err := dec.Decode(&s.RelocatingNode); err != nil { + return err + } + + case "relocation_failure_info": + if err := dec.Decode(&s.RelocationFailureInfo); err != nil { + return err + } + + case "shard": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Shard = value + case float64: + f := int(v) + s.Shard = f + } + + case "state": + if err := 
dec.Decode(&s.State); err != nil { + return err + } + + case "unassigned_info": + if err := dec.Decode(&s.UnassignedInfo); err != nil { + return err + } + + } + } + return nil +} + // NewNodeShard returns a NodeShard. func NewNodeShard() *NodeShard { r := &NodeShard{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeshutdownstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeshutdownstatus.go index f4c10d275..35b054a90 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeshutdownstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeshutdownstatus.go @@ -16,18 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shutdownstatus" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shutdowntype" ) // NodeShutdownStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L29-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L29-L38 type NodeShutdownStatus struct { NodeId string `json:"node_id"` PersistentTasks PersistentTaskStatus `json:"persistent_tasks"` @@ -39,6 +45,73 @@ type NodeShutdownStatus struct { Type shutdowntype.ShutdownType `json:"type"` } +func (s *NodeShutdownStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return err + } + + case "persistent_tasks": + if err := dec.Decode(&s.PersistentTasks); err != nil { + return err + } + + case "plugins": + if err := dec.Decode(&s.Plugins); err != nil { + return err + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = o + + case "shard_migration": + if err := dec.Decode(&s.ShardMigration); err != nil { + return err + } + + case "shutdown_startedmillis": + if err := dec.Decode(&s.ShutdownStartedmillis); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil +} + // NewNodeShutdownStatus returns a NodeShutdownStatus. 
func NewNodeShutdownStatus() *NodeShutdownStatus { r := &NodeShutdownStatus{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodesindexingpressure.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodesindexingpressure.go index ddd604f9f..00a112e2e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodesindexingpressure.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodesindexingpressure.go @@ -16,14 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NodesIndexingPressure type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L55-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L116-L121 type NodesIndexingPressure struct { + // Memory Contains statistics for memory consumption from indexing load. Memory *NodesIndexingPressureMemory `json:"memory,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodesindexingpressurememory.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodesindexingpressurememory.go index bcab414c5..ec0af54e2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodesindexingpressurememory.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodesindexingpressurememory.go @@ -16,18 +16,82 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodesIndexingPressureMemory type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L59-L64 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L123-L142 type NodesIndexingPressureMemory struct { - Current *PressureMemory `json:"current,omitempty"` - Limit ByteSize `json:"limit,omitempty"` - LimitInBytes *int64 `json:"limit_in_bytes,omitempty"` - Total *PressureMemory `json:"total,omitempty"` + // Current Contains statistics for current indexing load. + Current *PressureMemory `json:"current,omitempty"` + // Limit Configured memory limit for the indexing requests. + // Replica requests have an automatic limit that is 1.5x this value. + Limit ByteSize `json:"limit,omitempty"` + // LimitInBytes Configured memory limit, in bytes, for the indexing requests. + // Replica requests have an automatic limit that is 1.5x this value. + LimitInBytes *int64 `json:"limit_in_bytes,omitempty"` + // Total Contains statistics for the cumulative indexing load since the node started. 
+ Total *PressureMemory `json:"total,omitempty"` +} + +func (s *NodesIndexingPressureMemory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current": + if err := dec.Decode(&s.Current); err != nil { + return err + } + + case "limit": + if err := dec.Decode(&s.Limit); err != nil { + return err + } + + case "limit_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LimitInBytes = &value + case float64: + f := int64(v) + s.LimitInBytes = &f + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return err + } + + } + } + return nil } // NewNodesIndexingPressureMemory returns a NodesIndexingPressureMemory. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodesingest.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodesingest.go index b2e86832b..8fa66fd8f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodesingest.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodesingest.go @@ -16,16 +16,18 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // NodesIngest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L144-L147 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L345-L354 type NodesIngest struct { + // Pipelines Contains statistics about ingest pipelines for the node. Pipelines map[string]IngestTotal `json:"pipelines,omitempty"` - Total *IngestTotal `json:"total,omitempty"` + // Total Contains statistics about ingest operations for the node. + Total *IngestTotal `json:"total,omitempty"` } // NewNodesIngest returns a NodesIngest. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodesrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodesrecord.go index 2697174ce..938af1267 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodesrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodesrecord.go @@ -16,209 +16,1325 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodesRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/nodes/types.ts#L23-L541 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/nodes/types.ts#L23-L542 type NodesRecord struct { - // Build es build hash + // Build The Elasticsearch build hash. 
Build *string `json:"build,omitempty"` - // BulkAvgSizeInBytes average size in bytes of shard bulk + // BulkAvgSizeInBytes The average size in bytes of shard bulk. BulkAvgSizeInBytes *string `json:"bulk.avg_size_in_bytes,omitempty"` - // BulkAvgTime average time spend in shard bulk + // BulkAvgTime The average time spend in shard bulk. BulkAvgTime *string `json:"bulk.avg_time,omitempty"` - // BulkTotalOperations number of bulk shard ops + // BulkTotalOperations The number of bulk shard operations. BulkTotalOperations *string `json:"bulk.total_operations,omitempty"` - // BulkTotalSizeInBytes total size in bytes of shard bulk + // BulkTotalSizeInBytes The total size in bytes of shard bulk. BulkTotalSizeInBytes *string `json:"bulk.total_size_in_bytes,omitempty"` - // BulkTotalTime time spend in shard bulk + // BulkTotalTime The time spend in shard bulk. BulkTotalTime *string `json:"bulk.total_time,omitempty"` - // CompletionSize size of completion + // CompletionSize The size of completion. CompletionSize *string `json:"completion.size,omitempty"` - // Cpu recent cpu usage + // Cpu The recent system CPU usage as a percentage. Cpu *string `json:"cpu,omitempty"` - // DiskAvail available disk space + // DiskAvail The available disk space. DiskAvail ByteSize `json:"disk.avail,omitempty"` - // DiskTotal total disk space + // DiskTotal The total disk space. DiskTotal ByteSize `json:"disk.total,omitempty"` - // DiskUsed used disk space + // DiskUsed The used disk space. DiskUsed ByteSize `json:"disk.used,omitempty"` - // DiskUsedPercent used disk space percentage + // DiskUsedPercent The used disk space percentage. DiskUsedPercent Percentage `json:"disk.used_percent,omitempty"` - // FielddataEvictions fielddata evictions + // FielddataEvictions The fielddata evictions. FielddataEvictions *string `json:"fielddata.evictions,omitempty"` - // FielddataMemorySize used fielddata cache + // FielddataMemorySize The used fielddata cache. 
FielddataMemorySize *string `json:"fielddata.memory_size,omitempty"` - // FileDescCurrent used file descriptors + // FileDescCurrent The used file descriptors. FileDescCurrent *string `json:"file_desc.current,omitempty"` - // FileDescMax max file descriptors + // FileDescMax The maximum number of file descriptors. FileDescMax *string `json:"file_desc.max,omitempty"` - // FileDescPercent used file descriptor ratio + // FileDescPercent The used file descriptor ratio. FileDescPercent Percentage `json:"file_desc.percent,omitempty"` - // Flavor es distribution flavor + // Flavor The Elasticsearch distribution flavor. Flavor *string `json:"flavor,omitempty"` - // FlushTotal number of flushes + // FlushTotal The number of flushes. FlushTotal *string `json:"flush.total,omitempty"` - // FlushTotalTime time spent in flush + // FlushTotalTime The time spent in flush. FlushTotalTime *string `json:"flush.total_time,omitempty"` - // GetCurrent number of current get ops + // GetCurrent The number of current get ops. GetCurrent *string `json:"get.current,omitempty"` - // GetExistsTime time spent in successful gets + // GetExistsTime The time spent in successful gets. GetExistsTime *string `json:"get.exists_time,omitempty"` - // GetExistsTotal number of successful gets + // GetExistsTotal The number of successful get operations. GetExistsTotal *string `json:"get.exists_total,omitempty"` - // GetMissingTime time spent in failed gets + // GetMissingTime The time spent in failed gets. GetMissingTime *string `json:"get.missing_time,omitempty"` - // GetMissingTotal number of failed gets + // GetMissingTotal The number of failed gets. GetMissingTotal *string `json:"get.missing_total,omitempty"` - // GetTime time spent in get + // GetTime The time spent in get. GetTime *string `json:"get.time,omitempty"` - // GetTotal number of get ops + // GetTotal The number of get ops. GetTotal *string `json:"get.total,omitempty"` - // HeapCurrent used heap + // HeapCurrent The used heap. 
HeapCurrent *string `json:"heap.current,omitempty"` - // HeapMax max configured heap + // HeapMax The maximum configured heap. HeapMax *string `json:"heap.max,omitempty"` - // HeapPercent used heap ratio + // HeapPercent The used heap ratio. HeapPercent Percentage `json:"heap.percent,omitempty"` - // HttpAddress bound http address + // HttpAddress The bound HTTP address. HttpAddress *string `json:"http_address,omitempty"` - // Id unique node id + // Id The unique node identifier. Id *string `json:"id,omitempty"` - // IndexingDeleteCurrent number of current deletions + // IndexingDeleteCurrent The number of current deletions. IndexingDeleteCurrent *string `json:"indexing.delete_current,omitempty"` - // IndexingDeleteTime time spent in deletions + // IndexingDeleteTime The time spent in deletions. IndexingDeleteTime *string `json:"indexing.delete_time,omitempty"` - // IndexingDeleteTotal number of delete ops + // IndexingDeleteTotal The number of delete operations. IndexingDeleteTotal *string `json:"indexing.delete_total,omitempty"` - // IndexingIndexCurrent number of current indexing ops + // IndexingIndexCurrent The number of current indexing operations. IndexingIndexCurrent *string `json:"indexing.index_current,omitempty"` - // IndexingIndexFailed number of failed indexing ops + // IndexingIndexFailed The number of failed indexing operations. IndexingIndexFailed *string `json:"indexing.index_failed,omitempty"` - // IndexingIndexTime time spent in indexing + // IndexingIndexTime The time spent in indexing. IndexingIndexTime *string `json:"indexing.index_time,omitempty"` - // IndexingIndexTotal number of indexing ops + // IndexingIndexTotal The number of indexing operations. IndexingIndexTotal *string `json:"indexing.index_total,omitempty"` - // Ip ip address + // Ip The IP address. Ip *string `json:"ip,omitempty"` - // Jdk jdk version + // Jdk The Java version. 
Jdk *string `json:"jdk,omitempty"` - // Load15M 15m load avg + // Load15M The load average for the last fifteen minutes. Load15M *string `json:"load_15m,omitempty"` - // Load1M 1m load avg + // Load1M The load average for the most recent minute. Load1M *string `json:"load_1m,omitempty"` - // Load5M 5m load avg + // Load5M The load average for the last five minutes. Load5M *string `json:"load_5m,omitempty"` - // Master *:current master + // Master Indicates whether the node is the elected master node. + // Returned values include `*`(elected master) and `-`(not elected master). Master *string `json:"master,omitempty"` - // MergesCurrent number of current merges + // MergesCurrent The number of current merges. MergesCurrent *string `json:"merges.current,omitempty"` - // MergesCurrentDocs number of current merging docs + // MergesCurrentDocs The number of current merging docs. MergesCurrentDocs *string `json:"merges.current_docs,omitempty"` - // MergesCurrentSize size of current merges + // MergesCurrentSize The size of current merges. MergesCurrentSize *string `json:"merges.current_size,omitempty"` - // MergesTotal number of completed merge ops + // MergesTotal The number of completed merge operations. MergesTotal *string `json:"merges.total,omitempty"` - // MergesTotalDocs docs merged + // MergesTotalDocs The docs merged. MergesTotalDocs *string `json:"merges.total_docs,omitempty"` - // MergesTotalSize size merged + // MergesTotalSize The size merged. MergesTotalSize *string `json:"merges.total_size,omitempty"` - // MergesTotalTime time spent in merges + // MergesTotalTime The time spent in merges. MergesTotalTime *string `json:"merges.total_time,omitempty"` - // Name node name + // Name The node name. Name *string `json:"name,omitempty"` - // NodeRole m:master eligible node, d:data node, i:ingest node, -:coordinating node only + // NodeRole The roles of the node. 
+ // Returned values include `c`(cold node), `d`(data node), `f`(frozen node), + // `h`(hot node), `i`(ingest node), `l`(machine learning node), `m` (master + // eligible node), `r`(remote cluster client node), `s`(content node), + // `t`(transform node), `v`(voting-only node), `w`(warm node), and + // `-`(coordinating node only). NodeRole *string `json:"node.role,omitempty"` - // Pid process id + // Pid The process identifier. Pid *string `json:"pid,omitempty"` - // Port bound transport port + // Port The bound transport port. Port *string `json:"port,omitempty"` - // QueryCacheEvictions query cache evictions + // QueryCacheEvictions The query cache evictions. QueryCacheEvictions *string `json:"query_cache.evictions,omitempty"` - // QueryCacheHitCount query cache hit counts + // QueryCacheHitCount The query cache hit counts. QueryCacheHitCount *string `json:"query_cache.hit_count,omitempty"` - // QueryCacheMemorySize used query cache + // QueryCacheMemorySize The used query cache. QueryCacheMemorySize *string `json:"query_cache.memory_size,omitempty"` - // QueryCacheMissCount query cache miss counts + // QueryCacheMissCount The query cache miss counts. QueryCacheMissCount *string `json:"query_cache.miss_count,omitempty"` - // RamCurrent used machine memory + // RamCurrent The used machine memory. RamCurrent *string `json:"ram.current,omitempty"` - // RamMax total machine memory + // RamMax The total machine memory. RamMax *string `json:"ram.max,omitempty"` - // RamPercent used machine memory ratio + // RamPercent The used machine memory ratio. RamPercent Percentage `json:"ram.percent,omitempty"` - // RefreshExternalTime time spent in external refreshes + // RefreshExternalTime The time spent in external refreshes. RefreshExternalTime *string `json:"refresh.external_time,omitempty"` - // RefreshExternalTotal total external refreshes + // RefreshExternalTotal The total external refreshes.
RefreshExternalTotal *string `json:"refresh.external_total,omitempty"` - // RefreshListeners number of pending refresh listeners + // RefreshListeners The number of pending refresh listeners. RefreshListeners *string `json:"refresh.listeners,omitempty"` - // RefreshTime time spent in refreshes + // RefreshTime The time spent in refreshes. RefreshTime *string `json:"refresh.time,omitempty"` - // RefreshTotal total refreshes + // RefreshTotal The total refreshes. RefreshTotal *string `json:"refresh.total,omitempty"` - // RequestCacheEvictions request cache evictions + // RequestCacheEvictions The request cache evictions. RequestCacheEvictions *string `json:"request_cache.evictions,omitempty"` - // RequestCacheHitCount request cache hit counts + // RequestCacheHitCount The request cache hit counts. RequestCacheHitCount *string `json:"request_cache.hit_count,omitempty"` - // RequestCacheMemorySize used request cache + // RequestCacheMemorySize The used request cache. RequestCacheMemorySize *string `json:"request_cache.memory_size,omitempty"` - // RequestCacheMissCount request cache miss counts + // RequestCacheMissCount The request cache miss counts. RequestCacheMissCount *string `json:"request_cache.miss_count,omitempty"` - // ScriptCacheEvictions script cache evictions + // ScriptCacheEvictions The total compiled scripts evicted from the cache. ScriptCacheEvictions *string `json:"script.cache_evictions,omitempty"` - // ScriptCompilationLimitTriggered script cache compilation limit triggered + // ScriptCompilationLimitTriggered The script cache compilation limit triggered. ScriptCompilationLimitTriggered *string `json:"script.compilation_limit_triggered,omitempty"` - // ScriptCompilations script compilations + // ScriptCompilations The total script compilations. ScriptCompilations *string `json:"script.compilations,omitempty"` - // SearchFetchCurrent current fetch phase ops + // SearchFetchCurrent The current fetch phase operations. 
SearchFetchCurrent *string `json:"search.fetch_current,omitempty"` - // SearchFetchTime time spent in fetch phase + // SearchFetchTime The time spent in fetch phase. SearchFetchTime *string `json:"search.fetch_time,omitempty"` - // SearchFetchTotal total fetch ops + // SearchFetchTotal The total fetch operations. SearchFetchTotal *string `json:"search.fetch_total,omitempty"` - // SearchOpenContexts open search contexts + // SearchOpenContexts The open search contexts. SearchOpenContexts *string `json:"search.open_contexts,omitempty"` - // SearchQueryCurrent current query phase ops + // SearchQueryCurrent The current query phase operations. SearchQueryCurrent *string `json:"search.query_current,omitempty"` - // SearchQueryTime time spent in query phase + // SearchQueryTime The time spent in query phase. SearchQueryTime *string `json:"search.query_time,omitempty"` - // SearchQueryTotal total query phase ops + // SearchQueryTotal The total query phase operations. SearchQueryTotal *string `json:"search.query_total,omitempty"` - // SearchScrollCurrent open scroll contexts + // SearchScrollCurrent The open scroll contexts. SearchScrollCurrent *string `json:"search.scroll_current,omitempty"` - // SearchScrollTime time scroll contexts held open + // SearchScrollTime The time scroll contexts held open. SearchScrollTime *string `json:"search.scroll_time,omitempty"` - // SearchScrollTotal completed scroll contexts + // SearchScrollTotal The completed scroll contexts. SearchScrollTotal *string `json:"search.scroll_total,omitempty"` - // SegmentsCount number of segments + // SegmentsCount The number of segments. 
SegmentsCount *string `json:"segments.count,omitempty"` - // SegmentsFixedBitsetMemory memory used by fixed bit sets for nested object field types and export type - // filters for types referred in _parent fields + // SegmentsFixedBitsetMemory The memory used by fixed bit sets for nested object field types and export + // type filters for types referred in _parent fields. SegmentsFixedBitsetMemory *string `json:"segments.fixed_bitset_memory,omitempty"` - // SegmentsIndexWriterMemory memory used by index writer + // SegmentsIndexWriterMemory The memory used by the index writer. SegmentsIndexWriterMemory *string `json:"segments.index_writer_memory,omitempty"` - // SegmentsMemory memory used by segments + // SegmentsMemory The memory used by segments. SegmentsMemory *string `json:"segments.memory,omitempty"` - // SegmentsVersionMapMemory memory used by version map + // SegmentsVersionMapMemory The memory used by the version map. SegmentsVersionMapMemory *string `json:"segments.version_map_memory,omitempty"` - // SuggestCurrent number of current suggest ops + // SuggestCurrent The number of current suggest operations. SuggestCurrent *string `json:"suggest.current,omitempty"` - // SuggestTime time spend in suggest + // SuggestTime The time spent in suggest. SuggestTime *string `json:"suggest.time,omitempty"` - // SuggestTotal number of suggest ops + // SuggestTotal The number of suggest operations. SuggestTotal *string `json:"suggest.total,omitempty"` - // Type es distribution type + // Type The Elasticsearch distribution type. Type *string `json:"type,omitempty"` - // Uptime node uptime + // Uptime The node uptime. Uptime *string `json:"uptime,omitempty"` - // Version es version + // Version The Elasticsearch version.
Version *string `json:"version,omitempty"` } +func (s *NodesRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "build", "b": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Build = &o + + case "bulk.avg_size_in_bytes", "basi", "bulkAvgSizeInBytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkAvgSizeInBytes = &o + + case "bulk.avg_time", "bati", "bulkAvgTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkAvgTime = &o + + case "bulk.total_operations", "bto", "bulkTotalOperations": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalOperations = &o + + case "bulk.total_size_in_bytes", "btsi", "bulkTotalSizeInBytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalSizeInBytes = &o + + case "bulk.total_time", "btti", "bulkTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalTime = &o + + case "completion.size", "cs", "completionSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, 
err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CompletionSize = &o + + case "cpu": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Cpu = &o + + case "disk.avail", "d", "da", "disk", "diskAvail": + if err := dec.Decode(&s.DiskAvail); err != nil { + return err + } + + case "disk.total", "dt", "diskTotal": + if err := dec.Decode(&s.DiskTotal); err != nil { + return err + } + + case "disk.used", "du", "diskUsed": + if err := dec.Decode(&s.DiskUsed); err != nil { + return err + } + + case "disk.used_percent", "dup", "diskUsedPercent": + if err := dec.Decode(&s.DiskUsedPercent); err != nil { + return err + } + + case "fielddata.evictions", "fe", "fielddataEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FielddataEvictions = &o + + case "fielddata.memory_size", "fm", "fielddataMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FielddataMemorySize = &o + + case "file_desc.current", "fdc", "fileDescriptorCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FileDescCurrent = &o + + case "file_desc.max", "fdm", "fileDescriptorMax": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FileDescMax = &o + + case "file_desc.percent", "fdp", "fileDescriptorPercent": + if err := dec.Decode(&s.FileDescPercent); err != nil { + return err + } + + case "flavor", "f": + var 
tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Flavor = &o + + case "flush.total", "ft", "flushTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FlushTotal = &o + + case "flush.total_time", "ftt", "flushTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FlushTotalTime = &o + + case "get.current", "gc", "getCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetCurrent = &o + + case "get.exists_time", "geti", "getExistsTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetExistsTime = &o + + case "get.exists_total", "geto", "getExistsTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetExistsTotal = &o + + case "get.missing_time", "gmti", "getMissingTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetMissingTime = &o + + case "get.missing_total", "gmto", "getMissingTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetMissingTotal = &o + + case 
"get.time", "gti", "getTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetTime = &o + + case "get.total", "gto", "getTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetTotal = &o + + case "heap.current", "hc", "heapCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.HeapCurrent = &o + + case "heap.max", "hm", "heapMax": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.HeapMax = &o + + case "heap.percent", "hp", "heapPercent": + if err := dec.Decode(&s.HeapPercent); err != nil { + return err + } + + case "http_address", "http": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.HttpAddress = &o + + case "id", "nodeId": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "indexing.delete_current", "idc", "indexingDeleteCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteCurrent = &o + + case "indexing.delete_time", "idti", "indexingDeleteTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteTime = &o + + case "indexing.delete_total", "idto", 
"indexingDeleteTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteTotal = &o + + case "indexing.index_current", "iic", "indexingIndexCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexCurrent = &o + + case "indexing.index_failed", "iif", "indexingIndexFailed": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexFailed = &o + + case "indexing.index_time", "iiti", "indexingIndexTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexTime = &o + + case "indexing.index_total", "iito", "indexingIndexTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexTotal = &o + + case "ip", "i": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Ip = &o + + case "jdk", "j": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Jdk = &o + + case "load_15m", "l": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Load15M = &o + + case "load_1m": + 
var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Load1M = &o + + case "load_5m": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Load5M = &o + + case "master", "m": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Master = &o + + case "merges.current", "mc", "mergesCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrent = &o + + case "merges.current_docs", "mcd", "mergesCurrentDocs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrentDocs = &o + + case "merges.current_size", "mcs", "mergesCurrentSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrentSize = &o + + case "merges.total", "mt", "mergesTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotal = &o + + case "merges.total_docs", "mtd", "mergesTotalDocs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotalDocs = &o + + case "merges.total_size", "mts", "mergesTotalSize": + var tmp 
json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotalSize = &o + + case "merges.total_time", "mtt", "mergesTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotalTime = &o + + case "name", "n": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "node.role", "r", "role", "nodeRole": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeRole = &o + + case "pid", "p": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pid = &o + + case "port", "po": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Port = &o + + case "query_cache.evictions", "qce", "queryCacheEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryCacheEvictions = &o + + case "query_cache.hit_count", "qchc", "queryCacheHitCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryCacheHitCount = &o + + case "query_cache.memory_size", "qcm", "queryCacheMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = 
string(tmp[:]) + } + s.QueryCacheMemorySize = &o + + case "query_cache.miss_count", "qcmc", "queryCacheMissCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryCacheMissCount = &o + + case "ram.current", "rc", "ramCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RamCurrent = &o + + case "ram.max", "rn", "ramMax": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RamMax = &o + + case "ram.percent", "rp", "ramPercent": + if err := dec.Decode(&s.RamPercent); err != nil { + return err + } + + case "refresh.external_time", "rti", "refreshTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshExternalTime = &o + + case "refresh.external_total", "rto", "refreshTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshExternalTotal = &o + + case "refresh.listeners", "rli", "refreshListeners": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshListeners = &o + + case "refresh.time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshTime = &o + + case "refresh.total": + var tmp json.RawMessage + if 
err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshTotal = &o + + case "request_cache.evictions", "rce", "requestCacheEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RequestCacheEvictions = &o + + case "request_cache.hit_count", "rchc", "requestCacheHitCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RequestCacheHitCount = &o + + case "request_cache.memory_size", "rcm", "requestCacheMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RequestCacheMemorySize = &o + + case "request_cache.miss_count", "rcmc", "requestCacheMissCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RequestCacheMissCount = &o + + case "script.cache_evictions", "scrce", "scriptCacheEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ScriptCacheEvictions = &o + + case "script.compilation_limit_triggered", "scrclt", "scriptCacheCompilationLimitTriggered": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ScriptCompilationLimitTriggered = &o + + case "script.compilations", "scrcc", "scriptCompilations": + var tmp json.RawMessage + if err := 
dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ScriptCompilations = &o + + case "search.fetch_current", "sfc", "searchFetchCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchCurrent = &o + + case "search.fetch_time", "sfti", "searchFetchTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchTime = &o + + case "search.fetch_total", "sfto", "searchFetchTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchTotal = &o + + case "search.open_contexts", "so", "searchOpenContexts": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchOpenContexts = &o + + case "search.query_current", "sqc", "searchQueryCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQueryCurrent = &o + + case "search.query_time", "sqti", "searchQueryTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQueryTime = &o + + case "search.query_total", "sqto", "searchQueryTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = 
string(tmp[:]) + } + s.SearchQueryTotal = &o + + case "search.scroll_current", "scc", "searchScrollCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollCurrent = &o + + case "search.scroll_time", "scti", "searchScrollTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollTime = &o + + case "search.scroll_total", "scto", "searchScrollTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollTotal = &o + + case "segments.count", "sc", "segmentsCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsCount = &o + + case "segments.fixed_bitset_memory", "sfbm", "fixedBitsetMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsFixedBitsetMemory = &o + + case "segments.index_writer_memory", "siwm", "segmentsIndexWriterMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsIndexWriterMemory = &o + + case "segments.memory", "sm", "segmentsMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsMemory = &o + + case "segments.version_map_memory", "svmm", 
"segmentsVersionMapMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsVersionMapMemory = &o + + case "suggest.current", "suc", "suggestCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SuggestCurrent = &o + + case "suggest.time", "suti", "suggestTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SuggestTime = &o + + case "suggest.total", "suto", "suggestTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SuggestTotal = &o + + case "type", "t": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + case "uptime", "u": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Uptime = &o + + case "version", "v": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewNodesRecord returns a NodesRecord. 
func NewNodesRecord() *NodesRecord { r := &NodesRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodestatistics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodestatistics.go index a84128445..72f6b4379 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodestatistics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodestatistics.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Node.ts#L28-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Node.ts#L28-L39 type NodeStatistics struct { // Failed Number of nodes that rejected the request or failed to respond. If this value // is not 0, a reason for the rejection or failure is included in the response. 
@@ -34,6 +42,79 @@ type NodeStatistics struct { Total int `json:"total"` } +func (s *NodeStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "failed": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Failed = value + case float64: + f := int(v) + s.Failed = f + } + + case "failures": + if err := dec.Decode(&s.Failures); err != nil { + return err + } + + case "successful": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Successful = value + case float64: + f := int(v) + s.Successful = f + } + + case "total": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Total = value + case float64: + f := int(v) + s.Total = f + } + + } + } + return nil +} + // NewNodeStatistics returns a NodeStatistics. func NewNodeStatistics() *NodeStatistics { r := &NodeStatistics{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodetasks.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodetasks.go index e7e05065b..62e9f1810 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodetasks.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodetasks.go @@ -16,28 +16,96 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // NodeTasks type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/tasks/_types/TaskListResponseBase.ts#L49-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/tasks/_types/TaskListResponseBase.ts#L49-L57 type NodeTasks struct { Attributes map[string]string `json:"attributes,omitempty"` Host *string `json:"host,omitempty"` Ip *string `json:"ip,omitempty"` Name *string `json:"name,omitempty"` Roles []string `json:"roles,omitempty"` - Tasks map[TaskId]TaskInfo `json:"tasks"` + Tasks map[string]TaskInfo `json:"tasks"` TransportAddress *string `json:"transport_address,omitempty"` } +func (s *NodeTasks) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return err + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return err + } + + case "ip": + if err := dec.Decode(&s.Ip); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "tasks": + if s.Tasks == nil { + s.Tasks = make(map[string]TaskInfo, 0) + } + if err := dec.Decode(&s.Tasks); err != nil { + return err + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return err + } + + } + } + 
return nil +} + // NewNodeTasks returns a NodeTasks. func NewNodeTasks() *NodeTasks { r := &NodeTasks{ Attributes: make(map[string]string, 0), - Tasks: make(map[TaskId]TaskInfo, 0), + Tasks: make(map[string]TaskInfo, 0), } return r diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodethreadpoolinfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodethreadpoolinfo.go index 1a2201a36..e26d32d61 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodethreadpoolinfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodethreadpoolinfo.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // NodeThreadPoolInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/info/types.ts#L286-L293 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/info/types.ts#L289-L296 type NodeThreadPoolInfo struct { Core *int `json:"core,omitempty"` KeepAlive Duration `json:"keep_alive,omitempty"` @@ -32,6 +40,107 @@ type NodeThreadPoolInfo struct { Type string `json:"type"` } +func (s *NodeThreadPoolInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "core": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Core = &value + case float64: + f := int(v) + s.Core = &f + } + + case "keep_alive": + if err := dec.Decode(&s.KeepAlive); err != nil { + return err + } + + case "max": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Max = &value + case float64: + f := int(v) + s.Max = &f + } + + case "queue_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.QueueSize = value + case float64: + f := int(v) + s.QueueSize = f + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + 
} + return nil +} + // NewNodeThreadPoolInfo returns a NodeThreadPoolInfo. func NewNodeThreadPoolInfo() *NodeThreadPoolInfo { r := &NodeThreadPoolInfo{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeusage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeusage.go index 0a7aa539a..e36ccc420 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeusage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/nodeusage.go @@ -16,17 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // NodeUsage type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/usage/types.ts#L25-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/usage/types.ts#L25-L30 type NodeUsage struct { Aggregations map[string]json.RawMessage `json:"aggregations"` RestActions map[string]int `json:"rest_actions"` @@ -34,6 +37,52 @@ type NodeUsage struct { Timestamp int64 `json:"timestamp"` } +func (s *NodeUsage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Aggregations); err != nil { + return err + } + + case "rest_actions": + if s.RestActions == nil { + s.RestActions = make(map[string]int, 0) + } + if err := dec.Decode(&s.RestActions); err != nil { 
+ return err + } + + case "since": + if err := dec.Decode(&s.Since); err != nil { + return err + } + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + } + } + return nil +} + // NewNodeUsage returns a NodeUsage. func NewNodeUsage() *NodeUsage { r := &NodeUsage{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/norianalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/norianalyzer.go index 795aeac56..137636b87 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/norianalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/norianalyzer.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noridecompoundmode" ) // NoriAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/analyzers.ts#L66-L72 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/analyzers.ts#L66-L72 type NoriAnalyzer struct { DecompoundMode *noridecompoundmode.NoriDecompoundMode `json:"decompound_mode,omitempty"` Stoptags []string `json:"stoptags,omitempty"` @@ -35,11 +41,77 @@ type NoriAnalyzer struct { Version *string `json:"version,omitempty"` } +func (s *NoriAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decompound_mode": + if err := dec.Decode(&s.DecompoundMode); err != nil { + return err + } + + case "stoptags": + if err := dec.Decode(&s.Stoptags); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "user_dictionary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UserDictionary = &o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s NoriAnalyzer) MarshalJSON() ([]byte, error) { + type innerNoriAnalyzer NoriAnalyzer + tmp := innerNoriAnalyzer{ + DecompoundMode: s.DecompoundMode, + Stoptags: s.Stoptags, + Type: s.Type, + UserDictionary: s.UserDictionary, + Version: s.Version, + } + + tmp.Type = "nori" + + return json.Marshal(tmp) +} + // NewNoriAnalyzer returns a NoriAnalyzer. 
func NewNoriAnalyzer() *NoriAnalyzer { r := &NoriAnalyzer{} - r.Type = "nori" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/noripartofspeechtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/noripartofspeechtokenfilter.go index ac71d5084..ab3f06c89 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/noripartofspeechtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/noripartofspeechtokenfilter.go @@ -16,24 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // NoriPartOfSpeechTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L272-L275 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L273-L276 type NoriPartOfSpeechTokenFilter struct { Stoptags []string `json:"stoptags,omitempty"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *NoriPartOfSpeechTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stoptags": + if err := dec.Decode(&s.Stoptags); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON 
override marshalling to include literal value +func (s NoriPartOfSpeechTokenFilter) MarshalJSON() ([]byte, error) { + type innerNoriPartOfSpeechTokenFilter NoriPartOfSpeechTokenFilter + tmp := innerNoriPartOfSpeechTokenFilter{ + Stoptags: s.Stoptags, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "nori_part_of_speech" + + return json.Marshal(tmp) +} + // NewNoriPartOfSpeechTokenFilter returns a NoriPartOfSpeechTokenFilter. func NewNoriPartOfSpeechTokenFilter() *NoriPartOfSpeechTokenFilter { r := &NoriPartOfSpeechTokenFilter{} - r.Type = "nori_part_of_speech" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/noritokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/noritokenizer.go index 288efbf14..b250fa799 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/noritokenizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/noritokenizer.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noridecompoundmode" ) // NoriTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/tokenizers.ts#L80-L86 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/tokenizers.ts#L81-L87 type NoriTokenizer struct { DecompoundMode *noridecompoundmode.NoriDecompoundMode `json:"decompound_mode,omitempty"` DiscardPunctuation *bool `json:"discard_punctuation,omitempty"` @@ -36,11 +42,92 @@ type NoriTokenizer struct { Version *string `json:"version,omitempty"` } +func (s *NoriTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decompound_mode": + if err := dec.Decode(&s.DecompoundMode); err != nil { + return err + } + + case "discard_punctuation": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DiscardPunctuation = &value + case bool: + s.DiscardPunctuation = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "user_dictionary": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UserDictionary = &o + + case "user_dictionary_rules": + if err := dec.Decode(&s.UserDictionaryRules); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s NoriTokenizer) MarshalJSON() ([]byte, error) { + type innerNoriTokenizer NoriTokenizer + tmp := innerNoriTokenizer{ + DecompoundMode: s.DecompoundMode, + DiscardPunctuation: s.DiscardPunctuation, + 
Type: s.Type, + UserDictionary: s.UserDictionary, + UserDictionaryRules: s.UserDictionaryRules, + Version: s.Version, + } + + tmp.Type = "nori_tokenizer" + + return json.Marshal(tmp) +} + // NewNoriTokenizer returns a NoriTokenizer. func NewNoriTokenizer() *NoriTokenizer { r := &NoriTokenizer{} - r.Type = "nori_tokenizer" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/normalizeaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/normalizeaggregation.go index b941b2597..d93f891c0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/normalizeaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/normalizeaggregation.go @@ -16,35 +16,41 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalizemethod" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalizemethod" ) // NormalizeAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L262-L264 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L321-L326 type NormalizeAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
- BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Method *normalizemethod.NormalizeMethod `json:"method,omitempty"` - Name *string `json:"name,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // Method The specific method to apply. + Method *normalizemethod.NormalizeMethod `json:"method,omitempty"` + Name *string `json:"name,omitempty"` } func (s *NormalizeAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -64,9 +70,16 @@ func (s *NormalizeAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -84,9 +97,16 @@ func (s *NormalizeAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/normalizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/normalizer.go index cd03c869c..475c910e0 100644 --- 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/normalizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/normalizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // LowercaseNormalizer // CustomNormalizer // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/normalizers.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/normalizers.ts#L20-L24 type Normalizer interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/numberrangequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/numberrangequery.go index 2b91cfcec..9d4723d21 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/numberrangequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/numberrangequery.go @@ -16,27 +16,170 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation" ) // NumberRangeQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/term.ts#L83-L90 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/term.ts#L145-L164 type NumberRangeQuery struct { - Boost *float32 `json:"boost,omitempty"` - From Float64 `json:"from,omitempty"` - Gt *Float64 `json:"gt,omitempty"` - Gte *Float64 `json:"gte,omitempty"` - Lt *Float64 `json:"lt,omitempty"` - Lte *Float64 `json:"lte,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Relation *rangerelation.RangeRelation `json:"relation,omitempty"` - To Float64 `json:"to,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + From Float64 `json:"from,omitempty"` + // Gt Greater than. + Gt *Float64 `json:"gt,omitempty"` + // Gte Greater than or equal to. + Gte *Float64 `json:"gte,omitempty"` + // Lt Less than. + Lt *Float64 `json:"lt,omitempty"` + // Lte Less than or equal to. + Lte *Float64 `json:"lte,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Relation Indicates how the range query matches values for `range` fields. 
+ Relation *rangerelation.RangeRelation `json:"relation,omitempty"` + To Float64 `json:"to,omitempty"` +} + +func (s *NumberRangeQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "from": + if err := dec.Decode(&s.From); err != nil { + return err + } + + case "gt": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Gt = &f + case float64: + f := Float64(v) + s.Gt = &f + } + + case "gte": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Gte = &f + case float64: + f := Float64(v) + s.Gte = &f + } + + case "lt": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Lt = &f + case float64: + f := Float64(v) + s.Lt = &f + } + + case "lte": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Lte = &f + case float64: + f := Float64(v) + s.Lte = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "relation": + if err := 
dec.Decode(&s.Relation); err != nil { + return err + } + + case "to": + if err := dec.Decode(&s.To); err != nil { + return err + } + + } + } + return nil } // NewNumberRangeQuery returns a NumberRangeQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/numericdecayfunction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/numericdecayfunction.go index 19b0c045d..c5ba2f429 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/numericdecayfunction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/numericdecayfunction.go @@ -16,23 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode" - "encoding/json" "fmt" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/multivaluemode" ) // NumericDecayFunction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L88-L90 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L182-L184 type NumericDecayFunction struct { + // MultiValueMode Determines how the distance is calculated when a field used for computing the + // decay contains multiple values. 
MultiValueMode *multivaluemode.MultiValueMode `json:"multi_value_mode,omitempty"` - NumericDecayFunction map[string]DecayPlacementdoubledouble `json:"-"` + NumericDecayFunction map[string]DecayPlacementdoubledouble `json:"NumericDecayFunction,omitempty"` } // MarhsalJSON overrides marshalling for types with additional properties @@ -54,6 +56,7 @@ func (s NumericDecayFunction) MarshalJSON() ([]byte, error) { for key, value := range s.NumericDecayFunction { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "NumericDecayFunction") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/numericfielddata.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/numericfielddata.go index 38ae128a1..233f68afd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/numericfielddata.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/numericfielddata.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // NumericFielddata type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/NumericFielddata.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/NumericFielddata.ts#L22-L24 type NumericFielddata struct { Format numericfielddataformat.NumericFielddataFormat `json:"format"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/objectproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/objectproperty.go index c88a5e238..0837d4d28 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/objectproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/objectproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // ObjectProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/complex.ts#L46-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/complex.ts#L46-L49 type ObjectProperty struct { CopyTo []string `json:"copy_to,omitempty"` Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` @@ -48,6 +48,7 @@ type ObjectProperty struct { } func (s *ObjectProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -62,8 +63,19 @@ func (s *ObjectProperty) UnmarshalJSON(data []byte) error { switch t { case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "dynamic": @@ -72,11 +84,23 @@ func (s *ObjectProperty) UnmarshalJSON(data []byte) error { } case "enabled": - if err := dec.Decode(&s.Enabled); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = &value + case bool: + s.Enabled = &v } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -85,7 +109,9 @@ func (s *ObjectProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ 
-364,23 +390,42 @@ func (s *ObjectProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -389,7 +434,9 @@ func (s *ObjectProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -668,20 +715,38 @@ func (s *ObjectProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case 
bool: + s.Store = &v } case "type": @@ -694,6 +759,27 @@ func (s *ObjectProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s ObjectProperty) MarshalJSON() ([]byte, error) { + type innerObjectProperty ObjectProperty + tmp := innerObjectProperty{ + CopyTo: s.CopyTo, + Dynamic: s.Dynamic, + Enabled: s.Enabled, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + tmp.Type = "object" + + return json.Marshal(tmp) +} + // NewObjectProperty returns a ObjectProperty. func NewObjectProperty() *ObjectProperty { r := &ObjectProperty{ @@ -702,7 +788,5 @@ func NewObjectProperty() *ObjectProperty { Properties: make(map[string]Property, 0), } - r.Type = "object" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/onehotencodingpreprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/onehotencodingpreprocessor.go index 18abd5657..d535a27fc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/onehotencodingpreprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/onehotencodingpreprocessor.go @@ -16,18 +16,66 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // OneHotEncodingPreprocessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model/types.ts#L44-L47 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model/types.ts#L44-L47 type OneHotEncodingPreprocessor struct { Field string `json:"field"` HotMap map[string]string `json:"hot_map"` } +func (s *OneHotEncodingPreprocessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Field = o + + case "hot_map": + if s.HotMap == nil { + s.HotMap = make(map[string]string, 0) + } + if err := dec.Decode(&s.HotMap); err != nil { + return err + } + + } + } + return nil +} + // NewOneHotEncodingPreprocessor returns a OneHotEncodingPreprocessor. func NewOneHotEncodingPreprocessor() *OneHotEncodingPreprocessor { r := &OneHotEncodingPreprocessor{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/operatingsystem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/operatingsystem.go index 495c53514..f99aa91cc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/operatingsystem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/operatingsystem.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // OperatingSystem type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L373-L379 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L945-L951 type OperatingSystem struct { Cgroup *Cgroup `json:"cgroup,omitempty"` Cpu *Cpu `json:"cpu,omitempty"` @@ -31,6 +39,61 @@ type OperatingSystem struct { Timestamp *int64 `json:"timestamp,omitempty"` } +func (s *OperatingSystem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cgroup": + if err := dec.Decode(&s.Cgroup); err != nil { + return err + } + + case "cpu": + if err := dec.Decode(&s.Cpu); err != nil { + return err + } + + case "mem": + if err := dec.Decode(&s.Mem); err != nil { + return err + } + + case "swap": + if err := dec.Decode(&s.Swap); err != nil { + return err + } + + case "timestamp": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Timestamp = &value + case float64: + f := int64(v) + s.Timestamp = &f + } + + } + } + return nil +} + // NewOperatingSystem returns a OperatingSystem. 
func NewOperatingSystem() *OperatingSystem { r := &OperatingSystem{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/operatingsystemmemoryinfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/operatingsystemmemoryinfo.go index 5f9870526..a2651135a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/operatingsystemmemoryinfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/operatingsystemmemoryinfo.go @@ -16,20 +16,148 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // OperatingSystemMemoryInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/stats/types.ts#L289-L297 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/stats/types.ts#L541-L568 type OperatingSystemMemoryInfo struct { + // AdjustedTotalInBytes Total amount, in bytes, of memory across all selected nodes, but using the + // value specified using the `es.total_memory_bytes` system property instead of + // measured total memory for those nodes where that system property was set. AdjustedTotalInBytes *int64 `json:"adjusted_total_in_bytes,omitempty"` - FreeInBytes int64 `json:"free_in_bytes"` - FreePercent int `json:"free_percent"` - TotalInBytes int64 `json:"total_in_bytes"` - UsedInBytes int64 `json:"used_in_bytes"` - UsedPercent int `json:"used_percent"` + // FreeInBytes Amount, in bytes, of free physical memory across all selected nodes. 
+ FreeInBytes int64 `json:"free_in_bytes"` + // FreePercent Percentage of free physical memory across all selected nodes. + FreePercent int `json:"free_percent"` + // TotalInBytes Total amount, in bytes, of physical memory across all selected nodes. + TotalInBytes int64 `json:"total_in_bytes"` + // UsedInBytes Amount, in bytes, of physical memory in use across all selected nodes. + UsedInBytes int64 `json:"used_in_bytes"` + // UsedPercent Percentage of physical memory in use across all selected nodes. + UsedPercent int `json:"used_percent"` +} + +func (s *OperatingSystemMemoryInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "adjusted_total_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.AdjustedTotalInBytes = &value + case float64: + f := int64(v) + s.AdjustedTotalInBytes = &f + } + + case "free_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.FreeInBytes = value + case float64: + f := int64(v) + s.FreeInBytes = f + } + + case "free_percent": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FreePercent = value + case float64: + f := int(v) + s.FreePercent = f + } + + case "total_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalInBytes = value + case float64: + f := int64(v) + s.TotalInBytes = f + } + + case "used_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case 
string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.UsedInBytes = value + case float64: + f := int64(v) + s.UsedInBytes = f + } + + case "used_percent": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.UsedPercent = value + case float64: + f := int(v) + s.UsedPercent = f + } + + } + } + return nil } // NewOperatingSystemMemoryInfo returns a OperatingSystemMemoryInfo. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/operationcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/operationcontainer.go new file mode 100644 index 000000000..a7898586a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/operationcontainer.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +// OperationContainer type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/bulk/types.ts#L145-L167 +type OperationContainer struct { + // Create Indexes the specified document if it does not already exist. + // The following line must contain the source data to be indexed. + Create *CreateOperation `json:"create,omitempty"` + // Delete Removes the specified document from the index. + Delete *DeleteOperation `json:"delete,omitempty"` + // Index Indexes the specified document. + // If the document exists, replaces the document and increments the version. + // The following line must contain the source data to be indexed. + Index *IndexOperation `json:"index,omitempty"` + // Update Performs a partial document update. + // The following line must contain the partial document and update options. + Update *UpdateOperation `json:"update,omitempty"` +} + +// NewOperationContainer returns a OperationContainer. +func NewOperationContainer() *OperationContainer { + r := &OperationContainer{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/outlierdetectionparameters.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/outlierdetectionparameters.go index f7101ea0b..567d55551 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/outlierdetectionparameters.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/outlierdetectionparameters.go @@ -16,20 +16,159 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // OutlierDetectionParameters type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L412-L419 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L527-L561 type OutlierDetectionParameters struct { - ComputeFeatureInfluence *bool `json:"compute_feature_influence,omitempty"` + // ComputeFeatureInfluence Specifies whether the feature influence calculation is enabled. + ComputeFeatureInfluence *bool `json:"compute_feature_influence,omitempty"` + // FeatureInfluenceThreshold The minimum outlier score that a document needs to have in order to calculate + // its feature influence score. + // Value range: 0-1 FeatureInfluenceThreshold *Float64 `json:"feature_influence_threshold,omitempty"` - Method *string `json:"method,omitempty"` - NNeighbors *int `json:"n_neighbors,omitempty"` - OutlierFraction *Float64 `json:"outlier_fraction,omitempty"` - StandardizationEnabled *bool `json:"standardization_enabled,omitempty"` + // Method The method that outlier detection uses. + // Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and + // `ensemble`. + // The default value is ensemble, which means that outlier detection uses an + // ensemble of different methods and normalises and combines their individual + // outlier scores to obtain the overall outlier score. + Method *string `json:"method,omitempty"` + // NNeighbors Defines the value for how many nearest neighbors each method of outlier + // detection uses to calculate its outlier score. + // When the value is not set, different values are used for different ensemble + // members. + // This default behavior helps improve the diversity in the ensemble; only + // override it if you are confident that the value you choose is appropriate for + // the data set. 
+ NNeighbors *int `json:"n_neighbors,omitempty"` + // OutlierFraction The proportion of the data set that is assumed to be outlying prior to + // outlier detection. + // For example, 0.05 means it is assumed that 5% of values are real outliers and + // 95% are inliers. + OutlierFraction *Float64 `json:"outlier_fraction,omitempty"` + // StandardizationEnabled If `true`, the following operation is performed on the columns before + // computing outlier scores: (x_i - mean(x_i)) / sd(x_i). + StandardizationEnabled *bool `json:"standardization_enabled,omitempty"` +} + +func (s *OutlierDetectionParameters) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compute_feature_influence": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.ComputeFeatureInfluence = &value + case bool: + s.ComputeFeatureInfluence = &v + } + + case "feature_influence_threshold": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.FeatureInfluenceThreshold = &f + case float64: + f := Float64(v) + s.FeatureInfluenceThreshold = &f + } + + case "method": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Method = &o + + case "n_neighbors": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NNeighbors = &value + case float64: + f := int(v) + s.NNeighbors = &f + } + + case "outlier_fraction": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + 
case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.OutlierFraction = &f + case float64: + f := Float64(v) + s.OutlierFraction = &f + } + + case "standardization_enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.StandardizationEnabled = &value + case bool: + s.StandardizationEnabled = &v + } + + } + } + return nil } // NewOutlierDetectionParameters returns a OutlierDetectionParameters. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/overallbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/overallbucket.go index 55e35ab68..d887a115a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/overallbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/overallbucket.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // OverallBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Bucket.ts#L130-L145 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Bucket.ts#L130-L145 type OverallBucket struct { // BucketSpan The length of the bucket in seconds. Matches the job with the longest // bucket_span value. 
@@ -42,6 +50,88 @@ type OverallBucket struct { TimestampString DateTime `json:"timestamp_string"` } +func (s *OverallBucket) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bucket_span": + if err := dec.Decode(&s.BucketSpan); err != nil { + return err + } + + case "is_interim": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsInterim = value + case bool: + s.IsInterim = v + } + + case "jobs": + if err := dec.Decode(&s.Jobs); err != nil { + return err + } + + case "overall_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.OverallScore = f + case float64: + f := Float64(v) + s.OverallScore = f + } + + case "result_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultType = o + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + case "timestamp_string": + if err := dec.Decode(&s.TimestampString); err != nil { + return err + } + + } + } + return nil +} + // NewOverallBucket returns a OverallBucket. 
func NewOverallBucket() *OverallBucket { r := &OverallBucket{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/overallbucketjob.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/overallbucketjob.go index 044cb0ad8..d94f4ca69 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/overallbucketjob.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/overallbucketjob.go @@ -16,18 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // OverallBucketJob type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Bucket.ts#L146-L149 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Bucket.ts#L146-L149 type OverallBucketJob struct { JobId string `json:"job_id"` MaxAnomalyScore Float64 `json:"max_anomaly_score"` } +func (s *OverallBucketJob) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return err + } + + case "max_anomaly_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.MaxAnomalyScore = f + case float64: + f := Float64(v) + s.MaxAnomalyScore = f + } + + } + } + return nil +} + // NewOverallBucketJob returns a OverallBucketJob. 
func NewOverallBucketJob() *OverallBucketJob { r := &OverallBucketJob{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/overlapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/overlapping.go index 756e71ef1..5544e7ed3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/overlapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/overlapping.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // Overlapping type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L39-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L39-L42 type Overlapping struct { IndexPatterns []string `json:"index_patterns"` Name string `json:"name"` } +func (s *Overlapping) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index_patterns": + if err := dec.Decode(&s.IndexPatterns); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + // NewOverlapping returns a Overlapping. 
func NewOverlapping() *Overlapping { r := &Overlapping{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/page.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/page.go index 7d85d0df3..1232ada7d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/page.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/page.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Page type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Page.ts#L22-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Page.ts#L22-L33 type Page struct { // From Skips the specified number of items. 
From *int `json:"from,omitempty"` @@ -30,6 +38,58 @@ type Page struct { Size *int `json:"size,omitempty"` } +func (s *Page) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "from": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.From = &value + case float64: + f := int(v) + s.From = &f + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil +} + // NewPage returns a Page. func NewPage() *Page { r := &Page{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutyaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutyaction.go index 3938a34fc..da1cd1263 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutyaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutyaction.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/pagerdutyeventtype" ) // PagerDutyAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L54-L54 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L54-L54 type PagerDutyAction struct { Account *string `json:"account,omitempty"` AttachPayload bool `json:"attach_payload"` @@ -39,6 +45,115 @@ type PagerDutyAction struct { Proxy *PagerDutyEventProxy `json:"proxy,omitempty"` } +func (s *PagerDutyAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "account": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Account = &o + + case "attach_payload": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AttachPayload = value + case bool: + s.AttachPayload = v + } + + case "client": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Client = &o + + case "client_url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClientUrl = &o + + case "contexts", "context": + if err := dec.Decode(&s.Contexts); err != nil { + return err + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + 
case "event_type": + if err := dec.Decode(&s.EventType); err != nil { + return err + } + + case "incident_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IncidentKey = o + + case "proxy": + if err := dec.Decode(&s.Proxy); err != nil { + return err + } + + } + } + return nil +} + // NewPagerDutyAction returns a PagerDutyAction. func NewPagerDutyAction() *PagerDutyAction { r := &PagerDutyAction{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutycontext.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutycontext.go index 5a19451a8..36ffb75e6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutycontext.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutycontext.go @@ -16,23 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/pagerdutycontexttype" ) // PagerDutyContext type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L61-L65 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L61-L65 type PagerDutyContext struct { Href *string `json:"href,omitempty"` Src *string `json:"src,omitempty"` Type pagerdutycontexttype.PagerDutyContextType `json:"type"` } +func (s *PagerDutyContext) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "href": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Href = &o + + case "src": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Src = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil +} + // NewPagerDutyContext returns a PagerDutyContext. func NewPagerDutyContext() *PagerDutyContext { r := &PagerDutyContext{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutyevent.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutyevent.go index de20daaf2..91d8af18a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutyevent.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutyevent.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/pagerdutyeventtype" ) // PagerDutyEvent type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L40-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L40-L52 type PagerDutyEvent struct { Account *string `json:"account,omitempty"` AttachPayload bool `json:"attach_payload"` @@ -39,6 +45,115 @@ type PagerDutyEvent struct { Proxy *PagerDutyEventProxy `json:"proxy,omitempty"` } +func (s *PagerDutyEvent) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "account": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Account = &o + + case "attach_payload": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AttachPayload = value + case bool: + s.AttachPayload = v + } + + case "client": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Client = &o + + case "client_url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClientUrl = &o + + case "contexts", "context": + if err := dec.Decode(&s.Contexts); err != nil { + return err + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "event_type": + if err := dec.Decode(&s.EventType); err != nil { + return err + } + + case "incident_key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IncidentKey = o + + case "proxy": + if err := dec.Decode(&s.Proxy); err != nil { + return err + } + + } + } + return nil +} + // NewPagerDutyEvent returns a PagerDutyEvent. func NewPagerDutyEvent() *PagerDutyEvent { r := &PagerDutyEvent{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutyeventproxy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutyeventproxy.go index d76e1acc6..a499c04c3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutyeventproxy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutyeventproxy.go @@ -16,18 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PagerDutyEventProxy type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L56-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L56-L59 type PagerDutyEventProxy struct { Host *string `json:"host,omitempty"` Port *int `json:"port,omitempty"` } +func (s *PagerDutyEventProxy) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return err + } + + case "port": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Port = &value + case float64: + f := int(v) + s.Port = &f + } + + } + } + return nil +} + // NewPagerDutyEventProxy returns a PagerDutyEventProxy. func NewPagerDutyEventProxy() *PagerDutyEventProxy { r := &PagerDutyEventProxy{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutyresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutyresult.go index 2f2e2cd92..1d33b1aaa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutyresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pagerdutyresult.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PagerDutyResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L78-L83 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L78-L83 type PagerDutyResult struct { Event PagerDutyEvent `json:"event"` Reason *string `json:"reason,omitempty"` @@ -30,6 +38,53 @@ type PagerDutyResult struct { Response *HttpInputResponseResult `json:"response,omitempty"` } +func (s *PagerDutyResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "event": + if err := dec.Decode(&s.Event); err != nil { + return err + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + case "request": + if err := dec.Decode(&s.Request); err != nil { + return err + } + + case "response": + if err := dec.Decode(&s.Response); err != nil { + return err + } + + } + } + return nil +} + // NewPagerDutyResult returns a PagerDutyResult. func NewPagerDutyResult() *PagerDutyResult { r := &PagerDutyResult{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/painlesscontextsetup.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/painlesscontextsetup.go index 543bb40fb..a939d92bc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/painlesscontextsetup.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/painlesscontextsetup.go @@ -16,21 +16,64 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // PainlessContextSetup type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/scripts_painless_execute/types.ts#L25-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/scripts_painless_execute/types.ts#L25-L39 type PainlessContextSetup struct { + // Document Document that’s temporarily indexed in-memory and accessible from the script. Document json.RawMessage `json:"document,omitempty"` - Index string `json:"index"` - Query Query `json:"query"` + // Index Index containing a mapping that’s compatible with the indexed document. + // You may specify a remote index by prefixing the index with the remote cluster + // alias. + Index string `json:"index"` + // Query Use this parameter to specify a query for computing a score. + Query Query `json:"query"` +} + +func (s *PainlessContextSetup) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "document": + if err := dec.Decode(&s.Document); err != nil { + return err + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + } + } + return nil } // NewPainlessContextSetup returns a PainlessContextSetup. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/parentaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/parentaggregate.go index d3045bf8e..ab6a00250 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/parentaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/parentaggregate.go @@ -16,32 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // ParentAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L770-L771 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L779-L780 type ParentAggregate struct { - Aggregations map[string]Aggregate `json:"-"` - DocCount int64 `json:"doc_count"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` } func (s *ParentAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -55,451 +54,19 @@ func (s *ParentAggregate) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); 
ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() 
- if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return 
err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err 
- } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "meta": @@ -507,6 +74,519 @@ func (s *ParentAggregate) UnmarshalJSON(data []byte) error { return err } + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { 
+ return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := 
NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := 
NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } 
+ s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } + } } return nil @@ -531,6 +611,7 @@ func (s ParentAggregate) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/parentaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/parentaggregation.go index 090493500..9ecc813f2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/parentaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/parentaggregation.go @@ -16,21 +16,68 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // ParentAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L285-L287 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L643-L648 type ParentAggregation struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Type The child type that should be selected. + Type *string `json:"type,omitempty"` +} + +func (s *ParentAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil } // NewParentAggregation returns a ParentAggregation. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/parentidquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/parentidquery.go index 7eb0fee9e..89698c5a6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/parentidquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/parentidquery.go @@ -16,19 +16,108 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ParentIdQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/joining.ts#L73-L78 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/joining.ts#L132-L146 type ParentIdQuery struct { - Boost *float32 `json:"boost,omitempty"` - Id *string `json:"id,omitempty"` - IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Type *string `json:"type,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Id ID of the parent document. + Id *string `json:"id,omitempty"` + // IgnoreUnmapped Indicates whether to ignore an unmapped `type` and not return any documents + // instead of an error. 
+ IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Type Name of the child relationship mapped for the `join` field. + Type *string `json:"type,omitempty"` +} + +func (s *ParentIdQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "ignore_unmapped": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil } // NewParentIdQuery returns a ParentIdQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/parenttaskinfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/parenttaskinfo.go index 54d99e281..cf17be56d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/parenttaskinfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/parenttaskinfo.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ParentTaskInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/tasks/_types/TaskListResponseBase.ts#L45-L47 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/tasks/_types/TaskListResponseBase.ts#L45-L47 type ParentTaskInfo struct { Action string `json:"action"` Cancellable bool `json:"cancellable"` @@ -36,8 +44,151 @@ type ParentTaskInfo struct { RunningTime Duration `json:"running_time,omitempty"` RunningTimeInNanos int64 `json:"running_time_in_nanos"` StartTimeInMillis int64 `json:"start_time_in_millis"` - Status *TaskStatus `json:"status,omitempty"` - Type string `json:"type"` + // Status Task status information can vary wildly from task to task. 
+ Status json.RawMessage `json:"status,omitempty"` + Type string `json:"type"` +} + +func (s *ParentTaskInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "action": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Action = o + + case "cancellable": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Cancellable = value + case bool: + s.Cancellable = v + } + + case "cancelled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Cancelled = &value + case bool: + s.Cancelled = &v + } + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return err + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "headers": + if s.Headers == nil { + s.Headers = make(map[string]string, 0) + } + if err := dec.Decode(&s.Headers); err != nil { + return err + } + + case "id": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Id = value + case float64: + f := int64(v) + s.Id = f + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return err + } + + case "parent_task_id": + if err := dec.Decode(&s.ParentTaskId); err != nil { + return err + } + + case "running_time": + if err := dec.Decode(&s.RunningTime); err != nil { + return err + } + + 
case "running_time_in_nanos": + if err := dec.Decode(&s.RunningTimeInNanos); err != nil { + return err + } + + case "start_time_in_millis": + if err := dec.Decode(&s.StartTimeInMillis); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil } // NewParentTaskInfo returns a ParentTaskInfo. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/passthroughinferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/passthroughinferenceoptions.go index 2525d506c..6f3532ba6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/passthroughinferenceoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/passthroughinferenceoptions.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PassThroughInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L209-L216 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L224-L231 type PassThroughInferenceOptions struct { // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. 
@@ -32,6 +40,48 @@ type PassThroughInferenceOptions struct { Vocabulary *Vocabulary `json:"vocabulary,omitempty"` } +func (s *PassThroughInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return err + } + + case "vocabulary": + if err := dec.Decode(&s.Vocabulary); err != nil { + return err + } + + } + } + return nil +} + // NewPassThroughInferenceOptions returns a PassThroughInferenceOptions. func NewPassThroughInferenceOptions() *PassThroughInferenceOptions { r := &PassThroughInferenceOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/passthroughinferenceupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/passthroughinferenceupdateoptions.go index ffcf7f14c..b2b535a56 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/passthroughinferenceupdateoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/passthroughinferenceupdateoptions.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PassThroughInferenceUpdateOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L350-L355 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L385-L390 type PassThroughInferenceUpdateOptions struct { // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. @@ -31,6 +39,43 @@ type PassThroughInferenceUpdateOptions struct { Tokenization *NlpTokenizationUpdateOptions `json:"tokenization,omitempty"` } +func (s *PassThroughInferenceUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return err + } + + } + } + return nil +} + // NewPassThroughInferenceUpdateOptions returns a PassThroughInferenceUpdateOptions. func NewPassThroughInferenceUpdateOptions() *PassThroughInferenceUpdateOptions { r := &PassThroughInferenceUpdateOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pathhierarchytokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pathhierarchytokenizer.go index 3644b1b4c..35cffada7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pathhierarchytokenizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pathhierarchytokenizer.go @@ -16,28 +16,121 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PathHierarchyTokenizer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/tokenizers.ts#L88-L95 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/tokenizers.ts#L89-L96 type PathHierarchyTokenizer struct { - BufferSize int `json:"buffer_size"` - Delimiter string `json:"delimiter"` - Replacement string `json:"replacement"` - Reverse bool `json:"reverse"` - Skip int `json:"skip"` - Type string `json:"type,omitempty"` - Version *string `json:"version,omitempty"` + BufferSize Stringifiedinteger `json:"buffer_size"` + Delimiter string `json:"delimiter"` + Replacement string `json:"replacement"` + Reverse Stringifiedboolean `json:"reverse"` + Skip Stringifiedinteger `json:"skip"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *PathHierarchyTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buffer_size": + if err := dec.Decode(&s.BufferSize); err != nil { + return err + } + + case "delimiter": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Delimiter = o + + case "replacement": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = 
string(tmp[:]) + } + s.Replacement = o + + case "reverse": + if err := dec.Decode(&s.Reverse); err != nil { + return err + } + + case "skip": + if err := dec.Decode(&s.Skip); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PathHierarchyTokenizer) MarshalJSON() ([]byte, error) { + type innerPathHierarchyTokenizer PathHierarchyTokenizer + tmp := innerPathHierarchyTokenizer{ + BufferSize: s.BufferSize, + Delimiter: s.Delimiter, + Replacement: s.Replacement, + Reverse: s.Reverse, + Skip: s.Skip, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "path_hierarchy" + + return json.Marshal(tmp) } // NewPathHierarchyTokenizer returns a PathHierarchyTokenizer. func NewPathHierarchyTokenizer() *PathHierarchyTokenizer { r := &PathHierarchyTokenizer{} - r.Type = "path_hierarchy" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patternanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patternanalyzer.go index d868f30af..198ee76bc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patternanalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patternanalyzer.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PatternAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/analyzers.ts#L74-L81 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/analyzers.ts#L74-L81 type PatternAnalyzer struct { Flags *string `json:"flags,omitempty"` Lowercase *bool `json:"lowercase,omitempty"` @@ -32,11 +40,110 @@ type PatternAnalyzer struct { Version *string `json:"version,omitempty"` } +func (s *PatternAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "flags": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Flags = &o + + case "lowercase": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Lowercase = &value + case bool: + s.Lowercase = &v + } + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = o + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return err + } + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + 
} + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PatternAnalyzer) MarshalJSON() ([]byte, error) { + type innerPatternAnalyzer PatternAnalyzer + tmp := innerPatternAnalyzer{ + Flags: s.Flags, + Lowercase: s.Lowercase, + Pattern: s.Pattern, + Stopwords: s.Stopwords, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "pattern" + + return json.Marshal(tmp) +} + // NewPatternAnalyzer returns a PatternAnalyzer. func NewPatternAnalyzer() *PatternAnalyzer { r := &PatternAnalyzer{} - r.Type = "pattern" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patterncapturetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patterncapturetokenfilter.go index 4cf9a7861..9ea68a383 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patterncapturetokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patterncapturetokenfilter.go @@ -16,25 +16,85 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // PatternCaptureTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L277-L281 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L278-L282 type PatternCaptureTokenFilter struct { - Patterns []string `json:"patterns"` - PreserveOriginal *bool `json:"preserve_original,omitempty"` - Type string `json:"type,omitempty"` - Version *string `json:"version,omitempty"` + Patterns []string `json:"patterns"` + PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` + Type string `json:"type,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *PatternCaptureTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "patterns": + if err := dec.Decode(&s.Patterns); err != nil { + return err + } + + case "preserve_original": + if err := dec.Decode(&s.PreserveOriginal); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PatternCaptureTokenFilter) MarshalJSON() ([]byte, error) { + type innerPatternCaptureTokenFilter PatternCaptureTokenFilter + tmp := innerPatternCaptureTokenFilter{ + Patterns: s.Patterns, + PreserveOriginal: s.PreserveOriginal, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "pattern_capture" + + return json.Marshal(tmp) } // NewPatternCaptureTokenFilter returns a PatternCaptureTokenFilter. 
func NewPatternCaptureTokenFilter() *PatternCaptureTokenFilter { r := &PatternCaptureTokenFilter{} - r.Type = "pattern_capture" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patternreplacecharfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patternreplacecharfilter.go index 31ec169fe..0dddb3d09 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patternreplacecharfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patternreplacecharfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PatternReplaceCharFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/char_filters.ts#L53-L58 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/char_filters.ts#L53-L58 type PatternReplaceCharFilter struct { Flags *string `json:"flags,omitempty"` Pattern string `json:"pattern"` @@ -31,11 +39,91 @@ type PatternReplaceCharFilter struct { Version *string `json:"version,omitempty"` } +func (s *PatternReplaceCharFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "flags": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Flags = &o + + 
case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = o + + case "replacement": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Replacement = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PatternReplaceCharFilter) MarshalJSON() ([]byte, error) { + type innerPatternReplaceCharFilter PatternReplaceCharFilter + tmp := innerPatternReplaceCharFilter{ + Flags: s.Flags, + Pattern: s.Pattern, + Replacement: s.Replacement, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "pattern_replace" + + return json.Marshal(tmp) +} + // NewPatternReplaceCharFilter returns a PatternReplaceCharFilter. func NewPatternReplaceCharFilter() *PatternReplaceCharFilter { r := &PatternReplaceCharFilter{} - r.Type = "pattern_replace" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patternreplacetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patternreplacetokenfilter.go index f962fbe53..ab2a66669 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patternreplacetokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patternreplacetokenfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PatternReplaceTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L283-L289 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L284-L290 type PatternReplaceTokenFilter struct { All *bool `json:"all,omitempty"` Flags *string `json:"flags,omitempty"` @@ -32,11 +40,106 @@ type PatternReplaceTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *PatternReplaceTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "all": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.All = &value + case bool: + s.All = &v + } + + case "flags": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Flags = &o + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = o + + case "replacement": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Replacement = &o 
+ + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PatternReplaceTokenFilter) MarshalJSON() ([]byte, error) { + type innerPatternReplaceTokenFilter PatternReplaceTokenFilter + tmp := innerPatternReplaceTokenFilter{ + All: s.All, + Flags: s.Flags, + Pattern: s.Pattern, + Replacement: s.Replacement, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "pattern_replace" + + return json.Marshal(tmp) +} + // NewPatternReplaceTokenFilter returns a PatternReplaceTokenFilter. func NewPatternReplaceTokenFilter() *PatternReplaceTokenFilter { r := &PatternReplaceTokenFilter{} - r.Type = "pattern_replace" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patterntokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patterntokenizer.go index abbf89cf6..77765bac4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patterntokenizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/patterntokenizer.go @@ -16,26 +16,118 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PatternTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/tokenizers.ts#L97-L102 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/tokenizers.ts#L98-L103 type PatternTokenizer struct { - Flags string `json:"flags"` - Group int `json:"group"` - Pattern string `json:"pattern"` + Flags *string `json:"flags,omitempty"` + Group *int `json:"group,omitempty"` + Pattern *string `json:"pattern,omitempty"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *PatternTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "flags": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Flags = &o + + case "group": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Group = &value + case float64: + f := int(v) + s.Group = &f + } + + case "pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pattern = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PatternTokenizer) MarshalJSON() ([]byte, error) { + type innerPatternTokenizer PatternTokenizer + tmp := innerPatternTokenizer{ + Flags: s.Flags, + Group: s.Group, + Pattern: 
s.Pattern, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "pattern" + + return json.Marshal(tmp) +} + // NewPatternTokenizer returns a PatternTokenizer. func NewPatternTokenizer() *PatternTokenizer { r := &PatternTokenizer{} - r.Type = "pattern" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pendingtask.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pendingtask.go index 47e678479..ec0d77f69 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pendingtask.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pendingtask.go @@ -16,20 +16,123 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PendingTask type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/pending_tasks/types.ts#L23-L30 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/pending_tasks/types.ts#L23-L47 type PendingTask struct { - Executing bool `json:"executing"` - InsertOrder int `json:"insert_order"` - Priority string `json:"priority"` - Source string `json:"source"` - TimeInQueue Duration `json:"time_in_queue,omitempty"` - TimeInQueueMillis int64 `json:"time_in_queue_millis"` + // Executing Indicates whether the pending tasks are currently executing or not. + Executing bool `json:"executing"` + // InsertOrder The number that represents when the task has been inserted into the task + // queue. + InsertOrder int `json:"insert_order"` + // Priority The priority of the pending task. 
+ // The valid priorities in descending priority order are: `IMMEDIATE` > `URGENT` + // > `HIGH` > `NORMAL` > `LOW` > `LANGUID`. + Priority string `json:"priority"` + // Source A general description of the cluster task that may include a reason and + // origin. + Source string `json:"source"` + // TimeInQueue The time since the task is waiting for being performed. + TimeInQueue Duration `json:"time_in_queue,omitempty"` + // TimeInQueueMillis The time expressed in milliseconds since the task is waiting for being + // performed. + TimeInQueueMillis int64 `json:"time_in_queue_millis"` +} + +func (s *PendingTask) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "executing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Executing = value + case bool: + s.Executing = v + } + + case "insert_order": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.InsertOrder = value + case float64: + f := int(v) + s.InsertOrder = f + } + + case "priority": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Priority = o + + case "source": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Source = o + + case "time_in_queue": + if err := dec.Decode(&s.TimeInQueue); err != nil { + return err + } + + case "time_in_queue_millis": + if err := dec.Decode(&s.TimeInQueueMillis); err != nil { + return err + } + + } + } + return nil } // NewPendingTask 
returns a PendingTask. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pendingtasksrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pendingtasksrecord.go index 395de89f4..a3294f6fd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pendingtasksrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pendingtasksrecord.go @@ -16,24 +16,100 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PendingTasksRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/pending_tasks/types.ts#L20-L41 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/pending_tasks/types.ts#L20-L41 type PendingTasksRecord struct { - // InsertOrder task insertion order + // InsertOrder The task insertion order. InsertOrder *string `json:"insertOrder,omitempty"` - // Priority task priority + // Priority The task priority. Priority *string `json:"priority,omitempty"` - // Source task source + // Source The task source. Source *string `json:"source,omitempty"` - // TimeInQueue how long task has been in queue + // TimeInQueue Indicates how long the task has been in queue. 
TimeInQueue *string `json:"timeInQueue,omitempty"` } +func (s *PendingTasksRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "insertOrder", "o": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.InsertOrder = &o + + case "priority", "p": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Priority = &o + + case "source", "s": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Source = &o + + case "timeInQueue", "t": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TimeInQueue = &o + + } + } + return nil +} + // NewPendingTasksRecord returns a PendingTasksRecord. func NewPendingTasksRecord() *PendingTasksRecord { r := &PendingTasksRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentage.go index 847c4117b..5a9c9d251 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // string // float32 // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Numeric.ts#L28-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Numeric.ts#L28-L28 type Percentage interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentagescoreheuristic.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentagescoreheuristic.go index 942fa244b..1079af50a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentagescoreheuristic.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentagescoreheuristic.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // PercentageScoreHeuristic type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L336-L336 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L764-L764 type PercentageScoreHeuristic struct { } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentileranksaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentileranksaggregation.go index 7d113898e..cfa332ab0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentileranksaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentileranksaggregation.go @@ -16,22 +16,117 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PercentileRanksAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L105-L110 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L174-L193 type PercentileRanksAggregation struct { - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` - Hdr *HdrMethod `json:"hdr,omitempty"` - Keyed *bool `json:"keyed,omitempty"` - Missing Missing `json:"missing,omitempty"` - Script Script `json:"script,omitempty"` - Tdigest *TDigest `json:"tdigest,omitempty"` - Values []Float64 `json:"values,omitempty"` + // Field The field on which to run the aggregation. 
+ Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Hdr Uses the alternative High Dynamic Range Histogram algorithm to calculate + // percentile ranks. + Hdr *HdrMethod `json:"hdr,omitempty"` + // Keyed By default, the aggregation associates a unique string key with each bucket + // and returns the ranges as a hash rather than an array. + // Set to `false` to disable this behavior. + Keyed *bool `json:"keyed,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script Script `json:"script,omitempty"` + // Tdigest Sets parameters for the default TDigest algorithm used to calculate + // percentile ranks. + Tdigest *TDigest `json:"tdigest,omitempty"` + // Values An array of values for which to calculate the percentile ranks. + Values []Float64 `json:"values,omitempty"` +} + +func (s *PercentileRanksAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "hdr": + if err := dec.Decode(&s.Hdr); err != nil { + return err + } + + case "keyed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "tdigest": + if err 
:= dec.Decode(&s.Tdigest); err != nil { + return err + } + + case "values": + if err := dec.Decode(&s.Values); err != nil { + return err + } + + } + } + return nil } // NewPercentileRanksAggregation returns a PercentileRanksAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentiles.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentiles.go index bd3f58b29..cc0cae3e4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentiles.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentiles.go @@ -16,14 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Percentiles holds the union for the following types: // -// map[string]string +// KeyedPercentiles // []ArrayPercentilesItem // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L149-L150 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L150-L151 type Percentiles interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentilesaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentilesaggregation.go index 26ef464cd..15053c222 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentilesaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentilesaggregation.go @@ -16,22 +16,117 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PercentilesAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L112-L117 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L195-L214 type PercentilesAggregation struct { - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` - Hdr *HdrMethod `json:"hdr,omitempty"` - Keyed *bool `json:"keyed,omitempty"` - Missing Missing `json:"missing,omitempty"` - Percents []Float64 `json:"percents,omitempty"` - Script Script `json:"script,omitempty"` - Tdigest *TDigest `json:"tdigest,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Hdr Uses the alternative High Dynamic Range Histogram algorithm to calculate + // percentiles. + Hdr *HdrMethod `json:"hdr,omitempty"` + // Keyed By default, the aggregation associates a unique string key with each bucket + // and returns the ranges as a hash rather than an array. + // Set to `false` to disable this behavior. + Keyed *bool `json:"keyed,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + // Percents The percentiles to calculate. + Percents []Float64 `json:"percents,omitempty"` + Script Script `json:"script,omitempty"` + // Tdigest Sets parameters for the default TDigest algorithm used to calculate + // percentiles. 
+ Tdigest *TDigest `json:"tdigest,omitempty"` +} + +func (s *PercentilesAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "hdr": + if err := dec.Decode(&s.Hdr); err != nil { + return err + } + + case "keyed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "percents": + if err := dec.Decode(&s.Percents); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "tdigest": + if err := dec.Decode(&s.Tdigest); err != nil { + return err + } + + } + } + return nil } // NewPercentilesAggregation returns a PercentilesAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentilesbucketaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentilesbucketaggregate.go index be136666d..27b34c68f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentilesbucketaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentilesbucketaggregate.go @@ -16,20 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // PercentilesBucketAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L177-L178 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L178-L179 type PercentilesBucketAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Values Percentiles `json:"values"` + Meta Metadata `json:"meta,omitempty"` + Values Percentiles `json:"values"` +} + +func (s *PercentilesBucketAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "values": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(KeyedPercentiles, 0) + if err := localDec.Decode(&o); err != nil { + return err + } + s.Values = o + case '[': + o := []ArrayPercentilesItem{} + if err := localDec.Decode(&o); err != nil { + return err + } + s.Values = o + } + + } + } + return nil } // NewPercentilesBucketAggregate returns a PercentilesBucketAggregate. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentilesbucketaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentilesbucketaggregation.go index 53dc44e28..656684724 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentilesbucketaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percentilesbucketaggregation.go @@ -16,34 +16,40 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // PercentilesBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L276-L278 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L356-L361 type PercentilesBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Percents []Float64 `json:"percents,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. 
+ // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Percents The list of percentiles to calculate. + Percents []Float64 `json:"percents,omitempty"` } func (s *PercentilesBucketAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -63,9 +69,16 @@ func (s *PercentilesBucketAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -78,9 +91,16 @@ func (s *PercentilesBucketAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o case "percents": if err := dec.Decode(&s.Percents); err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percolatequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percolatequery.go index 20fd826bc..2b3135967 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percolatequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percolatequery.go @@ -16,29 +16,156 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // PercolateQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L110-L120 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L193-L230 type PercolateQuery struct { - Boost *float32 `json:"boost,omitempty"` - Document json.RawMessage `json:"document,omitempty"` - Documents []json.RawMessage `json:"documents,omitempty"` - Field string `json:"field"` - Id *string `json:"id,omitempty"` - Index *string `json:"index,omitempty"` - Name *string `json:"name,omitempty"` - Preference *string `json:"preference,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Routing *string `json:"routing,omitempty"` - Version *int64 `json:"version,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Document The source of the document being percolated. + Document json.RawMessage `json:"document,omitempty"` + // Documents An array of sources of the documents being percolated. + Documents []json.RawMessage `json:"documents,omitempty"` + // Field Field that holds the indexed queries. The field must use the `percolator` + // mapping type. + Field string `json:"field"` + // Id The ID of a stored document to percolate. 
+ Id *string `json:"id,omitempty"` + // Index The index of a stored document to percolate. + Index *string `json:"index,omitempty"` + // Name The suffix used for the `_percolator_document_slot` field when multiple + // `percolate` queries are specified. + Name *string `json:"name,omitempty"` + // Preference Preference used to fetch document to percolate. + Preference *string `json:"preference,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Routing Routing used to fetch document to percolate. + Routing *string `json:"routing,omitempty"` + // Version The expected version of a stored document to percolate. + Version *int64 `json:"version,omitempty"` +} + +func (s *PercolateQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "document": + if err := dec.Decode(&s.Document); err != nil { + return err + } + + case "documents": + if err := dec.Decode(&s.Documents); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "preference": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = 
string(tmp[:]) + } + s.Preference = &o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil } // NewPercolateQuery returns a PercolateQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percolatorproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percolatorproperty.go index 624a9850b..5092ceed1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percolatorproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/percolatorproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // PercolatorProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L177-L179 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L180-L182 type PercolatorProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` @@ -44,6 +44,7 @@ type PercolatorProperty struct { } func (s *PercolatorProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -63,6 +64,9 @@ func (s *PercolatorProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -71,7 +75,9 @@ func (s *PercolatorProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -350,23 +356,42 @@ func (s *PercolatorProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) 
+ } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -375,7 +400,9 @@ func (s *PercolatorProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -654,9 +681,11 @@ func (s *PercolatorProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -670,6 +699,23 @@ func (s *PercolatorProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s PercolatorProperty) MarshalJSON() ([]byte, error) { + type innerPercolatorProperty PercolatorProperty + tmp := innerPercolatorProperty{ + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Type: s.Type, + } + + tmp.Type = "percolator" + + return json.Marshal(tmp) +} + // NewPercolatorProperty returns a PercolatorProperty. func NewPercolatorProperty() *PercolatorProperty { r := &PercolatorProperty{ @@ -678,7 +724,5 @@ func NewPercolatorProperty() *PercolatorProperty { Properties: make(map[string]Property, 0), } - r.Type = "percolator" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/perpartitioncategorization.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/perpartitioncategorization.go index 690d6f014..9095352b7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/perpartitioncategorization.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/perpartitioncategorization.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PerPartitionCategorization type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Analysis.ts#L93-L102 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Analysis.ts#L150-L159 type PerPartitionCategorization struct { // Enabled To enable this setting, you must also set the `partition_field_name` property // to the same value in every detector that uses the keyword `mlcategory`. @@ -37,6 +45,54 @@ type PerPartitionCategorization struct { StopOnWarn *bool `json:"stop_on_warn,omitempty"` } +func (s *PerPartitionCategorization) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "stop_on_warn": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.StopOnWarn = &value + case bool: + s.StopOnWarn = &v + } + + } + } + return nil +} + // NewPerPartitionCategorization returns a PerPartitionCategorization. 
func NewPerPartitionCategorization() *PerPartitionCategorization { r := &PerPartitionCategorization{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/persistenttaskstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/persistenttaskstatus.go index d220b64da..d610108f9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/persistenttaskstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/persistenttaskstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // PersistentTaskStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L56-L58 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L56-L58 type PersistentTaskStatus struct { Status shutdownstatus.ShutdownStatus `json:"status"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phase.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phase.go index 346ad2f55..59c1aa901 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phase.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phase.go @@ -16,19 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // Phase type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/_types/Phase.ts#L25-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/_types/Phase.ts#L25-L36 type Phase struct { - Actions *IlmActions `json:"actions,omitempty"` + Actions json.RawMessage `json:"actions,omitempty"` Configurations *Configurations `json:"configurations,omitempty"` MinAge *Duration `json:"min_age,omitempty"` } +func (s *Phase) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return err + } + + case "configurations": + if err := dec.Decode(&s.Configurations); err != nil { + return err + } + + case "min_age": + if err := dec.Decode(&s.MinAge); err != nil { + return err + } + + } + } + return nil +} + // NewPhase returns a Phase. func NewPhase() *Phase { r := &Phase{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phases.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phases.go index 8849dab94..5bb6d1266 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phases.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phases.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Phases type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/_types/Phase.ts#L35-L41 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/_types/Phase.ts#L38-L44 type Phases struct { Cold *Phase `json:"cold,omitempty"` Delete *Phase `json:"delete,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phonetictokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phonetictokenfilter.go index 63bbc7e21..a51843a4d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phonetictokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phonetictokenfilter.go @@ -16,11 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticencoder" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticlanguage" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticnametype" @@ -29,7 +35,7 @@ import ( // PhoneticTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/phonetic-plugin.ts#L64-L72 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/phonetic-plugin.ts#L64-L72 type PhoneticTokenFilter struct { Encoder phoneticencoder.PhoneticEncoder `json:"encoder"` Languageset []phoneticlanguage.PhoneticLanguage `json:"languageset"` @@ -41,11 +47,108 @@ type PhoneticTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *PhoneticTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "encoder": + if err := dec.Decode(&s.Encoder); err != nil { + return err + } + + case "languageset": + if err := dec.Decode(&s.Languageset); err != nil { + return err + } + + case "max_code_len": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxCodeLen = &value + case float64: + f := int(v) + s.MaxCodeLen = &f + } + + case "name_type": + if err := dec.Decode(&s.NameType); err != nil { + return err + } + + case "replace": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Replace = &value + case bool: + s.Replace = &v + } + + case "rule_type": + if err := dec.Decode(&s.RuleType); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PhoneticTokenFilter) MarshalJSON() ([]byte, error) { + type 
innerPhoneticTokenFilter PhoneticTokenFilter + tmp := innerPhoneticTokenFilter{ + Encoder: s.Encoder, + Languageset: s.Languageset, + MaxCodeLen: s.MaxCodeLen, + NameType: s.NameType, + Replace: s.Replace, + RuleType: s.RuleType, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "phonetic" + + return json.Marshal(tmp) +} + // NewPhoneticTokenFilter returns a PhoneticTokenFilter. func NewPhoneticTokenFilter() *PhoneticTokenFilter { r := &PhoneticTokenFilter{} - r.Type = "phonetic" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggest.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggest.go index efa5b643f..ab61a32a5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggest.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggest.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PhraseSuggest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L57-L62 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L57-L62 type PhraseSuggest struct { Length int `json:"length"` Offset int `json:"offset"` @@ -30,6 +38,86 @@ type PhraseSuggest struct { Text string `json:"text"` } +func (s *PhraseSuggest) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Length = value + case float64: + f := int(v) + s.Length = f + } + + case "offset": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Offset = value + case float64: + f := int(v) + s.Offset = f + } + + case "options": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewPhraseSuggestOption() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Options = append(s.Options, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Options); err != nil { + return err + } + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + } + } + return nil +} + // NewPhraseSuggest returns a PhraseSuggest. 
func NewPhraseSuggest() *PhraseSuggest { r := &PhraseSuggest{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggestcollate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggestcollate.go index 4e324b342..c99beb307 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggestcollate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggestcollate.go @@ -16,21 +16,76 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // PhraseSuggestCollate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L180-L184 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L330-L343 type PhraseSuggestCollate struct { + // Params Parameters to use if the query is templated. Params map[string]json.RawMessage `json:"params,omitempty"` - Prune *bool `json:"prune,omitempty"` - Query PhraseSuggestCollateQuery `json:"query"` + // Prune Returns all suggestions with an extra `collate_match` option indicating + // whether the generated phrase matched any document. + Prune *bool `json:"prune,omitempty"` + // Query A collate query that is run once for every suggestion. 
+ Query PhraseSuggestCollateQuery `json:"query"` +} + +func (s *PhraseSuggestCollate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return err + } + + case "prune": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Prune = &value + case bool: + s.Prune = &v + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + } + } + return nil } // NewPhraseSuggestCollate returns a PhraseSuggestCollate. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggestcollatequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggestcollatequery.go index a554edf17..31e8b5a0d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggestcollatequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggestcollatequery.go @@ -16,18 +16,65 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PhraseSuggestCollateQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L186-L189 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L345-L354 type PhraseSuggestCollateQuery struct { - Id *string `json:"id,omitempty"` + // Id The search template ID. + Id *string `json:"id,omitempty"` + // Source The query source. Source *string `json:"source,omitempty"` } +func (s *PhraseSuggestCollateQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "source": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Source = &o + + } + } + return nil +} + // NewPhraseSuggestCollateQuery returns a PhraseSuggestCollateQuery. func NewPhraseSuggestCollateQuery() *PhraseSuggestCollateQuery { r := &PhraseSuggestCollateQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggester.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggester.go index 860e981d3..466232409 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggester.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggester.go @@ -16,30 +16,280 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PhraseSuggester type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L191-L205 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L356-L414 type PhraseSuggester struct { - Analyzer *string `json:"analyzer,omitempty"` - Collate *PhraseSuggestCollate `json:"collate,omitempty"` - Confidence *Float64 `json:"confidence,omitempty"` - DirectGenerator []DirectGenerator `json:"direct_generator,omitempty"` - Field string `json:"field"` - ForceUnigrams *bool `json:"force_unigrams,omitempty"` - GramSize *int `json:"gram_size,omitempty"` - Highlight *PhraseSuggestHighlight `json:"highlight,omitempty"` - MaxErrors *Float64 `json:"max_errors,omitempty"` - RealWordErrorLikelihood *Float64 `json:"real_word_error_likelihood,omitempty"` - Separator *string `json:"separator,omitempty"` - ShardSize *int `json:"shard_size,omitempty"` - Size *int `json:"size,omitempty"` - Smoothing *SmoothingModelContainer `json:"smoothing,omitempty"` - Text *string `json:"text,omitempty"` - TokenLimit *int `json:"token_limit,omitempty"` + // Analyzer The analyzer to analyze the suggest text with. + // Defaults to the search analyzer of the suggest field. + Analyzer *string `json:"analyzer,omitempty"` + // Collate Checks each suggestion against the specified query to prune suggestions for + // which no matching docs exist in the index. 
+ Collate *PhraseSuggestCollate `json:"collate,omitempty"` + // Confidence Defines a factor applied to the input phrases score, which is used as a + // threshold for other suggest candidates. + // Only candidates that score higher than the threshold will be included in the + // result. + Confidence *Float64 `json:"confidence,omitempty"` + // DirectGenerator A list of candidate generators that produce a list of possible terms per term + // in the given text. + DirectGenerator []DirectGenerator `json:"direct_generator,omitempty"` + // Field The field to fetch the candidate suggestions from. + // Needs to be set globally or per suggestion. + Field string `json:"field"` + ForceUnigrams *bool `json:"force_unigrams,omitempty"` + // GramSize Sets max size of the n-grams (shingles) in the field. + // If the field doesn’t contain n-grams (shingles), this should be omitted or + // set to `1`. + // If the field uses a shingle filter, the `gram_size` is set to the + // `max_shingle_size` if not explicitly set. + GramSize *int `json:"gram_size,omitempty"` + // Highlight Sets up suggestion highlighting. + // If not provided, no highlighted field is returned. + Highlight *PhraseSuggestHighlight `json:"highlight,omitempty"` + // MaxErrors The maximum percentage of the terms considered to be misspellings in order to + // form a correction. + // This method accepts a float value in the range `[0..1)` as a fraction of the + // actual query terms or a number `>=1` as an absolute number of query terms. + MaxErrors *Float64 `json:"max_errors,omitempty"` + // RealWordErrorLikelihood The likelihood of a term being misspelled even if the term exists in the + // dictionary. + RealWordErrorLikelihood *Float64 `json:"real_word_error_likelihood,omitempty"` + // Separator The separator that is used to separate terms in the bigram field. + // If not set, the whitespace character is used as a separator. 
+ Separator *string `json:"separator,omitempty"` + // ShardSize Sets the maximum number of suggested terms to be retrieved from each + // individual shard. + ShardSize *int `json:"shard_size,omitempty"` + // Size The maximum corrections to be returned per suggest text token. + Size *int `json:"size,omitempty"` + // Smoothing The smoothing model used to balance weight between infrequent grams (grams + // (shingles) are not existing in the index) and frequent grams (appear at least + // once in the index). + // The default model is Stupid Backoff. + Smoothing *SmoothingModelContainer `json:"smoothing,omitempty"` + // Text The text/query to provide suggestions for. + Text *string `json:"text,omitempty"` + TokenLimit *int `json:"token_limit,omitempty"` +} + +func (s *PhraseSuggester) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "collate": + if err := dec.Decode(&s.Collate); err != nil { + return err + } + + case "confidence": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Confidence = &f + case float64: + f := Float64(v) + s.Confidence = &f + } + + case "direct_generator": + if err := dec.Decode(&s.DirectGenerator); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "force_unigrams": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.ForceUnigrams = &value + case bool: + 
s.ForceUnigrams = &v + } + + case "gram_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.GramSize = &value + case float64: + f := int(v) + s.GramSize = &f + } + + case "highlight": + if err := dec.Decode(&s.Highlight); err != nil { + return err + } + + case "max_errors": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.MaxErrors = &f + case float64: + f := Float64(v) + s.MaxErrors = &f + } + + case "real_word_error_likelihood": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.RealWordErrorLikelihood = &f + case float64: + f := Float64(v) + s.RealWordErrorLikelihood = &f + } + + case "separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Separator = &o + + case "shard_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "smoothing": + if err := dec.Decode(&s.Smoothing); err != nil { + return err + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = &o + + case 
"token_limit": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TokenLimit = &value + case float64: + f := int(v) + s.TokenLimit = &f + } + + } + } + return nil } // NewPhraseSuggester returns a PhraseSuggester. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggesthighlight.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggesthighlight.go index c7a2e0689..6261ac880 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggesthighlight.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggesthighlight.go @@ -16,16 +16,72 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PhraseSuggestHighlight type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L207-L210 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L416-L425 type PhraseSuggestHighlight struct { + // PostTag Use in conjunction with `pre_tag` to define the HTML tags to use for the + // highlighted text. PostTag string `json:"post_tag"` - PreTag string `json:"pre_tag"` + // PreTag Use in conjunction with `post_tag` to define the HTML tags to use for the + // highlighted text. 
+ PreTag string `json:"pre_tag"` +} + +func (s *PhraseSuggestHighlight) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "post_tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PostTag = o + + case "pre_tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PreTag = o + + } + } + return nil } // NewPhraseSuggestHighlight returns a PhraseSuggestHighlight. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggestoption.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggestoption.go index e928bbafe..5ec29bae3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggestoption.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/phrasesuggestoption.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PhraseSuggestOption type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L86-L91 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L86-L91 type PhraseSuggestOption struct { CollateMatch *bool `json:"collate_match,omitempty"` Highlighted *string `json:"highlighted,omitempty"` @@ -30,6 +38,80 @@ type PhraseSuggestOption struct { Text string `json:"text"` } +func (s *PhraseSuggestOption) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collate_match": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.CollateMatch = &value + case bool: + s.CollateMatch = &v + } + + case "highlighted": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Highlighted = &o + + case "score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Score = f + case float64: + f := Float64(v) + s.Score = f + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + } + } + return nil +} + // NewPhraseSuggestOption returns a PhraseSuggestOption. 
func NewPhraseSuggestOption() *PhraseSuggestOption { r := &PhraseSuggestOption{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pinneddoc.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pinneddoc.go index d431ca247..0dee3aaf7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pinneddoc.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pinneddoc.go @@ -16,18 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // PinnedDoc type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L132-L135 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L253-L262 type PinnedDoc struct { - Id_ string `json:"_id"` + // Id_ The unique document ID. + Id_ string `json:"_id"` + // Index_ The index that contains the document. Index_ string `json:"_index"` } +func (s *PinnedDoc) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + } + } + return nil +} + // NewPinnedDoc returns a PinnedDoc. 
func NewPinnedDoc() *PinnedDoc { r := &PinnedDoc{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pinnedquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pinnedquery.go index 42ed95647..855794448 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pinnedquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pinnedquery.go @@ -16,19 +16,101 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PinnedQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L122-L130 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L232-L251 type PinnedQuery struct { - Boost *float32 `json:"boost,omitempty"` - Docs []PinnedDoc `json:"docs,omitempty"` - Ids []string `json:"ids,omitempty"` - Organic *Query `json:"organic,omitempty"` - QueryName_ *string `json:"_name,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Docs Documents listed in the order they are to appear in results. + // Required if `ids` is not specified. + Docs []PinnedDoc `json:"docs,omitempty"` + // Ids Document IDs listed in the order they are to appear in results. 
+ // Required if `docs` is not specified. + Ids []string `json:"ids,omitempty"` + // Organic Any choice of query used to rank documents which will be ranked below the + // "pinned" documents. + Organic *Query `json:"organic,omitempty"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *PinnedQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "docs": + if err := dec.Decode(&s.Docs); err != nil { + return err + } + + case "ids": + if err := dec.Decode(&s.Ids); err != nil { + return err + } + + case "organic": + if err := dec.Decode(&s.Organic); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil } // NewPinnedQuery returns a PinnedQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelineconfig.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelineconfig.go index 4787822c8..3df038811 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelineconfig.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelineconfig.go @@ -16,17 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PipelineConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Pipeline.ts#L44-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Pipeline.ts#L57-L71 type PipelineConfig struct { - Description *string `json:"description,omitempty"` - Processors []ProcessorContainer `json:"processors"` - Version *int64 `json:"version,omitempty"` + // Description Description of the ingest pipeline. + Description *string `json:"description,omitempty"` + // Processors Processors used to perform transformations on documents before indexing. + // Processors run sequentially in the order specified. + Processors []ProcessorContainer `json:"processors"` + // Version Version number used by external systems to track ingest pipelines. + Version *int64 `json:"version,omitempty"` +} + +func (s *PipelineConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "processors": + if err := dec.Decode(&s.Processors); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil } // NewPipelineConfig returns a PipelineConfig. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelinemetadata.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelinemetadata.go index 384f16310..f42593c6a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelinemetadata.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelinemetadata.go @@ -16,18 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PipelineMetadata type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/logstash/_types/Pipeline.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/logstash/_types/Pipeline.ts#L23-L26 type PipelineMetadata struct { Type string `json:"type"` Version string `json:"version"` } +func (s *PipelineMetadata) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + case "version": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Version = o + + } + } + return nil +} + // NewPipelineMetadata returns a PipelineMetadata. 
func NewPipelineMetadata() *PipelineMetadata { r := &PipelineMetadata{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelineprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelineprocessor.go index b9f23f095..e02cb96f9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelineprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelineprocessor.go @@ -16,21 +16,133 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PipelineProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L306-L309 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L917-L928 type PipelineProcessor struct { - Description *string `json:"description,omitempty"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissingPipeline *bool `json:"ignore_missing_pipeline,omitempty"` - Name string `json:"name"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Tag *string `json:"tag,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. 
+ IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissingPipeline Whether to ignore missing pipelines instead of failing. + IgnoreMissingPipeline *bool `json:"ignore_missing_pipeline,omitempty"` + // Name The name of the pipeline to execute. + // Supports template snippets. + Name string `json:"name"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` +} + +func (s *PipelineProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing_pipeline": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissingPipeline = &value + case bool: + s.IgnoreMissingPipeline = &v + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != 
nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil } // NewPipelineProcessor returns a PipelineProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelinesettings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelinesettings.go index 3217296fa..8ada589e7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelinesettings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelinesettings.go @@ -16,21 +16,165 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PipelineSettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/logstash/_types/Pipeline.ts#L28-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/logstash/_types/Pipeline.ts#L28-L59 type PipelineSettings struct { - PipelineBatchDelay int `json:"pipeline.batch.delay"` - PipelineBatchSize int `json:"pipeline.batch.size"` - PipelineWorkers int `json:"pipeline.workers"` - QueueCheckpointWrites int `json:"queue.checkpoint.writes"` - QueueMaxBytesNumber int `json:"queue.max_bytes.number"` - QueueMaxBytesUnits string `json:"queue.max_bytes.units"` - QueueType string `json:"queue.type"` + // PipelineBatchDelay When creating pipeline event batches, how long in milliseconds to wait for + // each event before dispatching an undersized batch to pipeline workers. 
+ PipelineBatchDelay int `json:"pipeline.batch.delay"` + // PipelineBatchSize The maximum number of events an individual worker thread will collect from + // inputs before attempting to execute its filters and outputs. + PipelineBatchSize int `json:"pipeline.batch.size"` + // PipelineWorkers The number of workers that will, in parallel, execute the filter and output + // stages of the pipeline. + PipelineWorkers int `json:"pipeline.workers"` + // QueueCheckpointWrites The maximum number of written events before forcing a checkpoint when + // persistent queues are enabled (`queue.type: persisted`). + QueueCheckpointWrites int `json:"queue.checkpoint.writes"` + // QueueMaxBytesNumber The total capacity of the queue (`queue.type: persisted`) in number of bytes. + QueueMaxBytesNumber int `json:"queue.max_bytes.number"` + // QueueMaxBytesUnits The total capacity of the queue (`queue.type: persisted`) in terms of units + // of bytes. + QueueMaxBytesUnits string `json:"queue.max_bytes.units"` + // QueueType The internal queuing model to use for event buffering. 
+ QueueType string `json:"queue.type"` +} + +func (s *PipelineSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "pipeline.batch.delay": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PipelineBatchDelay = value + case float64: + f := int(v) + s.PipelineBatchDelay = f + } + + case "pipeline.batch.size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PipelineBatchSize = value + case float64: + f := int(v) + s.PipelineBatchSize = f + } + + case "pipeline.workers": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PipelineWorkers = value + case float64: + f := int(v) + s.PipelineWorkers = f + } + + case "queue.checkpoint.writes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.QueueCheckpointWrites = value + case float64: + f := int(v) + s.QueueCheckpointWrites = f + } + + case "queue.max_bytes.number": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.QueueMaxBytesNumber = value + case float64: + f := int(v) + s.QueueMaxBytesNumber = f + } + + case "queue.max_bytes.units": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueueMaxBytesUnits = o + + case "queue.type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil 
{ + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueueType = o + + } + } + return nil } // NewPipelineSettings returns a PipelineSettings. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelinesimulation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelinesimulation.go index 03b21a7f7..487de7bc3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelinesimulation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipelinesimulation.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actionstatusoptions" ) // PipelineSimulation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/simulate/types.ts#L33-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/simulate/types.ts#L33-L39 type PipelineSimulation struct { Doc *DocumentSimulation `json:"doc,omitempty"` ProcessorResults []PipelineSimulation `json:"processor_results,omitempty"` @@ -35,6 +41,65 @@ type PipelineSimulation struct { Tag *string `json:"tag,omitempty"` } +func (s *PipelineSimulation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc": + if err := dec.Decode(&s.Doc); err != nil { + return err + } + + case "processor_results": + if err := dec.Decode(&s.ProcessorResults); err != nil { + return err + } + + case "processor_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ProcessorType = &o + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil +} + // NewPipelineSimulation returns a PipelineSimulation. 
func NewPipelineSimulation() *PipelineSimulation { r := &PipelineSimulation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipeseparatedflagssimplequerystringflag.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipeseparatedflagssimplequerystringflag.go new file mode 100644 index 000000000..9e81ea2f9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pipeseparatedflagssimplequerystringflag.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +// PipeSeparatedFlagsSimpleQueryStringFlag holds the union for the following types: +// +// simplequerystringflag.SimpleQueryStringFlag +// string +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_spec_utils/PipeSeparatedFlags.ts#L20-L27 +type PipeSeparatedFlagsSimpleQueryStringFlag interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pivot.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pivot.go index 5015531da..c370a7ff6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pivot.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pivot.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Pivot type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/_types/Transform.ts#L54-L68 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/_types/Transform.ts#L54-L68 type Pivot struct { // Aggregations Defines how to aggregate the grouped data. 
The following aggregations are // currently supported: average, bucket diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pivotgroupbycontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pivotgroupbycontainer.go index 4d862ebe9..68fdbda83 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pivotgroupbycontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pivotgroupbycontainer.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // PivotGroupByContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/_types/Transform.ts#L70-L78 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/_types/Transform.ts#L70-L78 type PivotGroupByContainer struct { DateHistogram *DateHistogramAggregation `json:"date_histogram,omitempty"` GeotileGrid *GeoTileGridAggregation `json:"geotile_grid,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pluginsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pluginsrecord.go index 6c04f7a38..f800529ac 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pluginsrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pluginsrecord.go @@ -16,28 +16,107 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PluginsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/plugins/types.ts#L22-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/plugins/types.ts#L22-L52 type PluginsRecord struct { - // Component component + // Component The component name. Component *string `json:"component,omitempty"` - // Description plugin details + // Description The plugin details. Description *string `json:"description,omitempty"` - // Id unique node id + // Id The unique node identifier. Id *string `json:"id,omitempty"` - // Name node name + // Name The node name. Name *string `json:"name,omitempty"` - // Type plugin type + // Type The plugin type. Type *string `json:"type,omitempty"` - // Version component version + // Version The component version. 
Version *string `json:"version,omitempty"` } +func (s *PluginsRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "component", "c": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Component = &o + + case "description", "d": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "name", "n": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "type", "t": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + case "version", "v": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewPluginsRecord returns a PluginsRecord. func NewPluginsRecord() *PluginsRecord { r := &PluginsRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pluginsstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pluginsstatus.go index 411a6ff28..db5dd67ce 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pluginsstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pluginsstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // PluginsStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L60-L62 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L60-L62 type PluginsStatus struct { Status shutdownstatus.ShutdownStatus `json:"status"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pluginstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pluginstats.go index 8b1a01d0d..481753942 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pluginstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pluginstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PluginStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L138-L148 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L180-L190 type PluginStats struct { Classname string `json:"classname"` Description string `json:"description"` @@ -35,6 +43,103 @@ type PluginStats struct { Version string `json:"version"` } +func (s *PluginStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classname": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Classname = o + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "elasticsearch_version": + if err := dec.Decode(&s.ElasticsearchVersion); err != nil { + return err + } + + case "extended_plugins": + if err := dec.Decode(&s.ExtendedPlugins); err != nil { + return err + } + + case "has_native_controller": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.HasNativeController = value + case bool: + s.HasNativeController = v + } + + case "java_version": + if err := dec.Decode(&s.JavaVersion); err != nil { + return err + } + + case "licensed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Licensed = value + case bool: + s.Licensed = v + } + + case "name": + if err := 
dec.Decode(&s.Name); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewPluginStats returns a PluginStats. func NewPluginStats() *PluginStats { r := &PluginStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pointintimereference.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pointintimereference.go index c7eb32e4c..bd139fb58 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pointintimereference.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pointintimereference.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // PointInTimeReference type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/PointInTimeReference.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/PointInTimeReference.ts#L23-L26 type PointInTimeReference struct { Id string `json:"id"` KeepAlive Duration `json:"keep_alive,omitempty"` } +func (s *PointInTimeReference) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "keep_alive": + if err := dec.Decode(&s.KeepAlive); err != nil { + return err + } + + } + } + return nil +} + // NewPointInTimeReference returns a PointInTimeReference. func NewPointInTimeReference() *PointInTimeReference { r := &PointInTimeReference{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pointproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pointproperty.go index b211da202..e2012eb6a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pointproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pointproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // PointProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/geo.ts#L62-L67 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/geo.ts#L62-L67 type PointProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -51,6 +51,7 @@ type PointProperty struct { } func (s *PointProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -65,13 +66,33 @@ func (s *PointProperty) UnmarshalJSON(data []byte) error { switch t { case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -80,6 +101,9 @@ func (s *PointProperty) UnmarshalJSON(data []byte) error { } case 
"fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -88,7 +112,9 @@ func (s *PointProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -367,38 +393,82 @@ func (s *PointProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "ignore_z_value": - if err := dec.Decode(&s.IgnoreZValue); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreZValue = &value + case bool: + s.IgnoreZValue = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "null_value": - if err := dec.Decode(&s.NullValue); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NullValue = &o case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -407,7 +477,9 @@ func (s *PointProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -686,20 +758,38 @@ func (s *PointProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -712,6 +802,30 @@ func (s *PointProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s PointProperty) MarshalJSON() ([]byte, error) { + type innerPointProperty PointProperty + tmp := innerPointProperty{ + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + IgnoreZValue: s.IgnoreZValue, + Meta: s.Meta, + NullValue: s.NullValue, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, 
+ } + + tmp.Type = "point" + + return json.Marshal(tmp) +} + // NewPointProperty returns a PointProperty. func NewPointProperty() *PointProperty { r := &PointProperty{ @@ -720,7 +834,5 @@ func NewPointProperty() *PointProperty { Properties: make(map[string]Property, 0), } - r.Type = "point" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pool.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pool.go index 8f42aafb9..59c8cfa16 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pool.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pool.go @@ -16,18 +16,110 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Pool type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L345-L350 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L878-L895 type Pool struct { - MaxInBytes *int64 `json:"max_in_bytes,omitempty"` - PeakMaxInBytes *int64 `json:"peak_max_in_bytes,omitempty"` + // MaxInBytes Maximum amount of memory, in bytes, available for use by the heap. + MaxInBytes *int64 `json:"max_in_bytes,omitempty"` + // PeakMaxInBytes Largest amount of memory, in bytes, historically used by the heap. + PeakMaxInBytes *int64 `json:"peak_max_in_bytes,omitempty"` + // PeakUsedInBytes Largest amount of memory, in bytes, historically used by the heap. 
PeakUsedInBytes *int64 `json:"peak_used_in_bytes,omitempty"` - UsedInBytes *int64 `json:"used_in_bytes,omitempty"` + // UsedInBytes Memory, in bytes, used by the heap. + UsedInBytes *int64 `json:"used_in_bytes,omitempty"` +} + +func (s *Pool) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MaxInBytes = &value + case float64: + f := int64(v) + s.MaxInBytes = &f + } + + case "peak_max_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PeakMaxInBytes = &value + case float64: + f := int64(v) + s.PeakMaxInBytes = &f + } + + case "peak_used_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PeakUsedInBytes = &value + case float64: + f := int64(v) + s.PeakUsedInBytes = &f + } + + case "used_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.UsedInBytes = &value + case float64: + f := int64(v) + s.UsedInBytes = &f + } + + } + } + return nil } // NewPool returns a Pool. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/porterstemtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/porterstemtokenfilter.go index 7b3e27c80..725c8fdb2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/porterstemtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/porterstemtokenfilter.go @@ -16,23 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // PorterStemTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L291-L293 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L292-L294 type PorterStemTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *PorterStemTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PorterStemTokenFilter) MarshalJSON() ([]byte, error) { + type innerPorterStemTokenFilter PorterStemTokenFilter + tmp := innerPorterStemTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = 
"porter_stem" + + return json.Marshal(tmp) +} + // NewPorterStemTokenFilter returns a PorterStemTokenFilter. func NewPorterStemTokenFilter() *PorterStemTokenFilter { r := &PorterStemTokenFilter{} - r.Type = "porter_stem" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/postmigrationfeature.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/postmigrationfeature.go index 6b6c2bc1a..0fedb8103 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/postmigrationfeature.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/postmigrationfeature.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PostMigrationFeature type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L27-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/migration/post_feature_upgrade/PostFeatureUpgradeResponse.ts#L27-L29 type PostMigrationFeature struct { FeatureName string `json:"feature_name"` } +func (s *PostMigrationFeature) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "feature_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeatureName = o + + } + } + return nil +} + // NewPostMigrationFeature returns a PostMigrationFeature. func NewPostMigrationFeature() *PostMigrationFeature { r := &PostMigrationFeature{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/predicatetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/predicatetokenfilter.go index f8f642369..fabd1802e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/predicatetokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/predicatetokenfilter.go @@ -16,24 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // PredicateTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L295-L298 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L296-L299 type PredicateTokenFilter struct { Script Script `json:"script"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *PredicateTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s PredicateTokenFilter) MarshalJSON() ([]byte, error) { + type innerPredicateTokenFilter PredicateTokenFilter + tmp := innerPredicateTokenFilter{ + Script: s.Script, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "predicate_token_filter" + + return json.Marshal(tmp) +} + // NewPredicateTokenFilter returns a PredicateTokenFilter. func NewPredicateTokenFilter() *PredicateTokenFilter { r := &PredicateTokenFilter{} - r.Type = "predicate_token_filter" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/predictedvalue.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/predictedvalue.go index b771073f1..236148013 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/predictedvalue.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/predictedvalue.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -27,5 +27,5 @@ package types // bool // int // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L416-L416 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L457-L457 type PredictedValue interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/prefixquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/prefixquery.go index cac310d2c..a73daa48b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/prefixquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/prefixquery.go @@ -16,19 +16,122 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PrefixQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/term.ts#L57-L66 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/term.ts#L87-L106 type PrefixQuery struct { - Boost *float32 `json:"boost,omitempty"` - CaseInsensitive *bool `json:"case_insensitive,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Rewrite *string `json:"rewrite,omitempty"` - Value string `json:"value"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // CaseInsensitive Allows ASCII case insensitive matching of the value with the indexed field + // values when set to `true`. + // Default is `false` which means the case sensitivity of matching depends on + // the underlying field’s mapping. + CaseInsensitive *bool `json:"case_insensitive,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Rewrite Method used to rewrite the query. + Rewrite *string `json:"rewrite,omitempty"` + // Value Beginning characters of terms you wish to find in the provided field. 
+ Value string `json:"value"` +} + +func (s *PrefixQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Value) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "case_insensitive": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.CaseInsensitive = &value + case bool: + s.CaseInsensitive = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "rewrite": + if err := dec.Decode(&s.Rewrite); err != nil { + return err + } + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Value = o + + } + } + return nil } // NewPrefixQuery returns a PrefixQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/preprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/preprocessor.go index 8edca5866..4196f07f7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/preprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/preprocessor.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Preprocessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model/types.ts#L31-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model/types.ts#L31-L36 type Preprocessor struct { FrequencyEncoding *FrequencyEncodingPreprocessor `json:"frequency_encoding,omitempty"` OneHotEncoding *OneHotEncodingPreprocessor `json:"one_hot_encoding,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pressurememory.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pressurememory.go index cac751ebf..daf4abcd7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pressurememory.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/pressurememory.go @@ -16,27 +16,220 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PressureMemory type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L66-L80 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L144-L199 type PressureMemory struct { - All ByteSize `json:"all,omitempty"` - AllInBytes *int64 `json:"all_in_bytes,omitempty"` - CombinedCoordinatingAndPrimary ByteSize `json:"combined_coordinating_and_primary,omitempty"` - CombinedCoordinatingAndPrimaryInBytes *int64 `json:"combined_coordinating_and_primary_in_bytes,omitempty"` - Coordinating ByteSize `json:"coordinating,omitempty"` - CoordinatingInBytes *int64 `json:"coordinating_in_bytes,omitempty"` - CoordinatingRejections *int64 `json:"coordinating_rejections,omitempty"` - Primary ByteSize `json:"primary,omitempty"` - PrimaryInBytes *int64 `json:"primary_in_bytes,omitempty"` - PrimaryRejections *int64 `json:"primary_rejections,omitempty"` - Replica ByteSize `json:"replica,omitempty"` - ReplicaInBytes *int64 `json:"replica_in_bytes,omitempty"` - ReplicaRejections *int64 `json:"replica_rejections,omitempty"` + // All Memory consumed by indexing requests in the coordinating, primary, or replica + // stage. + All ByteSize `json:"all,omitempty"` + // AllInBytes Memory consumed, in bytes, by indexing requests in the coordinating, primary, + // or replica stage. + AllInBytes *int64 `json:"all_in_bytes,omitempty"` + // CombinedCoordinatingAndPrimary Memory consumed by indexing requests in the coordinating or primary stage. + // This value is not the sum of coordinating and primary as a node can reuse the + // coordinating memory if the primary stage is executed locally. + CombinedCoordinatingAndPrimary ByteSize `json:"combined_coordinating_and_primary,omitempty"` + // CombinedCoordinatingAndPrimaryInBytes Memory consumed, in bytes, by indexing requests in the coordinating or + // primary stage. 
+ // This value is not the sum of coordinating and primary as a node can reuse the + // coordinating memory if the primary stage is executed locally. + CombinedCoordinatingAndPrimaryInBytes *int64 `json:"combined_coordinating_and_primary_in_bytes,omitempty"` + // Coordinating Memory consumed by indexing requests in the coordinating stage. + Coordinating ByteSize `json:"coordinating,omitempty"` + // CoordinatingInBytes Memory consumed, in bytes, by indexing requests in the coordinating stage. + CoordinatingInBytes *int64 `json:"coordinating_in_bytes,omitempty"` + // CoordinatingRejections Number of indexing requests rejected in the coordinating stage. + CoordinatingRejections *int64 `json:"coordinating_rejections,omitempty"` + // Primary Memory consumed by indexing requests in the primary stage. + Primary ByteSize `json:"primary,omitempty"` + // PrimaryInBytes Memory consumed, in bytes, by indexing requests in the primary stage. + PrimaryInBytes *int64 `json:"primary_in_bytes,omitempty"` + // PrimaryRejections Number of indexing requests rejected in the primary stage. + PrimaryRejections *int64 `json:"primary_rejections,omitempty"` + // Replica Memory consumed by indexing requests in the replica stage. + Replica ByteSize `json:"replica,omitempty"` + // ReplicaInBytes Memory consumed, in bytes, by indexing requests in the replica stage. + ReplicaInBytes *int64 `json:"replica_in_bytes,omitempty"` + // ReplicaRejections Number of indexing requests rejected in the replica stage. 
+ ReplicaRejections *int64 `json:"replica_rejections,omitempty"` +} + +func (s *PressureMemory) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "all": + if err := dec.Decode(&s.All); err != nil { + return err + } + + case "all_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.AllInBytes = &value + case float64: + f := int64(v) + s.AllInBytes = &f + } + + case "combined_coordinating_and_primary": + if err := dec.Decode(&s.CombinedCoordinatingAndPrimary); err != nil { + return err + } + + case "combined_coordinating_and_primary_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CombinedCoordinatingAndPrimaryInBytes = &value + case float64: + f := int64(v) + s.CombinedCoordinatingAndPrimaryInBytes = &f + } + + case "coordinating": + if err := dec.Decode(&s.Coordinating); err != nil { + return err + } + + case "coordinating_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CoordinatingInBytes = &value + case float64: + f := int64(v) + s.CoordinatingInBytes = &f + } + + case "coordinating_rejections": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CoordinatingRejections = &value + case float64: + f := int64(v) + s.CoordinatingRejections = &f + } + + case "primary": + if err := dec.Decode(&s.Primary); err != nil { + return err + } + + case "primary_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := 
tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PrimaryInBytes = &value + case float64: + f := int64(v) + s.PrimaryInBytes = &f + } + + case "primary_rejections": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PrimaryRejections = &value + case float64: + f := int64(v) + s.PrimaryRejections = &f + } + + case "replica": + if err := dec.Decode(&s.Replica); err != nil { + return err + } + + case "replica_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ReplicaInBytes = &value + case float64: + f := int64(v) + s.ReplicaInBytes = &f + } + + case "replica_rejections": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ReplicaRejections = &value + case float64: + f := int64(v) + s.ReplicaRejections = &f + } + + } + } + return nil } // NewPressureMemory returns a PressureMemory. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/privileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/privileges.go index a54f6e9d9..017ca38b2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/privileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/privileges.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Privileges type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/has_privileges/types.ts#L48-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/has_privileges/types.ts#L48-L48 type Privileges map[string]bool diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/privilegesactions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/privilegesactions.go index c72d73147..f64e061c1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/privilegesactions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/privilegesactions.go @@ -16,22 +16,73 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // PrivilegesActions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/put_privileges/types.ts#L22-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/put_privileges/types.ts#L22-L27 type PrivilegesActions struct { - Actions []string `json:"actions"` - Application *string `json:"application,omitempty"` - Metadata map[string]json.RawMessage `json:"metadata,omitempty"` - Name *string `json:"name,omitempty"` + Actions []string `json:"actions"` + Application *string `json:"application,omitempty"` + Metadata Metadata `json:"metadata,omitempty"` + Name *string `json:"name,omitempty"` +} + +func (s *PrivilegesActions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return err + } + + case "application": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Application = &o + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil } // NewPrivilegesActions returns a PrivilegesActions. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/privilegescheck.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/privilegescheck.go index 4e93fe053..43a2f5df5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/privilegescheck.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/privilegescheck.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // PrivilegesCheck type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/has_privileges_user_profile/types.ts#L30-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/has_privileges_user_profile/types.ts#L30-L37 type PrivilegesCheck struct { Application []ApplicationPrivilegesCheck `json:"application,omitempty"` // Cluster A list of the cluster privileges that you want to check. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/process.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/process.go index 8c44b70d0..9112a2292 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/process.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/process.go @@ -16,19 +16,112 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Process type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L381-L387 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L953-L975 type Process struct { - Cpu *Cpu `json:"cpu,omitempty"` - MaxFileDescriptors *int `json:"max_file_descriptors,omitempty"` - Mem *MemoryStats `json:"mem,omitempty"` - OpenFileDescriptors *int `json:"open_file_descriptors,omitempty"` - Timestamp *int64 `json:"timestamp,omitempty"` + // Cpu Contains CPU statistics for the node. + Cpu *Cpu `json:"cpu,omitempty"` + // MaxFileDescriptors Maximum number of file descriptors allowed on the system, or `-1` if not + // supported. + MaxFileDescriptors *int `json:"max_file_descriptors,omitempty"` + // Mem Contains virtual memory statistics for the node. + Mem *MemoryStats `json:"mem,omitempty"` + // OpenFileDescriptors Number of opened file descriptors associated with the current or `-1` if not + // supported. + OpenFileDescriptors *int `json:"open_file_descriptors,omitempty"` + // Timestamp Last time the statistics were refreshed. + // Recorded in milliseconds since the Unix Epoch. 
+ Timestamp *int64 `json:"timestamp,omitempty"` +} + +func (s *Process) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cpu": + if err := dec.Decode(&s.Cpu); err != nil { + return err + } + + case "max_file_descriptors": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxFileDescriptors = &value + case float64: + f := int(v) + s.MaxFileDescriptors = &f + } + + case "mem": + if err := dec.Decode(&s.Mem); err != nil { + return err + } + + case "open_file_descriptors": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.OpenFileDescriptors = &value + case float64: + f := int(v) + s.OpenFileDescriptors = &f + } + + case "timestamp": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Timestamp = &value + case float64: + f := int64(v) + s.Timestamp = &f + } + + } + } + return nil } // NewProcess returns a Process. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/processor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/processor.go index de668b840..5e4671a0b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/processor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/processor.go @@ -16,20 +16,102 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Processor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L162-L167 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L384-L401 type Processor struct { - Count *int64 `json:"count,omitempty"` - Current *int64 `json:"current,omitempty"` - Failed *int64 `json:"failed,omitempty"` + // Count Number of documents transformed by the processor. + Count *int64 `json:"count,omitempty"` + // Current Number of documents currently being transformed by the processor. + Current *int64 `json:"current,omitempty"` + // Failed Number of failed operations for the processor. + Failed *int64 `json:"failed,omitempty"` + // TimeInMillis Time, in milliseconds, spent by the processor transforming documents. 
TimeInMillis *int64 `json:"time_in_millis,omitempty"` } +func (s *Processor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = &value + case float64: + f := int64(v) + s.Count = &f + } + + case "current": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Current = &value + case float64: + f := int64(v) + s.Current = &f + } + + case "failed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Failed = &value + case float64: + f := int64(v) + s.Failed = &f + } + + case "time_in_millis": + if err := dec.Decode(&s.TimeInMillis); err != nil { + return err + } + + } + } + return nil +} + // NewProcessor returns a Processor. func NewProcessor() *Processor { r := &Processor{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/processorcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/processorcontainer.go index 60f46a2f6..3005a05d9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/processorcontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/processorcontainer.go @@ -16,48 +16,346 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ProcessorContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L28-L67 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L28-L233 type ProcessorContainer struct { - Append *AppendProcessor `json:"append,omitempty"` - Attachment *AttachmentProcessor `json:"attachment,omitempty"` - Bytes *BytesProcessor `json:"bytes,omitempty"` - Circle *CircleProcessor `json:"circle,omitempty"` - Convert *ConvertProcessor `json:"convert,omitempty"` - Csv *CsvProcessor `json:"csv,omitempty"` - Date *DateProcessor `json:"date,omitempty"` - DateIndexName *DateIndexNameProcessor `json:"date_index_name,omitempty"` - Dissect *DissectProcessor `json:"dissect,omitempty"` - DotExpander *DotExpanderProcessor `json:"dot_expander,omitempty"` - Drop *DropProcessor `json:"drop,omitempty"` - Enrich *EnrichProcessor `json:"enrich,omitempty"` - Fail *FailProcessor `json:"fail,omitempty"` - Foreach *ForeachProcessor `json:"foreach,omitempty"` - Geoip *GeoIpProcessor `json:"geoip,omitempty"` - Grok *GrokProcessor `json:"grok,omitempty"` - Gsub *GsubProcessor `json:"gsub,omitempty"` - Inference *InferenceProcessor `json:"inference,omitempty"` - Join *JoinProcessor `json:"join,omitempty"` - Json *JsonProcessor `json:"json,omitempty"` - Kv *KeyValueProcessor `json:"kv,omitempty"` - Lowercase *LowercaseProcessor `json:"lowercase,omitempty"` - Pipeline *PipelineProcessor `json:"pipeline,omitempty"` - Remove *RemoveProcessor `json:"remove,omitempty"` - Rename *RenameProcessor `json:"rename,omitempty"` - Script Script 
`json:"script,omitempty"` - Set *SetProcessor `json:"set,omitempty"` + // Append Appends one or more values to an existing array if the field already exists + // and it is an array. + // Converts a scalar to an array and appends one or more values to it if the + // field exists and it is a scalar. + // Creates an array containing the provided values if the field doesn’t exist. + // Accepts a single value or an array of values. + Append *AppendProcessor `json:"append,omitempty"` + // Attachment The attachment processor lets Elasticsearch extract file attachments in + // common formats (such as PPT, XLS, and PDF) by using the Apache text + // extraction library Tika. + Attachment *AttachmentProcessor `json:"attachment,omitempty"` + // Bytes Converts a human readable byte value (for example `1kb`) to its value in + // bytes (for example `1024`). + // If the field is an array of strings, all members of the array will be + // converted. + // Supported human readable units are "b", "kb", "mb", "gb", "tb", "pb" case + // insensitive. + // An error will occur if the field is not a supported format or resultant value + // exceeds 2^63. + Bytes *BytesProcessor `json:"bytes,omitempty"` + // Circle Converts circle definitions of shapes to regular polygons which approximate + // them. + Circle *CircleProcessor `json:"circle,omitempty"` + // Convert Converts a field in the currently ingested document to a different type, such + // as converting a string to an integer. + // If the field value is an array, all members will be converted. + Convert *ConvertProcessor `json:"convert,omitempty"` + // Csv Extracts fields from CSV line out of a single text field within a document. + // Any empty field in CSV will be skipped. + Csv *CsvProcessor `json:"csv,omitempty"` + // Date Parses dates from fields, and then uses the date or timestamp as the + // timestamp for the document. 
+ Date *DateProcessor `json:"date,omitempty"` + // DateIndexName The purpose of this processor is to point documents to the right time based + // index based on a date or timestamp field in a document by using the date math + // index name support. + DateIndexName *DateIndexNameProcessor `json:"date_index_name,omitempty"` + // Dissect Extracts structured fields out of a single text field by matching the text + // field against a delimiter-based pattern. + Dissect *DissectProcessor `json:"dissect,omitempty"` + // DotExpander Expands a field with dots into an object field. + // This processor allows fields with dots in the name to be accessible by other + // processors in the pipeline. + // Otherwise these fields can’t be accessed by any processor. + DotExpander *DotExpanderProcessor `json:"dot_expander,omitempty"` + // Drop Drops the document without raising any errors. + // This is useful to prevent the document from getting indexed based on some + // condition. + Drop *DropProcessor `json:"drop,omitempty"` + // Enrich The `enrich` processor can enrich documents with data from another index. + Enrich *EnrichProcessor `json:"enrich,omitempty"` + // Fail Raises an exception. + // This is useful for when you expect a pipeline to fail and want to relay a + // specific message to the requester. + Fail *FailProcessor `json:"fail,omitempty"` + // Foreach Runs an ingest processor on each element of an array or object. + Foreach *ForeachProcessor `json:"foreach,omitempty"` + // Geoip The `geoip` processor adds information about the geographical location of an + // IPv4 or IPv6 address. + Geoip *GeoIpProcessor `json:"geoip,omitempty"` + // Grok Extracts structured fields out of a single text field within a document. + // You choose which field to extract matched fields from, as well as the grok + // pattern you expect will match. + // A grok pattern is like a regular expression that supports aliased expressions + // that can be reused. 
+ Grok *GrokProcessor `json:"grok,omitempty"` + // Gsub Converts a string field by applying a regular expression and a replacement. + // If the field is an array of string, all members of the array will be + // converted. + // If any non-string values are encountered, the processor will throw an + // exception. + Gsub *GsubProcessor `json:"gsub,omitempty"` + // Inference Uses a pre-trained data frame analytics model or a model deployed for natural + // language processing tasks to infer against the data that is being ingested in + // the pipeline. + Inference *InferenceProcessor `json:"inference,omitempty"` + // Join Joins each element of an array into a single string using a separator + // character between each element. + // Throws an error when the field is not an array. + Join *JoinProcessor `json:"join,omitempty"` + // Json Converts a JSON string into a structured JSON object. + Json *JsonProcessor `json:"json,omitempty"` + // Kv This processor helps automatically parse messages (or specific event fields) + // which are of the `foo=bar` variety. + Kv *KeyValueProcessor `json:"kv,omitempty"` + // Lowercase Converts a string to its lowercase equivalent. + // If the field is an array of strings, all members of the array will be + // converted. + Lowercase *LowercaseProcessor `json:"lowercase,omitempty"` + // Pipeline Executes another pipeline. + Pipeline *PipelineProcessor `json:"pipeline,omitempty"` + // Remove Removes existing fields. + // If one field doesn’t exist, an exception will be thrown. + Remove *RemoveProcessor `json:"remove,omitempty"` + // Rename Renames an existing field. + // If the field doesn’t exist or the new name is already used, an exception will + // be thrown. + Rename *RenameProcessor `json:"rename,omitempty"` + // Script Runs an inline or stored script on incoming documents. + // The script runs in the `ingest` context. + Script Script `json:"script,omitempty"` + // Set Adds a field with the specified value. 
+ // If the field already exists, its value will be replaced with the provided + // one. + Set *SetProcessor `json:"set,omitempty"` + // SetSecurityUser Sets user-related details (such as `username`, `roles`, `email`, `full_name`, + // `metadata`, `api_key`, `realm` and `authentication_type`) from the current + // authenticated user to the current document by pre-processing the ingest. SetSecurityUser *SetSecurityUserProcessor `json:"set_security_user,omitempty"` - Sort *SortProcessor `json:"sort,omitempty"` - Split *SplitProcessor `json:"split,omitempty"` - Trim *TrimProcessor `json:"trim,omitempty"` - Uppercase *UppercaseProcessor `json:"uppercase,omitempty"` - Urldecode *UrlDecodeProcessor `json:"urldecode,omitempty"` - UserAgent *UserAgentProcessor `json:"user_agent,omitempty"` + // Sort Sorts the elements of an array ascending or descending. + // Homogeneous arrays of numbers will be sorted numerically, while arrays of + // strings or heterogeneous arrays of strings + numbers will be sorted + // lexicographically. + // Throws an error when the field is not an array. + Sort *SortProcessor `json:"sort,omitempty"` + // Split Splits a field into an array using a separator character. + // Only works on string fields. + Split *SplitProcessor `json:"split,omitempty"` + // Trim Trims whitespace from a field. + // If the field is an array of strings, all members of the array will be + // trimmed. + // This only works on leading and trailing whitespace. + Trim *TrimProcessor `json:"trim,omitempty"` + // Uppercase Converts a string to its uppercase equivalent. + // If the field is an array of strings, all members of the array will be + // converted. + Uppercase *UppercaseProcessor `json:"uppercase,omitempty"` + // Urldecode URL-decodes a string. + // If the field is an array of strings, all members of the array will be + // decoded. 
+ Urldecode *UrlDecodeProcessor `json:"urldecode,omitempty"` + // UserAgent The `user_agent` processor extracts details from the user agent string a + // browser sends with its web requests. + // This processor adds this information by default under the `user_agent` field. + UserAgent *UserAgentProcessor `json:"user_agent,omitempty"` +} + +func (s *ProcessorContainer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "append": + if err := dec.Decode(&s.Append); err != nil { + return err + } + + case "attachment": + if err := dec.Decode(&s.Attachment); err != nil { + return err + } + + case "bytes": + if err := dec.Decode(&s.Bytes); err != nil { + return err + } + + case "circle": + if err := dec.Decode(&s.Circle); err != nil { + return err + } + + case "convert": + if err := dec.Decode(&s.Convert); err != nil { + return err + } + + case "csv": + if err := dec.Decode(&s.Csv); err != nil { + return err + } + + case "date": + if err := dec.Decode(&s.Date); err != nil { + return err + } + + case "date_index_name": + if err := dec.Decode(&s.DateIndexName); err != nil { + return err + } + + case "dissect": + if err := dec.Decode(&s.Dissect); err != nil { + return err + } + + case "dot_expander": + if err := dec.Decode(&s.DotExpander); err != nil { + return err + } + + case "drop": + if err := dec.Decode(&s.Drop); err != nil { + return err + } + + case "enrich": + if err := dec.Decode(&s.Enrich); err != nil { + return err + } + + case "fail": + if err := dec.Decode(&s.Fail); err != nil { + return err + } + + case "foreach": + if err := dec.Decode(&s.Foreach); err != nil { + return err + } + + case "geoip": + if err := dec.Decode(&s.Geoip); err != nil { + return err + } + + case "grok": + if err := dec.Decode(&s.Grok); err != nil { + return err + } + + case "gsub": + if err := dec.Decode(&s.Gsub); err != 
nil { + return err + } + + case "inference": + if err := dec.Decode(&s.Inference); err != nil { + return err + } + + case "join": + if err := dec.Decode(&s.Join); err != nil { + return err + } + + case "json": + if err := dec.Decode(&s.Json); err != nil { + return err + } + + case "kv": + if err := dec.Decode(&s.Kv); err != nil { + return err + } + + case "lowercase": + if err := dec.Decode(&s.Lowercase); err != nil { + return err + } + + case "pipeline": + if err := dec.Decode(&s.Pipeline); err != nil { + return err + } + + case "remove": + if err := dec.Decode(&s.Remove); err != nil { + return err + } + + case "rename": + if err := dec.Decode(&s.Rename); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "set": + if err := dec.Decode(&s.Set); err != nil { + return err + } + + case "set_security_user": + if err := dec.Decode(&s.SetSecurityUser); err != nil { + return err + } + + case "sort": + if err := dec.Decode(&s.Sort); err != nil { + return err + } + + case "split": + if err := dec.Decode(&s.Split); err != nil { + return err + } + + case "trim": + if err := dec.Decode(&s.Trim); err != nil { + return err + } + + case "uppercase": + if err := dec.Decode(&s.Uppercase); err != nil { + return err + } + + case "urldecode": + if err := dec.Decode(&s.Urldecode); err != nil { + return err + } + + case "user_agent": + if err := dec.Decode(&s.UserAgent); err != nil { + return err + } + + } + } + return nil } // NewProcessorContainer returns a ProcessorContainer. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/profile.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/profile.go index 3de6ef594..a3b81825f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/profile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/profile.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Profile type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/profile.ts#L93-L95 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/profile.ts#L93-L95 type Profile struct { Shards []ShardProfile `json:"shards"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/property.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/property.go index 208c7f586..6429f51ef 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/property.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/property.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -69,5 +69,5 @@ package types // IpRangeProperty // LongRangeProperty // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/Property.ts#L93-L156 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/Property.ts#L93-L156 type Property interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/publishedclusterstates.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/publishedclusterstates.go index bf2ffff59..61100e05a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/publishedclusterstates.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/publishedclusterstates.go @@ -16,19 +16,95 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // PublishedClusterStates type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L120-L124 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L263-L276 type PublishedClusterStates struct { - CompatibleDiffs *int64 `json:"compatible_diffs,omitempty"` - FullStates *int64 `json:"full_states,omitempty"` + // CompatibleDiffs Number of compatible differences between published cluster states. + CompatibleDiffs *int64 `json:"compatible_diffs,omitempty"` + // FullStates Number of published cluster states. + FullStates *int64 `json:"full_states,omitempty"` + // IncompatibleDiffs Number of incompatible differences between published cluster states. IncompatibleDiffs *int64 `json:"incompatible_diffs,omitempty"` } +func (s *PublishedClusterStates) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compatible_diffs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CompatibleDiffs = &value + case float64: + f := int64(v) + s.CompatibleDiffs = &f + } + + case "full_states": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.FullStates = &value + case float64: + f := int64(v) + s.FullStates = &f + } + + case "incompatible_diffs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IncompatibleDiffs = &value + case float64: + f := int64(v) + s.IncompatibleDiffs = &f + } + + } + } + return nil +} + // NewPublishedClusterStates 
returns a PublishedClusterStates. func NewPublishedClusterStates() *PublishedClusterStates { r := &PublishedClusterStates{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queries.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queries.go index bb4a7e9bc..1c7adc246 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queries.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queries.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Queries type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L394-L396 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L394-L396 type Queries struct { Cache *CacheQueries `json:"cache,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/query.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/query.go index 9cacae62d..7129f01e6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/query.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/query.go @@ -16,68 +16,519 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // Query type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/abstractions.ts#L96-L162 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/abstractions.ts#L98-L391 type Query struct { - Bool *BoolQuery `json:"bool,omitempty"` - Boosting *BoostingQuery `json:"boosting,omitempty"` - CombinedFields *CombinedFieldsQuery `json:"combined_fields,omitempty"` - Common map[string]CommonTermsQuery `json:"common,omitempty"` - ConstantScore *ConstantScoreQuery `json:"constant_score,omitempty"` - DisMax *DisMaxQuery `json:"dis_max,omitempty"` - DistanceFeature DistanceFeatureQuery `json:"distance_feature,omitempty"` - Exists *ExistsQuery `json:"exists,omitempty"` - FieldMaskingSpan *SpanFieldMaskingQuery `json:"field_masking_span,omitempty"` - FunctionScore *FunctionScoreQuery `json:"function_score,omitempty"` - Fuzzy map[string]FuzzyQuery `json:"fuzzy,omitempty"` - GeoBoundingBox *GeoBoundingBoxQuery `json:"geo_bounding_box,omitempty"` - GeoDistance *GeoDistanceQuery `json:"geo_distance,omitempty"` - GeoPolygon *GeoPolygonQuery `json:"geo_polygon,omitempty"` - GeoShape *GeoShapeQuery `json:"geo_shape,omitempty"` - HasChild *HasChildQuery `json:"has_child,omitempty"` - HasParent *HasParentQuery `json:"has_parent,omitempty"` - Ids *IdsQuery `json:"ids,omitempty"` - Intervals map[string]IntervalsQuery `json:"intervals,omitempty"` - Match map[string]MatchQuery `json:"match,omitempty"` - MatchAll *MatchAllQuery `json:"match_all,omitempty"` - MatchBoolPrefix map[string]MatchBoolPrefixQuery `json:"match_bool_prefix,omitempty"` - MatchNone *MatchNoneQuery `json:"match_none,omitempty"` - MatchPhrase map[string]MatchPhraseQuery `json:"match_phrase,omitempty"` + // Bool matches documents matching boolean combinations of other queries. 
+ Bool *BoolQuery `json:"bool,omitempty"` + // Boosting Returns documents matching a `positive` query while reducing the relevance + // score of documents that also match a `negative` query. + Boosting *BoostingQuery `json:"boosting,omitempty"` + // CombinedFields The `combined_fields` query supports searching multiple text fields as if + // their contents had been indexed into one combined field. + CombinedFields *CombinedFieldsQuery `json:"combined_fields,omitempty"` + Common map[string]CommonTermsQuery `json:"common,omitempty"` + // ConstantScore Wraps a filter query and returns every matching document with a relevance + // score equal to the `boost` parameter value. + ConstantScore *ConstantScoreQuery `json:"constant_score,omitempty"` + // DisMax Returns documents matching one or more wrapped queries, called query clauses + // or clauses. + // If a returned document matches multiple query clauses, the `dis_max` query + // assigns the document the highest relevance score from any matching clause, + // plus a tie breaking increment for any additional matching subqueries. + DisMax *DisMaxQuery `json:"dis_max,omitempty"` + // DistanceFeature Boosts the relevance score of documents closer to a provided origin date or + // point. + // For example, you can use this query to give more weight to documents closer + // to a certain date or location. + DistanceFeature DistanceFeatureQuery `json:"distance_feature,omitempty"` + // Exists Returns documents that contain an indexed value for a field. + Exists *ExistsQuery `json:"exists,omitempty"` + // FieldMaskingSpan Wrapper to allow span queries to participate in composite single-field span + // queries by _lying_ about their search field. + FieldMaskingSpan *SpanFieldMaskingQuery `json:"field_masking_span,omitempty"` + // FunctionScore The `function_score` enables you to modify the score of documents that are + // retrieved by a query. 
+ FunctionScore *FunctionScoreQuery `json:"function_score,omitempty"` + // Fuzzy Returns documents that contain terms similar to the search term, as measured + // by a Levenshtein edit distance. + Fuzzy map[string]FuzzyQuery `json:"fuzzy,omitempty"` + // GeoBoundingBox Matches geo_point and geo_shape values that intersect a bounding box. + GeoBoundingBox *GeoBoundingBoxQuery `json:"geo_bounding_box,omitempty"` + // GeoDistance Matches `geo_point` and `geo_shape` values within a given distance of a + // geopoint. + GeoDistance *GeoDistanceQuery `json:"geo_distance,omitempty"` + GeoPolygon *GeoPolygonQuery `json:"geo_polygon,omitempty"` + // GeoShape Filter documents indexed using either the `geo_shape` or the `geo_point` + // type. + GeoShape *GeoShapeQuery `json:"geo_shape,omitempty"` + // HasChild Returns parent documents whose joined child documents match a provided query. + HasChild *HasChildQuery `json:"has_child,omitempty"` + // HasParent Returns child documents whose joined parent document matches a provided + // query. + HasParent *HasParentQuery `json:"has_parent,omitempty"` + // Ids Returns documents based on their IDs. + // This query uses document IDs stored in the `_id` field. + Ids *IdsQuery `json:"ids,omitempty"` + // Intervals Returns documents based on the order and proximity of matching terms. + Intervals map[string]IntervalsQuery `json:"intervals,omitempty"` + // Match Returns documents that match a provided text, number, date or boolean value. + // The provided text is analyzed before matching. + Match map[string]MatchQuery `json:"match,omitempty"` + // MatchAll Matches all documents, giving them all a `_score` of 1.0. + MatchAll *MatchAllQuery `json:"match_all,omitempty"` + // MatchBoolPrefix Analyzes its input and constructs a `bool` query from the terms. + // Each term except the last is used in a `term` query. + // The last term is used in a prefix query. 
+ MatchBoolPrefix map[string]MatchBoolPrefixQuery `json:"match_bool_prefix,omitempty"` + // MatchNone Matches no documents. + MatchNone *MatchNoneQuery `json:"match_none,omitempty"` + // MatchPhrase Analyzes the text and creates a phrase query out of the analyzed text. + MatchPhrase map[string]MatchPhraseQuery `json:"match_phrase,omitempty"` + // MatchPhrasePrefix Returns documents that contain the words of a provided text, in the same + // order as provided. + // The last term of the provided text is treated as a prefix, matching any words + // that begin with that term. MatchPhrasePrefix map[string]MatchPhrasePrefixQuery `json:"match_phrase_prefix,omitempty"` - MoreLikeThis *MoreLikeThisQuery `json:"more_like_this,omitempty"` - MultiMatch *MultiMatchQuery `json:"multi_match,omitempty"` - Nested *NestedQuery `json:"nested,omitempty"` - ParentId *ParentIdQuery `json:"parent_id,omitempty"` - Percolate *PercolateQuery `json:"percolate,omitempty"` - Pinned *PinnedQuery `json:"pinned,omitempty"` - Prefix map[string]PrefixQuery `json:"prefix,omitempty"` - QueryString *QueryStringQuery `json:"query_string,omitempty"` - Range map[string]RangeQuery `json:"range,omitempty"` - RankFeature *RankFeatureQuery `json:"rank_feature,omitempty"` - Regexp map[string]RegexpQuery `json:"regexp,omitempty"` - Script *ScriptQuery `json:"script,omitempty"` - ScriptScore *ScriptScoreQuery `json:"script_score,omitempty"` - Shape *ShapeQuery `json:"shape,omitempty"` - SimpleQueryString *SimpleQueryStringQuery `json:"simple_query_string,omitempty"` - SpanContaining *SpanContainingQuery `json:"span_containing,omitempty"` - SpanFirst *SpanFirstQuery `json:"span_first,omitempty"` - SpanMulti *SpanMultiTermQuery `json:"span_multi,omitempty"` - SpanNear *SpanNearQuery `json:"span_near,omitempty"` - SpanNot *SpanNotQuery `json:"span_not,omitempty"` - SpanOr *SpanOrQuery `json:"span_or,omitempty"` - SpanTerm map[string]SpanTermQuery `json:"span_term,omitempty"` - SpanWithin *SpanWithinQuery 
`json:"span_within,omitempty"` - Term map[string]TermQuery `json:"term,omitempty"` - Terms *TermsQuery `json:"terms,omitempty"` - TermsSet map[string]TermsSetQuery `json:"terms_set,omitempty"` - Type *TypeQuery `json:"type,omitempty"` - Wildcard map[string]WildcardQuery `json:"wildcard,omitempty"` - Wrapper *WrapperQuery `json:"wrapper,omitempty"` + // MoreLikeThis Returns documents that are "like" a given set of documents. + MoreLikeThis *MoreLikeThisQuery `json:"more_like_this,omitempty"` + // MultiMatch Enables you to search for a provided text, number, date or boolean value + // across multiple fields. + // The provided text is analyzed before matching. + MultiMatch *MultiMatchQuery `json:"multi_match,omitempty"` + // Nested Wraps another query to search nested fields. + // If an object matches the search, the nested query returns the root parent + // document. + Nested *NestedQuery `json:"nested,omitempty"` + // ParentId Returns child documents joined to a specific parent document. + ParentId *ParentIdQuery `json:"parent_id,omitempty"` + // Percolate Matches queries stored in an index. + Percolate *PercolateQuery `json:"percolate,omitempty"` + // Pinned Promotes selected documents to rank higher than those matching a given query. + Pinned *PinnedQuery `json:"pinned,omitempty"` + // Prefix Returns documents that contain a specific prefix in a provided field. + Prefix map[string]PrefixQuery `json:"prefix,omitempty"` + // QueryString Returns documents based on a provided query string, using a parser with a + // strict syntax. + QueryString *QueryStringQuery `json:"query_string,omitempty"` + // Range Returns documents that contain terms within a provided range. + Range map[string]RangeQuery `json:"range,omitempty"` + // RankFeature Boosts the relevance score of documents based on the numeric value of a + // `rank_feature` or `rank_features` field. 
+ RankFeature *RankFeatureQuery `json:"rank_feature,omitempty"` + // Regexp Returns documents that contain terms matching a regular expression. + Regexp map[string]RegexpQuery `json:"regexp,omitempty"` + RuleQuery *RuleQuery `json:"rule_query,omitempty"` + // Script Filters documents based on a provided script. + // The script query is typically used in a filter context. + Script *ScriptQuery `json:"script,omitempty"` + // ScriptScore Uses a script to provide a custom score for returned documents. + ScriptScore *ScriptScoreQuery `json:"script_score,omitempty"` + // Shape Queries documents that contain fields indexed using the `shape` type. + Shape *ShapeQuery `json:"shape,omitempty"` + // SimpleQueryString Returns documents based on a provided query string, using a parser with a + // limited but fault-tolerant syntax. + SimpleQueryString *SimpleQueryStringQuery `json:"simple_query_string,omitempty"` + // SpanContaining Returns matches which enclose another span query. + SpanContaining *SpanContainingQuery `json:"span_containing,omitempty"` + // SpanFirst Matches spans near the beginning of a field. + SpanFirst *SpanFirstQuery `json:"span_first,omitempty"` + // SpanMulti Allows you to wrap a multi term query (one of `wildcard`, `fuzzy`, `prefix`, + // `range`, or `regexp` query) as a `span` query, so it can be nested. + SpanMulti *SpanMultiTermQuery `json:"span_multi,omitempty"` + // SpanNear Matches spans which are near one another. + // You can specify `slop`, the maximum number of intervening unmatched + // positions, as well as whether matches are required to be in-order. + SpanNear *SpanNearQuery `json:"span_near,omitempty"` + // SpanNot Removes matches which overlap with another span query or which are within x + // tokens before (controlled by the parameter `pre`) or y tokens after + // (controlled by the parameter `post`) another span query. + SpanNot *SpanNotQuery `json:"span_not,omitempty"` + // SpanOr Matches the union of its span clauses. 
+ SpanOr *SpanOrQuery `json:"span_or,omitempty"` + // SpanTerm Matches spans containing a term. + SpanTerm map[string]SpanTermQuery `json:"span_term,omitempty"` + // SpanWithin Returns matches which are enclosed inside another span query. + SpanWithin *SpanWithinQuery `json:"span_within,omitempty"` + // Term Returns documents that contain an exact term in a provided field. + // To return a document, the query term must exactly match the queried field's + // value, including whitespace and capitalization. + Term map[string]TermQuery `json:"term,omitempty"` + // Terms Returns documents that contain one or more exact terms in a provided field. + // To return a document, one or more terms must exactly match a field value, + // including whitespace and capitalization. + Terms *TermsQuery `json:"terms,omitempty"` + // TermsSet Returns documents that contain a minimum number of exact terms in a provided + // field. + // To return a document, a required number of terms must exactly match the field + // values, including whitespace and capitalization. + TermsSet map[string]TermsSetQuery `json:"terms_set,omitempty"` + // TextExpansion Uses a natural language processing model to convert the query text into a + // list of token-weight pairs which are then used in a query against a sparse + // vector or rank features field. + TextExpansion map[string]TextExpansionQuery `json:"text_expansion,omitempty"` + Type *TypeQuery `json:"type,omitempty"` + // Wildcard Returns documents that contain terms matching a wildcard pattern. + Wildcard map[string]WildcardQuery `json:"wildcard,omitempty"` + // Wrapper A query that accepts any other query as base64 encoded string. 
+ Wrapper *WrapperQuery `json:"wrapper,omitempty"` +} + +func (s *Query) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bool": + if err := dec.Decode(&s.Bool); err != nil { + return err + } + + case "boosting": + if err := dec.Decode(&s.Boosting); err != nil { + return err + } + + case "combined_fields": + if err := dec.Decode(&s.CombinedFields); err != nil { + return err + } + + case "common": + if s.Common == nil { + s.Common = make(map[string]CommonTermsQuery, 0) + } + if err := dec.Decode(&s.Common); err != nil { + return err + } + + case "constant_score": + if err := dec.Decode(&s.ConstantScore); err != nil { + return err + } + + case "dis_max": + if err := dec.Decode(&s.DisMax); err != nil { + return err + } + + case "distance_feature": + if err := dec.Decode(&s.DistanceFeature); err != nil { + return err + } + + case "exists": + if err := dec.Decode(&s.Exists); err != nil { + return err + } + + case "field_masking_span": + if err := dec.Decode(&s.FieldMaskingSpan); err != nil { + return err + } + + case "function_score": + if err := dec.Decode(&s.FunctionScore); err != nil { + return err + } + + case "fuzzy": + if s.Fuzzy == nil { + s.Fuzzy = make(map[string]FuzzyQuery, 0) + } + if err := dec.Decode(&s.Fuzzy); err != nil { + return err + } + + case "geo_bounding_box": + if err := dec.Decode(&s.GeoBoundingBox); err != nil { + return err + } + + case "geo_distance": + if err := dec.Decode(&s.GeoDistance); err != nil { + return err + } + + case "geo_polygon": + if err := dec.Decode(&s.GeoPolygon); err != nil { + return err + } + + case "geo_shape": + if err := dec.Decode(&s.GeoShape); err != nil { + return err + } + + case "has_child": + if err := dec.Decode(&s.HasChild); err != nil { + return err + } + + case "has_parent": + if err := dec.Decode(&s.HasParent); err != nil { + return err + } 
+ + case "ids": + if err := dec.Decode(&s.Ids); err != nil { + return err + } + + case "intervals": + if s.Intervals == nil { + s.Intervals = make(map[string]IntervalsQuery, 0) + } + if err := dec.Decode(&s.Intervals); err != nil { + return err + } + + case "match": + if s.Match == nil { + s.Match = make(map[string]MatchQuery, 0) + } + if err := dec.Decode(&s.Match); err != nil { + return err + } + + case "match_all": + if err := dec.Decode(&s.MatchAll); err != nil { + return err + } + + case "match_bool_prefix": + if s.MatchBoolPrefix == nil { + s.MatchBoolPrefix = make(map[string]MatchBoolPrefixQuery, 0) + } + if err := dec.Decode(&s.MatchBoolPrefix); err != nil { + return err + } + + case "match_none": + if err := dec.Decode(&s.MatchNone); err != nil { + return err + } + + case "match_phrase": + if s.MatchPhrase == nil { + s.MatchPhrase = make(map[string]MatchPhraseQuery, 0) + } + if err := dec.Decode(&s.MatchPhrase); err != nil { + return err + } + + case "match_phrase_prefix": + if s.MatchPhrasePrefix == nil { + s.MatchPhrasePrefix = make(map[string]MatchPhrasePrefixQuery, 0) + } + if err := dec.Decode(&s.MatchPhrasePrefix); err != nil { + return err + } + + case "more_like_this": + if err := dec.Decode(&s.MoreLikeThis); err != nil { + return err + } + + case "multi_match": + if err := dec.Decode(&s.MultiMatch); err != nil { + return err + } + + case "nested": + if err := dec.Decode(&s.Nested); err != nil { + return err + } + + case "parent_id": + if err := dec.Decode(&s.ParentId); err != nil { + return err + } + + case "percolate": + if err := dec.Decode(&s.Percolate); err != nil { + return err + } + + case "pinned": + if err := dec.Decode(&s.Pinned); err != nil { + return err + } + + case "prefix": + if s.Prefix == nil { + s.Prefix = make(map[string]PrefixQuery, 0) + } + if err := dec.Decode(&s.Prefix); err != nil { + return err + } + + case "query_string": + if err := dec.Decode(&s.QueryString); err != nil { + return err + } + + case "range": + if s.Range 
== nil { + s.Range = make(map[string]RangeQuery, 0) + } + if err := dec.Decode(&s.Range); err != nil { + return err + } + + case "rank_feature": + if err := dec.Decode(&s.RankFeature); err != nil { + return err + } + + case "regexp": + if s.Regexp == nil { + s.Regexp = make(map[string]RegexpQuery, 0) + } + if err := dec.Decode(&s.Regexp); err != nil { + return err + } + + case "rule_query": + if err := dec.Decode(&s.RuleQuery); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "script_score": + if err := dec.Decode(&s.ScriptScore); err != nil { + return err + } + + case "shape": + if err := dec.Decode(&s.Shape); err != nil { + return err + } + + case "simple_query_string": + if err := dec.Decode(&s.SimpleQueryString); err != nil { + return err + } + + case "span_containing": + if err := dec.Decode(&s.SpanContaining); err != nil { + return err + } + + case "span_first": + if err := dec.Decode(&s.SpanFirst); err != nil { + return err + } + + case "span_multi": + if err := dec.Decode(&s.SpanMulti); err != nil { + return err + } + + case "span_near": + if err := dec.Decode(&s.SpanNear); err != nil { + return err + } + + case "span_not": + if err := dec.Decode(&s.SpanNot); err != nil { + return err + } + + case "span_or": + if err := dec.Decode(&s.SpanOr); err != nil { + return err + } + + case "span_term": + if s.SpanTerm == nil { + s.SpanTerm = make(map[string]SpanTermQuery, 0) + } + if err := dec.Decode(&s.SpanTerm); err != nil { + return err + } + + case "span_within": + if err := dec.Decode(&s.SpanWithin); err != nil { + return err + } + + case "term": + if s.Term == nil { + s.Term = make(map[string]TermQuery, 0) + } + if err := dec.Decode(&s.Term); err != nil { + return err + } + + case "terms": + if err := dec.Decode(&s.Terms); err != nil { + return err + } + + case "terms_set": + if s.TermsSet == nil { + s.TermsSet = make(map[string]TermsSetQuery, 0) + } + if err := dec.Decode(&s.TermsSet); 
err != nil { + return err + } + + case "text_expansion": + if s.TextExpansion == nil { + s.TextExpansion = make(map[string]TextExpansionQuery, 0) + } + if err := dec.Decode(&s.TextExpansion); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "wildcard": + if s.Wildcard == nil { + s.Wildcard = make(map[string]WildcardQuery, 0) + } + if err := dec.Decode(&s.Wildcard); err != nil { + return err + } + + case "wrapper": + if err := dec.Decode(&s.Wrapper); err != nil { + return err + } + + } + } + return nil } // NewQuery returns a Query. @@ -96,6 +547,7 @@ func NewQuery() *Query { SpanTerm: make(map[string]SpanTermQuery, 0), Term: make(map[string]TermQuery, 0), TermsSet: make(map[string]TermsSetQuery, 0), + TextExpansion: make(map[string]TextExpansionQuery, 0), Wildcard: make(map[string]WildcardQuery, 0), } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/querybreakdown.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/querybreakdown.go index e5224c717..cc35b935c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/querybreakdown.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/querybreakdown.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // QueryBreakdown type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/profile.ts#L97-L116 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/profile.ts#L97-L116 type QueryBreakdown struct { Advance int64 `json:"advance"` AdvanceCount int64 `json:"advance_count"` @@ -44,6 +52,296 @@ type QueryBreakdown struct { ShallowAdvanceCount int64 `json:"shallow_advance_count"` } +func (s *QueryBreakdown) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "advance": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Advance = value + case float64: + f := int64(v) + s.Advance = f + } + + case "advance_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.AdvanceCount = value + case float64: + f := int64(v) + s.AdvanceCount = f + } + + case "build_scorer": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BuildScorer = value + case float64: + f := int64(v) + s.BuildScorer = f + } + + case "build_scorer_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BuildScorerCount = value + case float64: + f := int64(v) + s.BuildScorerCount = f + } + + case "compute_max_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 
64) + if err != nil { + return err + } + s.ComputeMaxScore = value + case float64: + f := int64(v) + s.ComputeMaxScore = f + } + + case "compute_max_score_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ComputeMaxScoreCount = value + case float64: + f := int64(v) + s.ComputeMaxScoreCount = f + } + + case "create_weight": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CreateWeight = value + case float64: + f := int64(v) + s.CreateWeight = f + } + + case "create_weight_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CreateWeightCount = value + case float64: + f := int64(v) + s.CreateWeightCount = f + } + + case "match": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Match = value + case float64: + f := int64(v) + s.Match = f + } + + case "match_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MatchCount = value + case float64: + f := int64(v) + s.MatchCount = f + } + + case "next_doc": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NextDoc = value + case float64: + f := int64(v) + s.NextDoc = f + } + + case "next_doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NextDocCount = value + case float64: + f := int64(v) + 
s.NextDocCount = f + } + + case "score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Score = value + case float64: + f := int64(v) + s.Score = f + } + + case "score_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ScoreCount = value + case float64: + f := int64(v) + s.ScoreCount = f + } + + case "set_min_competitive_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SetMinCompetitiveScore = value + case float64: + f := int64(v) + s.SetMinCompetitiveScore = f + } + + case "set_min_competitive_score_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SetMinCompetitiveScoreCount = value + case float64: + f := int64(v) + s.SetMinCompetitiveScoreCount = f + } + + case "shallow_advance": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ShallowAdvance = value + case float64: + f := int64(v) + s.ShallowAdvance = f + } + + case "shallow_advance_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ShallowAdvanceCount = value + case float64: + f := int64(v) + s.ShallowAdvanceCount = f + } + + } + } + return nil +} + // NewQueryBreakdown returns a QueryBreakdown. 
func NewQueryBreakdown() *QueryBreakdown { r := &QueryBreakdown{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/querycachestats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/querycachestats.go index b4b3d40e6..523970553 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/querycachestats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/querycachestats.go @@ -16,22 +16,182 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // QueryCacheStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L150-L159 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L192-L226 type QueryCacheStats struct { - CacheCount int `json:"cache_count"` - CacheSize int `json:"cache_size"` - Evictions int `json:"evictions"` - HitCount int `json:"hit_count"` - MemorySize ByteSize `json:"memory_size,omitempty"` - MemorySizeInBytes int `json:"memory_size_in_bytes"` - MissCount int `json:"miss_count"` - TotalCount int `json:"total_count"` + // CacheCount Total number of entries added to the query cache across all shards assigned + // to selected nodes. + // This number includes current and evicted entries. + CacheCount int `json:"cache_count"` + // CacheSize Total number of entries currently in the query cache across all shards + // assigned to selected nodes. + CacheSize int `json:"cache_size"` + // Evictions Total number of query cache evictions across all shards assigned to selected + // nodes. 
+ Evictions int `json:"evictions"` + // HitCount Total count of query cache hits across all shards assigned to selected nodes. + HitCount int `json:"hit_count"` + // MemorySize Total amount of memory used for the query cache across all shards assigned to + // selected nodes. + MemorySize ByteSize `json:"memory_size,omitempty"` + // MemorySizeInBytes Total amount, in bytes, of memory used for the query cache across all shards + // assigned to selected nodes. + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + // MissCount Total count of query cache misses across all shards assigned to selected + // nodes. + MissCount int `json:"miss_count"` + // TotalCount Total count of hits and misses in the query cache across all shards assigned + // to selected nodes. + TotalCount int `json:"total_count"` +} + +func (s *QueryCacheStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cache_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CacheCount = value + case float64: + f := int(v) + s.CacheCount = f + } + + case "cache_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CacheSize = value + case float64: + f := int(v) + s.CacheSize = f + } + + case "evictions": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Evictions = value + case float64: + f := int(v) + s.Evictions = f + } + + case "hit_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.HitCount = value + 
case float64: + f := int(v) + s.HitCount = f + } + + case "memory_size": + if err := dec.Decode(&s.MemorySize); err != nil { + return err + } + + case "memory_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MemorySizeInBytes = value + case float64: + f := int64(v) + s.MemorySizeInBytes = f + } + + case "miss_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MissCount = value + case float64: + f := int(v) + s.MissCount = f + } + + case "total_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TotalCount = value + case float64: + f := int(v) + s.TotalCount = f + } + + } + } + return nil } // NewQueryCacheStats returns a QueryCacheStats. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryprofile.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryprofile.go index f2f83da8d..18fb62d24 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryprofile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryprofile.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // QueryProfile type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/profile.ts#L118-L124 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/profile.ts#L118-L124 type QueryProfile struct { Breakdown QueryBreakdown `json:"breakdown"` Children []QueryProfile `json:"children,omitempty"` @@ -31,6 +39,65 @@ type QueryProfile struct { Type string `json:"type"` } +func (s *QueryProfile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "breakdown": + if err := dec.Decode(&s.Breakdown); err != nil { + return err + } + + case "children": + if err := dec.Decode(&s.Children); err != nil { + return err + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "time_in_nanos": + if err := dec.Decode(&s.TimeInNanos); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewQueryProfile returns a QueryProfile. func NewQueryProfile() *QueryProfile { r := &QueryProfile{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryrule.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryrule.go new file mode 100644 index 000000000..96915b7c6 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryrule.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/queryruletype" +) + +// QueryRule type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/query_ruleset/_types/QueryRuleset.ts#L37-L42 +type QueryRule struct { + Actions QueryRuleActions `json:"actions"` + Criteria []QueryRuleCriteria `json:"criteria"` + RuleId string `json:"rule_id"` + Type queryruletype.QueryRuleType `json:"type"` +} + +func (s *QueryRule) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return err + } + + case "criteria": + if err := dec.Decode(&s.Criteria); err != nil { + return err + } + + case "rule_id": + if err := dec.Decode(&s.RuleId); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil +} + +// NewQueryRule returns a QueryRule. +func NewQueryRule() *QueryRule { + r := &QueryRule{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryruleactions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryruleactions.go new file mode 100644 index 000000000..a07ca57ec --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryruleactions.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +// QueryRuleActions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/query_ruleset/_types/QueryRuleset.ts#L67-L70 +type QueryRuleActions struct { + Docs []PinnedDoc `json:"docs,omitempty"` + Ids []string `json:"ids,omitempty"` +} + +// NewQueryRuleActions returns a QueryRuleActions. +func NewQueryRuleActions() *QueryRuleActions { + r := &QueryRuleActions{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryrulecriteria.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryrulecriteria.go new file mode 100644 index 000000000..1ca6a3a6a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryrulecriteria.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/queryrulecriteriatype" +) + +// QueryRuleCriteria type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/query_ruleset/_types/QueryRuleset.ts#L48-L52 +type QueryRuleCriteria struct { + Metadata string `json:"metadata"` + Type queryrulecriteriatype.QueryRuleCriteriaType `json:"type"` + Values []json.RawMessage `json:"values,omitempty"` +} + +func (s *QueryRuleCriteria) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "metadata": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Metadata = o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "values": + if err := dec.Decode(&s.Values); err != nil { + return err + } + + } + } + return nil +} + +// NewQueryRuleCriteria returns a QueryRuleCriteria. 
+func NewQueryRuleCriteria() *QueryRuleCriteria { + r := &QueryRuleCriteria{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryruleset.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryruleset.go new file mode 100644 index 000000000..39b25a974 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryruleset.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + +// QueryRuleset type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/query_ruleset/_types/QueryRuleset.ts#L26-L35 +type QueryRuleset struct { + // Rules Rules associated with the query ruleset + Rules []QueryRule `json:"rules"` + // RulesetId Query Ruleset unique identifier + RulesetId string `json:"ruleset_id"` +} + +func (s *QueryRuleset) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "rules": + if err := dec.Decode(&s.Rules); err != nil { + return err + } + + case "ruleset_id": + if err := dec.Decode(&s.RulesetId); err != nil { + return err + } + + } + } + return nil +} + +// NewQueryRuleset returns a QueryRuleset. +func NewQueryRuleset() *QueryRuleset { + r := &QueryRuleset{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryrulesetlistitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryrulesetlistitem.go new file mode 100644 index 000000000..8b72a02cc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryrulesetlistitem.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// QueryRulesetListItem type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/query_ruleset/list/types.ts#L22-L31 +type QueryRulesetListItem struct { + // RulesCount The number of rules associated with this ruleset + RulesCount int `json:"rules_count"` + // RulesetId Ruleset unique identifier + RulesetId string `json:"ruleset_id"` +} + +func (s *QueryRulesetListItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "rules_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.RulesCount = value + case float64: + f := int(v) + s.RulesCount = f + } + + case "ruleset_id": + if err := dec.Decode(&s.RulesetId); err != nil { + return err + } + + } + } + return nil +} + +// NewQueryRulesetListItem returns a QueryRulesetListItem. +func NewQueryRulesetListItem() *QueryRulesetListItem { + r := &QueryRulesetListItem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/querystringquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/querystringquery.go index 08c283857..e62b68ecb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/querystringquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/querystringquery.go @@ -16,46 +16,414 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/textquerytype" ) // QueryStringQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L233-L269 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L580-L700 type QueryStringQuery struct { - AllowLeadingWildcard *bool `json:"allow_leading_wildcard,omitempty"` - AnalyzeWildcard *bool `json:"analyze_wildcard,omitempty"` - Analyzer *string `json:"analyzer,omitempty"` - AutoGenerateSynonymsPhraseQuery *bool `json:"auto_generate_synonyms_phrase_query,omitempty"` - Boost *float32 `json:"boost,omitempty"` - DefaultField *string `json:"default_field,omitempty"` - DefaultOperator *operator.Operator `json:"default_operator,omitempty"` - EnablePositionIncrements *bool `json:"enable_position_increments,omitempty"` - Escape *bool `json:"escape,omitempty"` - Fields []string `json:"fields,omitempty"` - Fuzziness Fuzziness `json:"fuzziness,omitempty"` - FuzzyMaxExpansions *int `json:"fuzzy_max_expansions,omitempty"` - FuzzyPrefixLength *int `json:"fuzzy_prefix_length,omitempty"` - FuzzyRewrite *string `json:"fuzzy_rewrite,omitempty"` - FuzzyTranspositions *bool `json:"fuzzy_transpositions,omitempty"` - Lenient *bool `json:"lenient,omitempty"` - MaxDeterminizedStates *int `json:"max_determinized_states,omitempty"` - MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` - 
PhraseSlop *Float64 `json:"phrase_slop,omitempty"` - Query string `json:"query"` - QueryName_ *string `json:"_name,omitempty"` - QuoteAnalyzer *string `json:"quote_analyzer,omitempty"` - QuoteFieldSuffix *string `json:"quote_field_suffix,omitempty"` - Rewrite *string `json:"rewrite,omitempty"` - TieBreaker *Float64 `json:"tie_breaker,omitempty"` - TimeZone *string `json:"time_zone,omitempty"` - Type *textquerytype.TextQueryType `json:"type,omitempty"` + // AllowLeadingWildcard If `true`, the wildcard characters `*` and `?` are allowed as the first + // character of the query string. + AllowLeadingWildcard *bool `json:"allow_leading_wildcard,omitempty"` + // AnalyzeWildcard If `true`, the query attempts to analyze wildcard terms in the query string. + AnalyzeWildcard *bool `json:"analyze_wildcard,omitempty"` + // Analyzer Analyzer used to convert text in the query string into tokens. + Analyzer *string `json:"analyzer,omitempty"` + // AutoGenerateSynonymsPhraseQuery If `true`, match phrase queries are automatically created for multi-term + // synonyms. + AutoGenerateSynonymsPhraseQuery *bool `json:"auto_generate_synonyms_phrase_query,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // DefaultField Default field to search if no field is provided in the query string. + // Supports wildcards (`*`). + // Defaults to the `index.query.default_field` index setting, which has a + // default value of `*`. + DefaultField *string `json:"default_field,omitempty"` + // DefaultOperator Default boolean logic used to interpret text in the query string if no + // operators are specified. 
+ DefaultOperator *operator.Operator `json:"default_operator,omitempty"` + // EnablePositionIncrements If `true`, enable position increments in queries constructed from a + // `query_string` search. + EnablePositionIncrements *bool `json:"enable_position_increments,omitempty"` + Escape *bool `json:"escape,omitempty"` + // Fields Array of fields to search. Supports wildcards (`*`). + Fields []string `json:"fields,omitempty"` + // Fuzziness Maximum edit distance allowed for fuzzy matching. + Fuzziness Fuzziness `json:"fuzziness,omitempty"` + // FuzzyMaxExpansions Maximum number of terms to which the query expands for fuzzy matching. + FuzzyMaxExpansions *int `json:"fuzzy_max_expansions,omitempty"` + // FuzzyPrefixLength Number of beginning characters left unchanged for fuzzy matching. + FuzzyPrefixLength *int `json:"fuzzy_prefix_length,omitempty"` + // FuzzyRewrite Method used to rewrite the query. + FuzzyRewrite *string `json:"fuzzy_rewrite,omitempty"` + // FuzzyTranspositions If `true`, edits for fuzzy matching include transpositions of two adjacent + // characters (for example, `ab` to `ba`). + FuzzyTranspositions *bool `json:"fuzzy_transpositions,omitempty"` + // Lenient If `true`, format-based errors, such as providing a text value for a numeric + // field, are ignored. + Lenient *bool `json:"lenient,omitempty"` + // MaxDeterminizedStates Maximum number of automaton states required for the query. + MaxDeterminizedStates *int `json:"max_determinized_states,omitempty"` + // MinimumShouldMatch Minimum number of clauses that must match for a document to be returned. + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` + // PhraseSlop Maximum number of positions allowed between matching tokens for phrases. + PhraseSlop *Float64 `json:"phrase_slop,omitempty"` + // Query Query string you wish to parse and use for search. 
+ Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // QuoteAnalyzer Analyzer used to convert quoted text in the query string into tokens. + // For quoted text, this parameter overrides the analyzer specified in the + // `analyzer` parameter. + QuoteAnalyzer *string `json:"quote_analyzer,omitempty"` + // QuoteFieldSuffix Suffix appended to quoted text in the query string. + // You can use this suffix to use a different analysis method for exact matches. + QuoteFieldSuffix *string `json:"quote_field_suffix,omitempty"` + // Rewrite Method used to rewrite the query. + Rewrite *string `json:"rewrite,omitempty"` + // TieBreaker How to combine the queries generated from the individual search terms in the + // resulting `dis_max` query. + TieBreaker *Float64 `json:"tie_breaker,omitempty"` + // TimeZone Coordinated Universal Time (UTC) offset or IANA time zone used to convert + // date values in the query string to UTC. + TimeZone *string `json:"time_zone,omitempty"` + // Type Determines how the query matches and scores documents. 
+ Type *textquerytype.TextQueryType `json:"type,omitempty"` +} + +func (s *QueryStringQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_leading_wildcard": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowLeadingWildcard = &value + case bool: + s.AllowLeadingWildcard = &v + } + + case "analyze_wildcard": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AnalyzeWildcard = &value + case bool: + s.AnalyzeWildcard = &v + } + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "auto_generate_synonyms_phrase_query": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AutoGenerateSynonymsPhraseQuery = &value + case bool: + s.AutoGenerateSynonymsPhraseQuery = &v + } + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "default_field": + if err := dec.Decode(&s.DefaultField); err != nil { + return err + } + + case "default_operator": + if err := dec.Decode(&s.DefaultOperator); err != nil { + return err + } + + case "enable_position_increments": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil 
{ + return err + } + s.EnablePositionIncrements = &value + case bool: + s.EnablePositionIncrements = &v + } + + case "escape": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Escape = &value + case bool: + s.Escape = &v + } + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "fuzziness": + if err := dec.Decode(&s.Fuzziness); err != nil { + return err + } + + case "fuzzy_max_expansions": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FuzzyMaxExpansions = &value + case float64: + f := int(v) + s.FuzzyMaxExpansions = &f + } + + case "fuzzy_prefix_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FuzzyPrefixLength = &value + case float64: + f := int(v) + s.FuzzyPrefixLength = &f + } + + case "fuzzy_rewrite": + if err := dec.Decode(&s.FuzzyRewrite); err != nil { + return err + } + + case "fuzzy_transpositions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.FuzzyTranspositions = &value + case bool: + s.FuzzyTranspositions = &v + } + + case "lenient": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Lenient = &value + case bool: + s.Lenient = &v + } + + case "max_determinized_states": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxDeterminizedStates = &value + case float64: + f := int(v) + s.MaxDeterminizedStates = &f + } + + case "minimum_should_match": + if err := 
dec.Decode(&s.MinimumShouldMatch); err != nil { + return err + } + + case "phrase_slop": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.PhraseSlop = &f + case float64: + f := Float64(v) + s.PhraseSlop = &f + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "quote_analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QuoteAnalyzer = &o + + case "quote_field_suffix": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QuoteFieldSuffix = &o + + case "rewrite": + if err := dec.Decode(&s.Rewrite); err != nil { + return err + } + + case "tie_breaker": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.TieBreaker = &f + case float64: + f := Float64(v) + s.TieBreaker = &f + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil } // NewQueryStringQuery returns a QueryStringQuery. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryvector.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryvector.go deleted file mode 100644 index 0a5bfb147..000000000 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryvector.go +++ /dev/null @@ -1,26 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 - -package types - -// QueryVector type alias. -// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Knn.ts#L24-L24 -type QueryVector []float32 diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryvectorbuilder.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryvectorbuilder.go index d28c52d90..9e02a06df 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryvectorbuilder.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/queryvectorbuilder.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // QueryVectorBuilder type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Knn.ts#L43-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Knn.ts#L45-L48 type QueryVectorBuilder struct { TextEmbedding *TextEmbedding `json:"text_embedding,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/querywatch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/querywatch.go index aba53b38c..0f18b1166 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/querywatch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/querywatch.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // QueryWatch type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Watch.ts#L58-L64 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Watch.ts#L58-L64 type QueryWatch struct { Id_ string `json:"_id"` PrimaryTerm_ *int `json:"_primary_term,omitempty"` @@ -31,6 +39,62 @@ type QueryWatch struct { Watch *Watch `json:"watch,omitempty"` } +func (s *QueryWatch) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "_primary_term": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PrimaryTerm_ = &value + case float64: + f := int(v) + s.PrimaryTerm_ = &f + } + + case "_seq_no": + if err := dec.Decode(&s.SeqNo_); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "watch": + if err := dec.Decode(&s.Watch); err != nil { + return err + } + + } + } + return nil +} + // NewQueryWatch returns a QueryWatch. func NewQueryWatch() *QueryWatch { r := &QueryWatch{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/questionansweringinferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/questionansweringinferenceoptions.go index 21f468118..b1eec3651 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/questionansweringinferenceoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/questionansweringinferenceoptions.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // QuestionAnsweringInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L251-L261 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L282-L292 type QuestionAnsweringInferenceOptions struct { // MaxAnswerLength The maximum answer length to consider MaxAnswerLength *int `json:"max_answer_length,omitempty"` @@ -35,6 +43,75 @@ type QuestionAnsweringInferenceOptions struct { Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` } +func (s *QuestionAnsweringInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_answer_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxAnswerLength = &value + case float64: + f := int(v) + s.MaxAnswerLength = &f + } + + case "num_top_classes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumTopClasses = &value + case float64: + f := int(v) + s.NumTopClasses = &f + } + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + 
if err := dec.Decode(&s.Tokenization); err != nil { + return err + } + + } + } + return nil +} + // NewQuestionAnsweringInferenceOptions returns a QuestionAnsweringInferenceOptions. func NewQuestionAnsweringInferenceOptions() *QuestionAnsweringInferenceOptions { r := &QuestionAnsweringInferenceOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/questionansweringinferenceupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/questionansweringinferenceupdateoptions.go index 2911f9150..edcff1846 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/questionansweringinferenceupdateoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/questionansweringinferenceupdateoptions.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // QuestionAnsweringInferenceUpdateOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L379-L390 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L420-L431 type QuestionAnsweringInferenceUpdateOptions struct { // MaxAnswerLength The maximum answer length to consider for extraction MaxAnswerLength *int `json:"max_answer_length,omitempty"` @@ -37,6 +45,87 @@ type QuestionAnsweringInferenceUpdateOptions struct { Tokenization *NlpTokenizationUpdateOptions `json:"tokenization,omitempty"` } +func (s *QuestionAnsweringInferenceUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_answer_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxAnswerLength = &value + case float64: + f := int(v) + s.MaxAnswerLength = &f + } + + case "num_top_classes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumTopClasses = &value + case float64: + f := int(v) + s.NumTopClasses = &f + } + + case "question": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Question = o + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return err + } + + } + } + return 
nil +} + // NewQuestionAnsweringInferenceUpdateOptions returns a QuestionAnsweringInferenceUpdateOptions. func NewQuestionAnsweringInferenceUpdateOptions() *QuestionAnsweringInferenceUpdateOptions { r := &QuestionAnsweringInferenceUpdateOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/randomscorefunction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/randomscorefunction.go index 4ae95f54d..b0c49d5fb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/randomscorefunction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/randomscorefunction.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RandomScoreFunction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L65-L68 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L127-L130 type RandomScoreFunction struct { Field *string `json:"field,omitempty"` Seed string `json:"seed,omitempty"` } +func (s *RandomScoreFunction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "seed": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Seed = o + + } + } + return nil +} + // NewRandomScoreFunction returns a RandomScoreFunction. func NewRandomScoreFunction() *RandomScoreFunction { r := &RandomScoreFunction{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rangeaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rangeaggregate.go index 20bd1e64f..771ec3c5d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rangeaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rangeaggregate.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // RangeAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L530-L531 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L531-L532 type RangeAggregate struct { - Buckets BucketsRangeBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsRangeBucket `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *RangeAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *RangeAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]RangeBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []RangeBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rangeaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rangeaggregation.go index 1005159c8..a0248465d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rangeaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rangeaggregation.go @@ -16,26 +16,130 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // RangeAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L289-L296 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L650-L670 type RangeAggregation struct { - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` - Keyed *bool `json:"keyed,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Missing *int `json:"missing,omitempty"` - Name *string `json:"name,omitempty"` - Ranges []AggregationRange `json:"ranges,omitempty"` - Script Script `json:"script,omitempty"` + // Field The date field whose values are use to build ranges. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Keyed Set to `true` to associate a unique string key with each bucket and return + // the ranges as a hash rather than an array. + Keyed *bool `json:"keyed,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing *int `json:"missing,omitempty"` + Name *string `json:"name,omitempty"` + // Ranges An array of ranges used to bucket documents. 
+ Ranges []AggregationRange `json:"ranges,omitempty"` + Script Script `json:"script,omitempty"` +} + +func (s *RangeAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "keyed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Keyed = &value + case bool: + s.Keyed = &v + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "missing": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Missing = &value + case float64: + f := int(v) + s.Missing = &f + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "ranges": + if err := dec.Decode(&s.Ranges); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil } // NewRangeAggregation returns a RangeAggregation. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rangebucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rangebucket.go index 1daf1756d..e03606738 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rangebucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rangebucket.go @@ -16,25 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // RangeBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L533-L540 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L534-L541 type RangeBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -47,6 +45,7 @@ type RangeBucket struct { } func (s *RangeBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -60,477 +59,601 @@ func (s *RangeBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := 
NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if 
err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } 
- s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - 
return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := 
make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "from": - if err := dec.Decode(&s.From); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.From = &f + case float64: + f := Float64(v) + s.From = &f } case "from_as_string": - if err := dec.Decode(&s.FromAsString); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FromAsString = &o case "key": - if err := dec.Decode(&s.Key); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = &o case "to": - if err := dec.Decode(&s.To); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.To = &f + case float64: + f := Float64(v) + s.To = &f } case "to_as_string": - if err := dec.Decode(&s.ToAsString); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ToAsString = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := 
NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != 
nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := 
dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + 
o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } } } @@ -556,6 +679,7 @@ func (s RangeBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rangequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rangequery.go index d3d1aabe2..d627d56a9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rangequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rangequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // DateRangeQuery // NumberRangeQuery // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/term.ts#L92-L94 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/term.ts#L166-L168 type RangeQuery interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/exists/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankcontainer.go similarity index 63% rename from vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/exists/response.go rename to vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankcontainer.go index 281db98b7..4864d833a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/indices/exists/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankcontainer.go @@ -16,19 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 -package exists +package types -// Response holds the response body struct for the package exists +// RankContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/exists/IndicesExistsResponse.ts#L22-L24 - -type Response struct { +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Rank.ts#L22-L28 +type RankContainer struct { + // Rrf The reciprocal rank fusion parameters + Rrf *RrfRank `json:"rrf,omitempty"` } -// NewResponse returns a Response -func NewResponse() *Response { - r := &Response{} +// NewRankContainer returns a RankContainer. +func NewRankContainer() *RankContainer { + r := &RankContainer{} + return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalhit.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalhit.go index 5a510d858..6e0300402 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalhit.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalhit.go @@ -16,19 +16,73 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RankEvalHit type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/rank_eval/types.ts#L141-L145 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/rank_eval/types.ts#L141-L145 type RankEvalHit struct { Id_ string `json:"_id"` Index_ string `json:"_index"` Score_ Float64 `json:"_score"` } +func (s *RankEvalHit) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Score_ = f + case float64: + f := Float64(v) + s.Score_ = f + } + + } + } + return nil +} + // NewRankEvalHit returns a RankEvalHit. func NewRankEvalHit() *RankEvalHit { r := &RankEvalHit{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalhititem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalhititem.go index b1818b830..cbaa66b88 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalhititem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalhititem.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // RankEvalHitItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/rank_eval/types.ts#L136-L139 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/rank_eval/types.ts#L136-L139 type RankEvalHitItem struct { Hit RankEvalHit `json:"hit"` Rating Float64 `json:"rating,omitempty"` } +func (s *RankEvalHitItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "hit": + if err := dec.Decode(&s.Hit); err != nil { + return err + } + + case "rating": + if err := dec.Decode(&s.Rating); err != nil { + return err + } + + } + } + return nil +} + // NewRankEvalHitItem returns a RankEvalHitItem. func NewRankEvalHitItem() *RankEvalHitItem { r := &RankEvalHitItem{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetric.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetric.go index 19cbad486..a8d4cd1a1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetric.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetric.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // RankEvalMetric type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/rank_eval/types.ts#L90-L96 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/rank_eval/types.ts#L90-L96 type RankEvalMetric struct { Dcg *RankEvalMetricDiscountedCumulativeGain `json:"dcg,omitempty"` ExpectedReciprocalRank *RankEvalMetricExpectedReciprocalRank `json:"expected_reciprocal_rank,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricdetail.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricdetail.go index 463b038ed..eac8b369a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricdetail.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricdetail.go @@ -16,17 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // RankEvalMetricDetail type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/rank_eval/types.ts#L125-L134 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/rank_eval/types.ts#L125-L134 type RankEvalMetricDetail struct { // Hits The hits section shows a grouping of the search results with their supplied // ratings @@ -44,6 +48,60 @@ type RankEvalMetricDetail struct { UnratedDocs []UnratedDocument `json:"unrated_docs"` } +func (s *RankEvalMetricDetail) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "hits": + if err := dec.Decode(&s.Hits); err != nil { + return err + } + + case "metric_details": + if s.MetricDetails == nil { + s.MetricDetails = make(map[string]map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.MetricDetails); err != nil { + return err + } + + case "metric_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.MetricScore = f + case float64: + f := Float64(v) + s.MetricScore = f + } + + case "unrated_docs": + if err := dec.Decode(&s.UnratedDocs); err != nil { + return err + } + + } + } + return nil +} + // NewRankEvalMetricDetail returns a RankEvalMetricDetail. 
func NewRankEvalMetricDetail() *RankEvalMetricDetail { r := &RankEvalMetricDetail{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricdiscountedcumulativegain.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricdiscountedcumulativegain.go index 4b256a8d2..7e282bd3f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricdiscountedcumulativegain.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricdiscountedcumulativegain.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RankEvalMetricDiscountedCumulativeGain type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/rank_eval/types.ts#L66-L77 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/rank_eval/types.ts#L66-L77 type RankEvalMetricDiscountedCumulativeGain struct { // K Sets the maximum number of documents retrieved per query. This value will act // in place of the usual size parameter in the query. 
@@ -31,6 +39,56 @@ type RankEvalMetricDiscountedCumulativeGain struct { Normalize *bool `json:"normalize,omitempty"` } +func (s *RankEvalMetricDiscountedCumulativeGain) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "k": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.K = &value + case float64: + f := int(v) + s.K = &f + } + + case "normalize": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Normalize = &value + case bool: + s.Normalize = &v + } + + } + } + return nil +} + // NewRankEvalMetricDiscountedCumulativeGain returns a RankEvalMetricDiscountedCumulativeGain. func NewRankEvalMetricDiscountedCumulativeGain() *RankEvalMetricDiscountedCumulativeGain { r := &RankEvalMetricDiscountedCumulativeGain{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricexpectedreciprocalrank.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricexpectedreciprocalrank.go index da3151a9e..dd3b3ef14 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricexpectedreciprocalrank.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricexpectedreciprocalrank.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RankEvalMetricExpectedReciprocalRank type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/rank_eval/types.ts#L79-L88 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/rank_eval/types.ts#L79-L88 type RankEvalMetricExpectedReciprocalRank struct { // K Sets the maximum number of documents retrieved per query. This value will act // in place of the usual size parameter in the query. @@ -31,6 +39,58 @@ type RankEvalMetricExpectedReciprocalRank struct { MaximumRelevance int `json:"maximum_relevance"` } +func (s *RankEvalMetricExpectedReciprocalRank) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "k": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.K = &value + case float64: + f := int(v) + s.K = &f + } + + case "maximum_relevance": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaximumRelevance = value + case float64: + f := int(v) + s.MaximumRelevance = f + } + + } + } + return nil +} + // NewRankEvalMetricExpectedReciprocalRank returns a RankEvalMetricExpectedReciprocalRank. 
func NewRankEvalMetricExpectedReciprocalRank() *RankEvalMetricExpectedReciprocalRank { r := &RankEvalMetricExpectedReciprocalRank{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricmeanreciprocalrank.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricmeanreciprocalrank.go index 19b230c55..9f636fa89 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricmeanreciprocalrank.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricmeanreciprocalrank.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RankEvalMetricMeanReciprocalRank type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/rank_eval/types.ts#L60-L64 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/rank_eval/types.ts#L60-L64 type RankEvalMetricMeanReciprocalRank struct { // K Sets the maximum number of documents retrieved per query. This value will act // in place of the usual size parameter in the query. 
@@ -32,6 +40,58 @@ type RankEvalMetricMeanReciprocalRank struct { RelevantRatingThreshold *int `json:"relevant_rating_threshold,omitempty"` } +func (s *RankEvalMetricMeanReciprocalRank) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "k": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.K = &value + case float64: + f := int(v) + s.K = &f + } + + case "relevant_rating_threshold": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.RelevantRatingThreshold = &value + case float64: + f := int(v) + s.RelevantRatingThreshold = &f + } + + } + } + return nil +} + // NewRankEvalMetricMeanReciprocalRank returns a RankEvalMetricMeanReciprocalRank. func NewRankEvalMetricMeanReciprocalRank() *RankEvalMetricMeanReciprocalRank { r := &RankEvalMetricMeanReciprocalRank{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricprecision.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricprecision.go index 1dd8eb9ce..8c561ed1c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricprecision.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricprecision.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RankEvalMetricPrecision type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/rank_eval/types.ts#L42-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/rank_eval/types.ts#L42-L52 type RankEvalMetricPrecision struct { // IgnoreUnlabeled Controls how unlabeled documents in the search results are counted. If set to // true, unlabeled documents are ignored and neither count as relevant or @@ -36,6 +44,72 @@ type RankEvalMetricPrecision struct { RelevantRatingThreshold *int `json:"relevant_rating_threshold,omitempty"` } +func (s *RankEvalMetricPrecision) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ignore_unlabeled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreUnlabeled = &value + case bool: + s.IgnoreUnlabeled = &v + } + + case "k": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.K = &value + case float64: + f := int(v) + s.K = &f + } + + case "relevant_rating_threshold": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.RelevantRatingThreshold = &value + case float64: + f := int(v) + s.RelevantRatingThreshold = &f 
+ } + + } + } + return nil +} + // NewRankEvalMetricPrecision returns a RankEvalMetricPrecision. func NewRankEvalMetricPrecision() *RankEvalMetricPrecision { r := &RankEvalMetricPrecision{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricratingtreshold.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricratingtreshold.go index ea0a9f851..b3f8dfb6b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricratingtreshold.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricratingtreshold.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RankEvalMetricRatingTreshold type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/rank_eval/types.ts#L34-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/rank_eval/types.ts#L34-L40 type RankEvalMetricRatingTreshold struct { // K Sets the maximum number of documents retrieved per query. This value will act // in place of the usual size parameter in the query. 
@@ -32,6 +40,58 @@ type RankEvalMetricRatingTreshold struct { RelevantRatingThreshold *int `json:"relevant_rating_threshold,omitempty"` } +func (s *RankEvalMetricRatingTreshold) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "k": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.K = &value + case float64: + f := int(v) + s.K = &f + } + + case "relevant_rating_threshold": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.RelevantRatingThreshold = &value + case float64: + f := int(v) + s.RelevantRatingThreshold = &f + } + + } + } + return nil +} + // NewRankEvalMetricRatingTreshold returns a RankEvalMetricRatingTreshold. func NewRankEvalMetricRatingTreshold() *RankEvalMetricRatingTreshold { r := &RankEvalMetricRatingTreshold{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricrecall.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricrecall.go index 425be91cd..0b3b0d115 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricrecall.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalmetricrecall.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RankEvalMetricRecall type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/rank_eval/types.ts#L54-L58 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/rank_eval/types.ts#L54-L58 type RankEvalMetricRecall struct { // K Sets the maximum number of documents retrieved per query. This value will act // in place of the usual size parameter in the query. @@ -32,6 +40,58 @@ type RankEvalMetricRecall struct { RelevantRatingThreshold *int `json:"relevant_rating_threshold,omitempty"` } +func (s *RankEvalMetricRecall) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "k": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.K = &value + case float64: + f := int(v) + s.K = &f + } + + case "relevant_rating_threshold": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.RelevantRatingThreshold = &value + case float64: + f := int(v) + s.RelevantRatingThreshold = &f + } + + } + } + return nil +} + // NewRankEvalMetricRecall returns a RankEvalMetricRecall. func NewRankEvalMetricRecall() *RankEvalMetricRecall { r := &RankEvalMetricRecall{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalquery.go index c8e2f4640..cc427aa91 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalquery.go @@ -16,18 +16,67 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RankEvalQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/rank_eval/types.ts#L111-L114 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/rank_eval/types.ts#L111-L114 type RankEvalQuery struct { Query Query `json:"query"` Size *int `json:"size,omitempty"` } +func (s *RankEvalQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil +} + // NewRankEvalQuery returns a RankEvalQuery. func NewRankEvalQuery() *RankEvalQuery { r := &RankEvalQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalrequestitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalrequestitem.go index 096338db5..86368a607 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalrequestitem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankevalrequestitem.go @@ -16,17 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // RankEvalRequestItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/rank_eval/types.ts#L98-L109 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/rank_eval/types.ts#L98-L109 type RankEvalRequestItem struct { // Id The search request’s ID, used to group result details later. Id string `json:"id"` @@ -40,6 +43,54 @@ type RankEvalRequestItem struct { TemplateId *string `json:"template_id,omitempty"` } +func (s *RankEvalRequestItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return err + } + + case "ratings": + if err := dec.Decode(&s.Ratings); err != nil { + return err + } + + case "request": + if err := dec.Decode(&s.Request); err != nil { + return err + } + + case "template_id": + if err := dec.Decode(&s.TemplateId); err != nil { + return err + } + + } + } + return nil +} + // NewRankEvalRequestItem returns a RankEvalRequestItem. 
func NewRankEvalRequestItem() *RankEvalRequestItem { r := &RankEvalRequestItem{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunction.go index 4258847b0..0726b1508 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunction.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // RankFeatureFunction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L137-L137 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L264-L264 type RankFeatureFunction struct { } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunctionlinear.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunctionlinear.go index e48cc647c..7feb7ee60 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunctionlinear.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunctionlinear.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // RankFeatureFunctionLinear type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L139-L139 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L266-L266 type RankFeatureFunctionLinear struct { } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunctionlogarithm.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunctionlogarithm.go index d9025fe59..b101c06b0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunctionlogarithm.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunctionlogarithm.go @@ -16,17 +16,62 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RankFeatureFunctionLogarithm type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L141-L143 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L268-L273 type RankFeatureFunctionLogarithm struct { + // ScalingFactor Configurable scaling factor. 
ScalingFactor float32 `json:"scaling_factor"` } +func (s *RankFeatureFunctionLogarithm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "scaling_factor": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.ScalingFactor = f + case float64: + f := float32(v) + s.ScalingFactor = f + } + + } + } + return nil +} + // NewRankFeatureFunctionLogarithm returns a RankFeatureFunctionLogarithm. func NewRankFeatureFunctionLogarithm() *RankFeatureFunctionLogarithm { r := &RankFeatureFunctionLogarithm{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunctionsaturation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunctionsaturation.go index 749bee11d..519795652 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunctionsaturation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunctionsaturation.go @@ -16,17 +16,62 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RankFeatureFunctionSaturation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L145-L147 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L275-L280 type RankFeatureFunctionSaturation struct { + // Pivot Configurable pivot value so that the result will be less than 0.5. Pivot *float32 `json:"pivot,omitempty"` } +func (s *RankFeatureFunctionSaturation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "pivot": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Pivot = &f + case float64: + f := float32(v) + s.Pivot = &f + } + + } + } + return nil +} + // NewRankFeatureFunctionSaturation returns a RankFeatureFunctionSaturation. func NewRankFeatureFunctionSaturation() *RankFeatureFunctionSaturation { r := &RankFeatureFunctionSaturation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunctionsigmoid.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunctionsigmoid.go index 452682717..42dae3312 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunctionsigmoid.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturefunctionsigmoid.go @@ -16,16 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RankFeatureFunctionSigmoid type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L149-L152 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L282-L291 type RankFeatureFunctionSigmoid struct { + // Exponent Configurable Exponent. Exponent float32 `json:"exponent"` - Pivot float32 `json:"pivot"` + // Pivot Configurable pivot value so that the result will be less than 0.5. + Pivot float32 `json:"pivot"` +} + +func (s *RankFeatureFunctionSigmoid) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "exponent": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Exponent = f + case float64: + f := float32(v) + s.Exponent = f + } + + case "pivot": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Pivot = f + case float64: + f := float32(v) + s.Pivot = f + } + + } + } + return nil } // NewRankFeatureFunctionSigmoid returns a RankFeatureFunctionSigmoid. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeatureproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeatureproperty.go index d8bcc8af2..f8af4cffb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeatureproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeatureproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // RankFeatureProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L181-L184 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L184-L187 type RankFeatureProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` @@ -45,6 +45,7 @@ type RankFeatureProperty struct { } func (s *RankFeatureProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -64,6 +65,9 @@ func (s *RankFeatureProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -72,7 +76,9 @@ func (s *RankFeatureProperty) UnmarshalJSON(data []byte) error { localDec := 
json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -351,28 +357,56 @@ func (s *RankFeatureProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "positive_score_impact": - if err := dec.Decode(&s.PositiveScoreImpact); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.PositiveScoreImpact = &value + case bool: + s.PositiveScoreImpact = &v } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -381,7 +415,9 @@ func (s *RankFeatureProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -660,9 +696,11 @@ func (s *RankFeatureProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + 
s.Properties[key] = oo } } @@ -676,6 +714,24 @@ func (s *RankFeatureProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s RankFeatureProperty) MarshalJSON() ([]byte, error) { + type innerRankFeatureProperty RankFeatureProperty + tmp := innerRankFeatureProperty{ + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + PositiveScoreImpact: s.PositiveScoreImpact, + Properties: s.Properties, + Type: s.Type, + } + + tmp.Type = "rank_feature" + + return json.Marshal(tmp) +} + // NewRankFeatureProperty returns a RankFeatureProperty. func NewRankFeatureProperty() *RankFeatureProperty { r := &RankFeatureProperty{ @@ -684,7 +740,5 @@ func NewRankFeatureProperty() *RankFeatureProperty { Properties: make(map[string]Property, 0), } - r.Type = "rank_feature" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturequery.go index 108178301..36788a782 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturequery.go @@ -16,21 +16,116 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RankFeatureQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L154-L162 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L293-L316 type RankFeatureQuery struct { - Boost *float32 `json:"boost,omitempty"` - Field string `json:"field"` - Linear *RankFeatureFunctionLinear `json:"linear,omitempty"` - Log *RankFeatureFunctionLogarithm `json:"log,omitempty"` - QueryName_ *string `json:"_name,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Field `rank_feature` or `rank_features` field used to boost relevance scores. + Field string `json:"field"` + // Linear Linear function used to boost relevance scores based on the value of the rank + // feature `field`. + Linear *RankFeatureFunctionLinear `json:"linear,omitempty"` + // Log Logarithmic function used to boost relevance scores based on the value of the + // rank feature `field`. + Log *RankFeatureFunctionLogarithm `json:"log,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Saturation Saturation function used to boost relevance scores based on the value of the + // rank feature `field`. Saturation *RankFeatureFunctionSaturation `json:"saturation,omitempty"` - Sigmoid *RankFeatureFunctionSigmoid `json:"sigmoid,omitempty"` + // Sigmoid Sigmoid function used to boost relevance scores based on the value of the + // rank feature `field`. 
+ Sigmoid *RankFeatureFunctionSigmoid `json:"sigmoid,omitempty"` +} + +func (s *RankFeatureQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "linear": + if err := dec.Decode(&s.Linear); err != nil { + return err + } + + case "log": + if err := dec.Decode(&s.Log); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "saturation": + if err := dec.Decode(&s.Saturation); err != nil { + return err + } + + case "sigmoid": + if err := dec.Decode(&s.Sigmoid); err != nil { + return err + } + + } + } + return nil } // NewRankFeatureQuery returns a RankFeatureQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturesproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturesproperty.go index 5407e6586..9e02d7e78 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturesproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rankfeaturesproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // RankFeaturesProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L186-L188 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L189-L191 type RankFeaturesProperty struct { Dynamic *dynamicmapping.DynamicMapping `json:"dynamic,omitempty"` Fields map[string]Property `json:"fields,omitempty"` @@ -44,6 +44,7 @@ type RankFeaturesProperty struct { } func (s *RankFeaturesProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -63,6 +64,9 @@ func (s *RankFeaturesProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -71,7 +75,9 @@ func (s *RankFeaturesProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -350,23 +356,42 @@ func (s *RankFeaturesProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := 
dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -375,7 +400,9 @@ func (s *RankFeaturesProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -654,9 +681,11 @@ func (s *RankFeaturesProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -670,6 +699,23 @@ func (s *RankFeaturesProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s RankFeaturesProperty) MarshalJSON() ([]byte, error) { + type innerRankFeaturesProperty RankFeaturesProperty + tmp := innerRankFeaturesProperty{ + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Type: s.Type, + } + + tmp.Type = "rank_features" + + return json.Marshal(tmp) +} + // NewRankFeaturesProperty returns a RankFeaturesProperty. 
func NewRankFeaturesProperty() *RankFeaturesProperty { r := &RankFeaturesProperty{ @@ -678,7 +724,5 @@ func NewRankFeaturesProperty() *RankFeaturesProperty { Properties: make(map[string]Property, 0), } - r.Type = "rank_features" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/raretermsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/raretermsaggregation.go index 2e02713c6..8c0f75558 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/raretermsaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/raretermsaggregation.go @@ -16,27 +16,150 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // RareTermsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L304-L312 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L687-L717 type RareTermsAggregation struct { - Exclude []string `json:"exclude,omitempty"` - Field *string `json:"field,omitempty"` - Include TermsInclude `json:"include,omitempty"` - MaxDocCount *int64 `json:"max_doc_count,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Missing Missing `json:"missing,omitempty"` - Name *string `json:"name,omitempty"` - Precision *Float64 `json:"precision,omitempty"` - ValueType *string `json:"value_type,omitempty"` + // Exclude Terms that should be excluded from the aggregation. 
+ Exclude []string `json:"exclude,omitempty"` + // Field The field from which to return rare terms. + Field *string `json:"field,omitempty"` + // Include Terms that should be included in the aggregation. + Include TermsInclude `json:"include,omitempty"` + // MaxDocCount The maximum number of documents a term should appear in. + MaxDocCount *int64 `json:"max_doc_count,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Name *string `json:"name,omitempty"` + // Precision The precision of the internal CuckooFilters. + // Smaller precision leads to better approximation, but higher memory usage. + Precision *Float64 `json:"precision,omitempty"` + ValueType *string `json:"value_type,omitempty"` +} + +func (s *RareTermsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "exclude": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Exclude = append(s.Exclude, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Exclude); err != nil { + return err + } + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "include": + if err := dec.Decode(&s.Include); err != nil { + return err + } + + case "max_doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MaxDocCount = &value + case float64: + f := int64(v) + s.MaxDocCount = &f + } + + case "meta": + if err := dec.Decode(&s.Meta); err 
!= nil { + return err + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "precision": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Precision = &f + case float64: + f := Float64(v) + s.Precision = &f + } + + case "value_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueType = &o + + } + } + return nil } // NewRareTermsAggregation returns a RareTermsAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rateaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rateaggregate.go index ca08a02d9..5916d4175 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rateaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rateaggregate.go @@ -16,21 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // RateAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L732-L736 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L741-L745 type RateAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Value Float64 `json:"value"` - ValueAsString *string `json:"value_as_string,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Value Float64 `json:"value"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *RateAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "value": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Value = f + case float64: + f := Float64(v) + s.Value = f + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil } // NewRateAggregate returns a RateAggregate. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rateaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rateaggregation.go index 0c6804f75..294223c63 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rateaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rateaggregation.go @@ -16,25 +16,94 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/calendarinterval" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ratemode" ) // RateAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L127-L130 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L230-L241 type RateAggregation struct { - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` - Missing Missing `json:"missing,omitempty"` - Mode *ratemode.RateMode `json:"mode,omitempty"` - Script Script `json:"script,omitempty"` - Unit *calendarinterval.CalendarInterval `json:"unit,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + // Mode How the rate is calculated. + Mode *ratemode.RateMode `json:"mode,omitempty"` + Script Script `json:"script,omitempty"` + // Unit The interval used to calculate the rate. + // By default, the interval of the `date_histogram` is used. 
+ Unit *calendarinterval.CalendarInterval `json:"unit,omitempty"` +} + +func (s *RateAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "unit": + if err := dec.Decode(&s.Unit); err != nil { + return err + } + + } + } + return nil } // NewRateAggregation returns a RateAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/readexception.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/readexception.go index 3df44b59a..5c4e7b98b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/readexception.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/readexception.go @@ -16,19 +16,73 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ReadException type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ccr/_types/FollowIndexStats.ts#L71-L75 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ccr/_types/FollowIndexStats.ts#L71-L75 type ReadException struct { Exception ErrorCause `json:"exception"` FromSeqNo int64 `json:"from_seq_no"` Retries int `json:"retries"` } +func (s *ReadException) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "exception": + if err := dec.Decode(&s.Exception); err != nil { + return err + } + + case "from_seq_no": + if err := dec.Decode(&s.FromSeqNo); err != nil { + return err + } + + case "retries": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Retries = value + case float64: + f := int(v) + s.Retries = f + } + + } + } + return nil +} + // NewReadException returns a ReadException. func NewReadException() *ReadException { r := &ReadException{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/realmcache.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/realmcache.go index 8b31b5b80..12289f9cf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/realmcache.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/realmcache.go @@ -16,17 +16,60 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RealmCache type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L260-L262 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L266-L268 type RealmCache struct { Size int64 `json:"size"` } +func (s *RealmCache) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "size": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Size = value + case float64: + f := int64(v) + s.Size = f + } + + } + } + return nil +} + // NewRealmCache returns a RealmCache. func NewRealmCache() *RealmCache { r := &RealmCache{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/realminfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/realminfo.go index 4cb36616d..bf6d38338 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/realminfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/realminfo.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RealmInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/RealmInfo.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/RealmInfo.ts#L22-L25 type RealmInfo struct { Name string `json:"name"` Type string `json:"type"` } +func (s *RealmInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewRealmInfo returns a RealmInfo. func NewRealmInfo() *RealmInfo { r := &RealmInfo{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recording.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recording.go index 4b8e0509a..f2914e158 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recording.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recording.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Recording type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L94-L99 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L225-L230 type Recording struct { CumulativeExecutionCount *int64 `json:"cumulative_execution_count,omitempty"` CumulativeExecutionTime Duration `json:"cumulative_execution_time,omitempty"` @@ -30,6 +38,63 @@ type Recording struct { Name *string `json:"name,omitempty"` } +func (s *Recording) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cumulative_execution_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CumulativeExecutionCount = &value + case float64: + f := int64(v) + s.CumulativeExecutionCount = &f + } + + case "cumulative_execution_time": + if err := dec.Decode(&s.CumulativeExecutionTime); err != nil { + return err + } + + case "cumulative_execution_time_millis": + if err := dec.Decode(&s.CumulativeExecutionTimeMillis); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + } + } + return nil +} + // NewRecording returns a Recording. 
func NewRecording() *Recording { r := &Recording{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoverybytes.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoverybytes.go index 1ad12cdd8..1a3cd8da6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoverybytes.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoverybytes.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // RecoveryBytes type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/recovery/types.ts#L38-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/recovery/types.ts#L38-L48 type RecoveryBytes struct { Percent Percentage `json:"percent"` Recovered ByteSize `json:"recovered,omitempty"` @@ -35,6 +42,71 @@ type RecoveryBytes struct { TotalInBytes ByteSize `json:"total_in_bytes"` } +func (s *RecoveryBytes) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "percent": + if err := dec.Decode(&s.Percent); err != nil { + return err + } + + case "recovered": + if err := dec.Decode(&s.Recovered); err != nil { + return err + } + + case "recovered_from_snapshot": + if err := dec.Decode(&s.RecoveredFromSnapshot); err != nil { + return err + } + + case "recovered_from_snapshot_in_bytes": + if err := dec.Decode(&s.RecoveredFromSnapshotInBytes); err != nil 
{ + return err + } + + case "recovered_in_bytes": + if err := dec.Decode(&s.RecoveredInBytes); err != nil { + return err + } + + case "reused": + if err := dec.Decode(&s.Reused); err != nil { + return err + } + + case "reused_in_bytes": + if err := dec.Decode(&s.ReusedInBytes); err != nil { + return err + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return err + } + + case "total_in_bytes": + if err := dec.Decode(&s.TotalInBytes); err != nil { + return err + } + + } + } + return nil +} + // NewRecoveryBytes returns a RecoveryBytes. func NewRecoveryBytes() *RecoveryBytes { r := &RecoveryBytes{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoveryfiles.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoveryfiles.go index dc5d09f99..b4d078b31 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoveryfiles.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoveryfiles.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RecoveryFiles type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/recovery/types.ts#L56-L62 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/recovery/types.ts#L56-L62 type RecoveryFiles struct { Details []FileDetails `json:"details,omitempty"` Percent Percentage `json:"percent"` @@ -31,6 +39,81 @@ type RecoveryFiles struct { Total int64 `json:"total"` } +func (s *RecoveryFiles) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return err + } + + case "percent": + if err := dec.Decode(&s.Percent); err != nil { + return err + } + + case "recovered": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Recovered = value + case float64: + f := int64(v) + s.Recovered = f + } + + case "reused": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Reused = value + case float64: + f := int64(v) + s.Reused = f + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + // NewRecoveryFiles returns a RecoveryFiles. 
func NewRecoveryFiles() *RecoveryFiles { r := &RecoveryFiles{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoveryindexstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoveryindexstatus.go index 163a5144c..4bbdfca34 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoveryindexstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoveryindexstatus.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // RecoveryIndexStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/recovery/types.ts#L64-L74 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/recovery/types.ts#L64-L74 type RecoveryIndexStatus struct { Bytes *RecoveryBytes `json:"bytes,omitempty"` Files RecoveryFiles `json:"files"` @@ -35,6 +42,71 @@ type RecoveryIndexStatus struct { TotalTimeInMillis int64 `json:"total_time_in_millis"` } +func (s *RecoveryIndexStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bytes": + if err := dec.Decode(&s.Bytes); err != nil { + return err + } + + case "files": + if err := dec.Decode(&s.Files); err != nil { + return err + } + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return err + } + + case "source_throttle_time": + if err := dec.Decode(&s.SourceThrottleTime); err != nil { + 
return err + } + + case "source_throttle_time_in_millis": + if err := dec.Decode(&s.SourceThrottleTimeInMillis); err != nil { + return err + } + + case "target_throttle_time": + if err := dec.Decode(&s.TargetThrottleTime); err != nil { + return err + } + + case "target_throttle_time_in_millis": + if err := dec.Decode(&s.TargetThrottleTimeInMillis); err != nil { + return err + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return err + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return err + } + + } + } + return nil +} + // NewRecoveryIndexStatus returns a RecoveryIndexStatus. func NewRecoveryIndexStatus() *RecoveryIndexStatus { r := &RecoveryIndexStatus{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoveryorigin.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoveryorigin.go index 6d7215e85..789a04f98 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoveryorigin.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoveryorigin.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RecoveryOrigin type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/recovery/types.ts#L76-L89 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/recovery/types.ts#L76-L89 type RecoveryOrigin struct { BootstrapNewHistoryUuid *bool `json:"bootstrap_new_history_uuid,omitempty"` Host *string `json:"host,omitempty"` @@ -38,6 +46,102 @@ type RecoveryOrigin struct { Version *string `json:"version,omitempty"` } +func (s *RecoveryOrigin) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bootstrap_new_history_uuid": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.BootstrapNewHistoryUuid = &value + case bool: + s.BootstrapNewHistoryUuid = &v + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return err + } + + case "hostname": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Hostname = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "ip": + if err := dec.Decode(&s.Ip); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "repository": + if err := dec.Decode(&s.Repository); err != nil { + return err + } + + case "restoreUUID": + if err := dec.Decode(&s.RestoreUUID); err != nil { + return err + } + + case "snapshot": + if err := dec.Decode(&s.Snapshot); err != nil { + return err + } + + case "transport_address": + if err := 
dec.Decode(&s.TransportAddress); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewRecoveryOrigin returns a RecoveryOrigin. func NewRecoveryOrigin() *RecoveryOrigin { r := &RecoveryOrigin{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoveryrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoveryrecord.go index 859b510e6..549da9c78 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoveryrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoveryrecord.go @@ -16,68 +16,345 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RecoveryRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/recovery/types.ts#L24-L155 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/recovery/types.ts#L24-L155 type RecoveryRecord struct { - // Bytes number of bytes to recover + // Bytes The number of bytes to recover. Bytes *string `json:"bytes,omitempty"` - // BytesPercent percent of bytes recovered + // BytesPercent The ratio of bytes recovered. BytesPercent Percentage `json:"bytes_percent,omitempty"` - // BytesRecovered bytes recovered + // BytesRecovered The bytes recovered. BytesRecovered *string `json:"bytes_recovered,omitempty"` - // BytesTotal total number of bytes + // BytesTotal The total number of bytes. 
BytesTotal *string `json:"bytes_total,omitempty"` - // Files number of files to recover + // Files The number of files to recover. Files *string `json:"files,omitempty"` - // FilesPercent percent of files recovered + // FilesPercent The ratio of files recovered. FilesPercent Percentage `json:"files_percent,omitempty"` - // FilesRecovered files recovered + // FilesRecovered The files recovered. FilesRecovered *string `json:"files_recovered,omitempty"` - // FilesTotal total number of files + // FilesTotal The total number of files. FilesTotal *string `json:"files_total,omitempty"` - // Index index name + // Index The index name. Index *string `json:"index,omitempty"` - // Repository repository + // Repository The repository name. Repository *string `json:"repository,omitempty"` - // Shard shard name + // Shard The shard name. Shard *string `json:"shard,omitempty"` - // Snapshot snapshot + // Snapshot The snapshot name. Snapshot *string `json:"snapshot,omitempty"` - // SourceHost source host + // SourceHost The source host. SourceHost *string `json:"source_host,omitempty"` - // SourceNode source node name + // SourceNode The source node name. SourceNode *string `json:"source_node,omitempty"` - // Stage recovery stage + // Stage The recovery stage. Stage *string `json:"stage,omitempty"` - // StartTime recovery start time + // StartTime The recovery start time. StartTime DateTime `json:"start_time,omitempty"` - // StartTimeMillis recovery start time in epoch milliseconds + // StartTimeMillis The recovery start time in epoch milliseconds. StartTimeMillis *int64 `json:"start_time_millis,omitempty"` - // StopTime recovery stop time + // StopTime The recovery stop time. StopTime DateTime `json:"stop_time,omitempty"` - // StopTimeMillis recovery stop time in epoch milliseconds + // StopTimeMillis The recovery stop time in epoch milliseconds. StopTimeMillis *int64 `json:"stop_time_millis,omitempty"` - // TargetHost target host + // TargetHost The target host. 
TargetHost *string `json:"target_host,omitempty"` - // TargetNode target node name + // TargetNode The target node name. TargetNode *string `json:"target_node,omitempty"` - // Time recovery time + // Time The recovery time. Time Duration `json:"time,omitempty"` - // TranslogOps number of translog ops to recover + // TranslogOps The number of translog operations to recover. TranslogOps *string `json:"translog_ops,omitempty"` - // TranslogOpsPercent percent of translog ops recovered + // TranslogOpsPercent The ratio of translog operations recovered. TranslogOpsPercent Percentage `json:"translog_ops_percent,omitempty"` - // TranslogOpsRecovered translog ops recovered + // TranslogOpsRecovered The translog operations recovered. TranslogOpsRecovered *string `json:"translog_ops_recovered,omitempty"` - // Type recovery type + // Type The recovery type. Type *string `json:"type,omitempty"` } +func (s *RecoveryRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bytes", "b": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Bytes = &o + + case "bytes_percent", "bp": + if err := dec.Decode(&s.BytesPercent); err != nil { + return err + } + + case "bytes_recovered", "br": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BytesRecovered = &o + + case "bytes_total", "tb": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BytesTotal = &o + + case "files", "f": + var tmp json.RawMessage + if err := 
dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Files = &o + + case "files_percent", "fp": + if err := dec.Decode(&s.FilesPercent); err != nil { + return err + } + + case "files_recovered", "fr": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FilesRecovered = &o + + case "files_total", "tf": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FilesTotal = &o + + case "index", "i", "idx": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "repository", "rep": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Repository = &o + + case "shard", "s", "sh": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Shard = &o + + case "snapshot", "snap": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Snapshot = &o + + case "source_host", "shost": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SourceHost = &o + + case "source_node", "snode": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SourceNode = &o + + case "stage", 
"st": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Stage = &o + + case "start_time", "start": + if err := dec.Decode(&s.StartTime); err != nil { + return err + } + + case "start_time_millis", "start_millis": + if err := dec.Decode(&s.StartTimeMillis); err != nil { + return err + } + + case "stop_time", "stop": + if err := dec.Decode(&s.StopTime); err != nil { + return err + } + + case "stop_time_millis", "stop_millis": + if err := dec.Decode(&s.StopTimeMillis); err != nil { + return err + } + + case "target_host", "thost": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TargetHost = &o + + case "target_node", "tnode": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TargetNode = &o + + case "time", "t", "ti": + if err := dec.Decode(&s.Time); err != nil { + return err + } + + case "translog_ops", "to": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TranslogOps = &o + + case "translog_ops_percent", "top": + if err := dec.Decode(&s.TranslogOpsPercent); err != nil { + return err + } + + case "translog_ops_recovered", "tor": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TranslogOpsRecovered = &o + + case "type", "ty": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + 
} + s.Type = &o + + } + } + return nil +} + // NewRecoveryRecord returns a RecoveryRecord. func NewRecoveryRecord() *RecoveryRecord { r := &RecoveryRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoverystartstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoverystartstatus.go index 3179ca67e..c575a050d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoverystartstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoverystartstatus.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // RecoveryStartStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/recovery/types.ts#L91-L96 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/recovery/types.ts#L91-L96 type RecoveryStartStatus struct { CheckIndexTime Duration `json:"check_index_time,omitempty"` CheckIndexTimeInMillis int64 `json:"check_index_time_in_millis"` @@ -30,6 +37,46 @@ type RecoveryStartStatus struct { TotalTimeInMillis int64 `json:"total_time_in_millis"` } +func (s *RecoveryStartStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "check_index_time": + if err := dec.Decode(&s.CheckIndexTime); err != nil { + return err + } + + case "check_index_time_in_millis": + if err := dec.Decode(&s.CheckIndexTimeInMillis); err != nil 
{ + return err + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return err + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return err + } + + } + } + return nil +} + // NewRecoveryStartStatus returns a RecoveryStartStatus. func NewRecoveryStartStatus() *RecoveryStartStatus { r := &RecoveryStartStatus{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoverystats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoverystats.go index bdf2c05b0..17393b448 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoverystats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoverystats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RecoveryStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L161-L166 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L228-L233 type RecoveryStats struct { CurrentAsSource int64 `json:"current_as_source"` CurrentAsTarget int64 `json:"current_as_target"` @@ -30,6 +38,66 @@ type RecoveryStats struct { ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` } +func (s *RecoveryStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current_as_source": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CurrentAsSource = value + case float64: + f := int64(v) + s.CurrentAsSource = f + } + + case "current_as_target": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CurrentAsTarget = value + case float64: + f := int64(v) + s.CurrentAsTarget = f + } + + case "throttle_time": + if err := dec.Decode(&s.ThrottleTime); err != nil { + return err + } + + case "throttle_time_in_millis": + if err := dec.Decode(&s.ThrottleTimeInMillis); err != nil { + return err + } + + } + } + return nil +} + // NewRecoveryStats returns a RecoveryStats. 
func NewRecoveryStats() *RecoveryStats { r := &RecoveryStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoverystatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoverystatus.go index f9fc87adb..3c72f941d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoverystatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/recoverystatus.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // RecoveryStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/recovery/types.ts#L98-L100 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/recovery/types.ts#L98-L100 type RecoveryStatus struct { Shards []ShardRecovery `json:"shards"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/refreshstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/refreshstats.go index 0cb09297f..476500a09 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/refreshstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/refreshstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RefreshStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L168-L175 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L235-L242 type RefreshStats struct { ExternalTotal int64 `json:"external_total"` ExternalTotalTimeInMillis int64 `json:"external_total_time_in_millis"` @@ -32,6 +40,86 @@ type RefreshStats struct { TotalTimeInMillis int64 `json:"total_time_in_millis"` } +func (s *RefreshStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "external_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ExternalTotal = value + case float64: + f := int64(v) + s.ExternalTotal = f + } + + case "external_total_time_in_millis": + if err := dec.Decode(&s.ExternalTotalTimeInMillis); err != nil { + return err + } + + case "listeners": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Listeners = value + case float64: + f := int64(v) + s.Listeners = f + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return err + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return err + } + + } + } + return nil +} + // NewRefreshStats returns a RefreshStats. 
func NewRefreshStats() *RefreshStats { r := &RefreshStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/regexoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/regexoptions.go new file mode 100644 index 000000000..38730487d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/regexoptions.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// RegexOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L180-L191 +type RegexOptions struct { + // Flags Optional operators for the regular expression. + Flags string `json:"flags,omitempty"` + // MaxDeterminizedStates Maximum number of automaton states required for the query. 
+ MaxDeterminizedStates *int `json:"max_determinized_states,omitempty"` +} + +func (s *RegexOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "flags": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Flags = o + + case "max_determinized_states": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxDeterminizedStates = &value + case float64: + f := int(v) + s.MaxDeterminizedStates = &f + } + + } + } + return nil +} + +// NewRegexOptions returns a RegexOptions. +func NewRegexOptions() *RegexOptions { + r := &RegexOptions{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/regexpquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/regexpquery.go index 13811bcdf..ee72a5831 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/regexpquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/regexpquery.go @@ -16,21 +16,154 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RegexpQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/term.ts#L102-L114 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/term.ts#L185-L215 type RegexpQuery struct { - Boost *float32 `json:"boost,omitempty"` - CaseInsensitive *bool `json:"case_insensitive,omitempty"` - Flags *string `json:"flags,omitempty"` - MaxDeterminizedStates *int `json:"max_determinized_states,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Rewrite *string `json:"rewrite,omitempty"` - Value string `json:"value"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // CaseInsensitive Allows case insensitive matching of the regular expression value with the + // indexed field values when set to `true`. + // When `false`, case sensitivity of matching depends on the underlying field’s + // mapping. + CaseInsensitive *bool `json:"case_insensitive,omitempty"` + // Flags Enables optional operators for the regular expression. + Flags *string `json:"flags,omitempty"` + // MaxDeterminizedStates Maximum number of automaton states required for the query. + MaxDeterminizedStates *int `json:"max_determinized_states,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Rewrite Method used to rewrite the query. + Rewrite *string `json:"rewrite,omitempty"` + // Value Regular expression for terms you wish to find in the provided field. 
+ Value string `json:"value"` +} + +func (s *RegexpQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Value) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "case_insensitive": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.CaseInsensitive = &value + case bool: + s.CaseInsensitive = &v + } + + case "flags": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Flags = &o + + case "max_determinized_states": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxDeterminizedStates = &value + case float64: + f := int(v) + s.MaxDeterminizedStates = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "rewrite": + if err := dec.Decode(&s.Rewrite); err != nil { + return err + } + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Value = o + + } + } + return nil } // NewRegexpQuery returns a 
RegexpQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/regressioninferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/regressioninferenceoptions.go index ed6892188..5f2888849 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/regressioninferenceoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/regressioninferenceoptions.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RegressionInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L69-L78 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L82-L91 type RegressionInferenceOptions struct { // NumTopFeatureImportanceValues Specifies the maximum number of feature importance values per document. 
NumTopFeatureImportanceValues *int `json:"num_top_feature_importance_values,omitempty"` @@ -31,6 +39,47 @@ type RegressionInferenceOptions struct { ResultsField *string `json:"results_field,omitempty"` } +func (s *RegressionInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "num_top_feature_importance_values": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumTopFeatureImportanceValues = &value + case float64: + f := int(v) + s.NumTopFeatureImportanceValues = &f + } + + case "results_field": + if err := dec.Decode(&s.ResultsField); err != nil { + return err + } + + } + } + return nil +} + // NewRegressionInferenceOptions returns a RegressionInferenceOptions. func NewRegressionInferenceOptions() *RegressionInferenceOptions { r := &RegressionInferenceOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindexdestination.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindexdestination.go index bd2f97dd2..2195e1309 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindexdestination.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindexdestination.go @@ -16,26 +16,94 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/optype" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" ) // ReindexDestination type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/reindex/types.ts#L39-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/reindex/types.ts#L39-L64 type ReindexDestination struct { - Index string `json:"index"` - OpType *optype.OpType `json:"op_type,omitempty"` - Pipeline *string `json:"pipeline,omitempty"` - Routing *string `json:"routing,omitempty"` + // Index The name of the data stream, index, or index alias you are copying to. + Index string `json:"index"` + // OpType Set to `create` to only index documents that do not already exist. + // Important: To reindex to a data stream destination, this argument must be + // `create`. + OpType *optype.OpType `json:"op_type,omitempty"` + // Pipeline The name of the pipeline to use. + Pipeline *string `json:"pipeline,omitempty"` + // Routing By default, a document's routing is preserved unless it’s changed by the + // script. + // Set to `discard` to set routing to `null`, or `=value` to route using the + // specified `value`. + Routing *string `json:"routing,omitempty"` + // VersionType The versioning to use for the indexing operation. 
VersionType *versiontype.VersionType `json:"version_type,omitempty"` } +func (s *ReindexDestination) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "op_type": + if err := dec.Decode(&s.OpType); err != nil { + return err + } + + case "pipeline": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pipeline = &o + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return err + } + + } + } + return nil +} + // NewReindexDestination returns a ReindexDestination. func NewReindexDestination() *ReindexDestination { r := &ReindexDestination{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindexnode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindexnode.go index cbddd5683..06c67d6c7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindexnode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindexnode.go @@ -16,32 +16,98 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole" ) // ReindexNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/reindex_rethrottle/types.ts#L33-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/reindex_rethrottle/types.ts#L33-L35 type ReindexNode struct { Attributes map[string]string `json:"attributes"` Host string `json:"host"` Ip string `json:"ip"` Name string `json:"name"` Roles []noderole.NodeRole `json:"roles,omitempty"` - Tasks map[TaskId]ReindexTask `json:"tasks"` + Tasks map[string]ReindexTask `json:"tasks"` TransportAddress string `json:"transport_address"` } +func (s *ReindexNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return err + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return err + } + + case "ip": + if err := dec.Decode(&s.Ip); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "tasks": + if s.Tasks == nil { + s.Tasks = make(map[string]ReindexTask, 0) + } + if err := dec.Decode(&s.Tasks); err != nil { + return err + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return err + } + + } + } + return nil +} + // NewReindexNode returns a ReindexNode. 
func NewReindexNode() *ReindexNode { r := &ReindexNode{ Attributes: make(map[string]string, 0), - Tasks: make(map[TaskId]ReindexTask, 0), + Tasks: make(map[string]ReindexTask, 0), } return r diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindexsource.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindexsource.go index 39c1206ff..2de39cdd8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindexsource.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindexsource.go @@ -16,22 +16,145 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ReindexSource type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/reindex/types.ts#L47-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/reindex/types.ts#L66-L97 type ReindexSource struct { - Index []string `json:"index"` - Query *Query `json:"query,omitempty"` - Remote *RemoteSource `json:"remote,omitempty"` - RuntimeMappings map[string]RuntimeField `json:"runtime_mappings,omitempty"` - Size *int `json:"size,omitempty"` - Slice *SlicedScroll `json:"slice,omitempty"` - Sort []SortCombinations `json:"sort,omitempty"` - SourceFields_ []string `json:"_source,omitempty"` + // Index The name of the data stream, index, or alias you are copying from. + // Accepts a comma-separated list to reindex from multiple sources. + Index []string `json:"index"` + // Query Specifies the documents to reindex using the Query DSL. 
+ Query *Query `json:"query,omitempty"` + // Remote A remote instance of Elasticsearch that you want to index from. + Remote *RemoteSource `json:"remote,omitempty"` + RuntimeMappings RuntimeFields `json:"runtime_mappings,omitempty"` + // Size The number of documents to index per batch. + // Use when indexing from remote to ensure that the batches fit within the + // on-heap buffer, which defaults to a maximum size of 100 MB. + Size *int `json:"size,omitempty"` + // Slice Slice the reindex request manually using the provided slice ID and total + // number of slices. + Slice *SlicedScroll `json:"slice,omitempty"` + Sort []SortCombinations `json:"sort,omitempty"` + // SourceFields_ If `true` reindexes all source fields. + // Set to a list to reindex select fields. + SourceFields_ []string `json:"_source,omitempty"` +} + +func (s *ReindexSource) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Index = append(s.Index, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Index); err != nil { + return err + } + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "remote": + if err := dec.Decode(&s.Remote); err != nil { + return err + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return err + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "slice": + if err := dec.Decode(&s.Slice); 
err != nil { + return err + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return err + } + } + + case "_source": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.SourceFields_ = append(s.SourceFields_, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.SourceFields_); err != nil { + return err + } + } + + } + } + return nil } // NewReindexSource returns a ReindexSource. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindexstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindexstatus.go index 45a2ff9e1..aefe26db4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindexstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindexstatus.go @@ -16,27 +16,218 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ReindexStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/reindex_rethrottle/types.ts#L37-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/reindex_rethrottle/types.ts#L37-L85 type ReindexStatus struct { - Batches int64 `json:"batches"` - Created int64 `json:"created"` - Deleted int64 `json:"deleted"` - Noops int64 `json:"noops"` - RequestsPerSecond float32 `json:"requests_per_second"` - Retries Retries `json:"retries"` - Throttled Duration `json:"throttled,omitempty"` - ThrottledMillis int64 `json:"throttled_millis"` - ThrottledUntil Duration `json:"throttled_until,omitempty"` - ThrottledUntilMillis int64 `json:"throttled_until_millis"` - Total int64 `json:"total"` - Updated int64 `json:"updated"` - VersionConflicts int64 `json:"version_conflicts"` + // Batches The number of scroll responses pulled back by the reindex. + Batches int64 `json:"batches"` + // Created The number of documents that were successfully created. + Created int64 `json:"created"` + // Deleted The number of documents that were successfully deleted. + Deleted int64 `json:"deleted"` + // Noops The number of documents that were ignored because the script used for the + // reindex returned a `noop` value for `ctx.op`. + Noops int64 `json:"noops"` + // RequestsPerSecond The number of requests per second effectively executed during the reindex. + RequestsPerSecond float32 `json:"requests_per_second"` + // Retries The number of retries attempted by reindex. `bulk` is the number of bulk + // actions retried and `search` is the number of search actions retried. + Retries Retries `json:"retries"` + Throttled Duration `json:"throttled,omitempty"` + // ThrottledMillis Number of milliseconds the request slept to conform to `requests_per_second`. 
+ ThrottledMillis int64 `json:"throttled_millis"` + ThrottledUntil Duration `json:"throttled_until,omitempty"` + // ThrottledUntilMillis This field should always be equal to zero in a `_reindex` response. + // It only has meaning when using the Task API, where it indicates the next time + // (in milliseconds since epoch) a throttled request will be executed again in + // order to conform to `requests_per_second`. + ThrottledUntilMillis int64 `json:"throttled_until_millis"` + // Total The number of documents that were successfully processed. + Total int64 `json:"total"` + // Updated The number of documents that were successfully updated, for example, a + // document with same ID already existed prior to reindex updating it. + Updated int64 `json:"updated"` + // VersionConflicts The number of version conflicts that reindex hits. + VersionConflicts int64 `json:"version_conflicts"` +} + +func (s *ReindexStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "batches": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Batches = value + case float64: + f := int64(v) + s.Batches = f + } + + case "created": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Created = value + case float64: + f := int64(v) + s.Created = f + } + + case "deleted": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Deleted = value + case float64: + f := int64(v) + s.Deleted = f + } + + case "noops": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + 
value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Noops = value + case float64: + f := int64(v) + s.Noops = f + } + + case "requests_per_second": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.RequestsPerSecond = f + case float64: + f := float32(v) + s.RequestsPerSecond = f + } + + case "retries": + if err := dec.Decode(&s.Retries); err != nil { + return err + } + + case "throttled": + if err := dec.Decode(&s.Throttled); err != nil { + return err + } + + case "throttled_millis": + if err := dec.Decode(&s.ThrottledMillis); err != nil { + return err + } + + case "throttled_until": + if err := dec.Decode(&s.ThrottledUntil); err != nil { + return err + } + + case "throttled_until_millis": + if err := dec.Decode(&s.ThrottledUntilMillis); err != nil { + return err + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + case "updated": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Updated = value + case float64: + f := int64(v) + s.Updated = f + } + + case "version_conflicts": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.VersionConflicts = value + case float64: + f := int64(v) + s.VersionConflicts = f + } + + } + } + return nil } // NewReindexStatus returns a ReindexStatus. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindextask.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindextask.go index 1b689cd04..5572894f1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindextask.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reindextask.go @@ -16,24 +16,142 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ReindexTask type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/reindex_rethrottle/types.ts#L53-L64 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/reindex_rethrottle/types.ts#L87-L98 type ReindexTask struct { - Action string `json:"action"` - Cancellable bool `json:"cancellable"` - Description string `json:"description"` - Headers map[string][]string `json:"headers"` - Id int64 `json:"id"` - Node string `json:"node"` - RunningTimeInNanos int64 `json:"running_time_in_nanos"` - StartTimeInMillis int64 `json:"start_time_in_millis"` - Status ReindexStatus `json:"status"` - Type string `json:"type"` + Action string `json:"action"` + Cancellable bool `json:"cancellable"` + Description string `json:"description"` + Headers HttpHeaders `json:"headers"` + Id int64 `json:"id"` + Node string `json:"node"` + RunningTimeInNanos int64 `json:"running_time_in_nanos"` + StartTimeInMillis int64 `json:"start_time_in_millis"` + Status ReindexStatus `json:"status"` + Type string `json:"type"` +} + +func (s *ReindexTask) UnmarshalJSON(data []byte) error { + + dec 
:= json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "action": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Action = o + + case "cancellable": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Cancellable = value + case bool: + s.Cancellable = v + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "headers": + if err := dec.Decode(&s.Headers); err != nil { + return err + } + + case "id": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Id = value + case float64: + f := int64(v) + s.Id = f + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return err + } + + case "running_time_in_nanos": + if err := dec.Decode(&s.RunningTimeInNanos); err != nil { + return err + } + + case "start_time_in_millis": + if err := dec.Decode(&s.StartTimeInMillis); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil } // NewReindexTask returns a ReindexTask. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reloaddetails.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reloaddetails.go index a9626374c..a12df8055 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reloaddetails.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reloaddetails.go @@ -16,19 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ReloadDetails type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/reload_search_analyzers/types.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/reload_search_analyzers/types.ts#L27-L31 type ReloadDetails struct { Index string `json:"index"` ReloadedAnalyzers []string `json:"reloaded_analyzers"` ReloadedNodeIds []string `json:"reloaded_node_ids"` } +func (s *ReloadDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Index = o + + case "reloaded_analyzers": + if err := dec.Decode(&s.ReloadedAnalyzers); err != nil { + return err + } + + case "reloaded_node_ids": + if err := dec.Decode(&s.ReloadedNodeIds); err != nil { + return err + } + + } + } + return nil +} + // 
NewReloadDetails returns a ReloadDetails. func NewReloadDetails() *ReloadDetails { r := &ReloadDetails{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reloadresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reloadresult.go new file mode 100644 index 000000000..3a79b6c75 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reloadresult.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +// ReloadResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/reload_search_analyzers/types.ts#L22-L25 +type ReloadResult struct { + ReloadDetails []ReloadDetails `json:"reload_details"` + Shards_ ShardStatistics `json:"_shards"` +} + +// NewReloadResult returns a ReloadResult. 
+func NewReloadResult() *ReloadResult { + r := &ReloadResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/relocationfailureinfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/relocationfailureinfo.go index e49b0a846..ee36ad7ec 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/relocationfailureinfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/relocationfailureinfo.go @@ -16,17 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RelocationFailureInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Node.ts#L72-L74 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Node.ts#L73-L75 type RelocationFailureInfo struct { FailedAttempts int `json:"failed_attempts"` } +func (s *RelocationFailureInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "failed_attempts": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FailedAttempts = value + case float64: + f := int(v) + s.FailedAttempts = f + } + + } + } + return nil +} + // NewRelocationFailureInfo returns a RelocationFailureInfo. 
func NewRelocationFailureInfo() *RelocationFailureInfo { r := &RelocationFailureInfo{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/remotesource.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/remotesource.go index 0b246fc80..95a2a3e38 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/remotesource.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/remotesource.go @@ -16,20 +16,87 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // RemoteSource type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/reindex/types.ts#L59-L66 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/reindex/types.ts#L99-L125 type RemoteSource struct { - ConnectTimeout Duration `json:"connect_timeout,omitempty"` - Headers map[string]string `json:"headers,omitempty"` - Host string `json:"host"` - Password *string `json:"password,omitempty"` - SocketTimeout Duration `json:"socket_timeout,omitempty"` - Username *string `json:"username,omitempty"` + // ConnectTimeout The remote connection timeout. + // Defaults to 30 seconds. + ConnectTimeout Duration `json:"connect_timeout,omitempty"` + // Headers An object containing the headers of the request. + Headers map[string]string `json:"headers,omitempty"` + // Host The URL for the remote instance of Elasticsearch that you want to index from. + Host string `json:"host"` + // Password The password to use for authentication with the remote host. 
+ Password *string `json:"password,omitempty"` + // SocketTimeout The remote socket read timeout. Defaults to 30 seconds. + SocketTimeout Duration `json:"socket_timeout,omitempty"` + // Username The username to use for authentication with the remote host. + Username *string `json:"username,omitempty"` +} + +func (s *RemoteSource) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "connect_timeout": + if err := dec.Decode(&s.ConnectTimeout); err != nil { + return err + } + + case "headers": + if s.Headers == nil { + s.Headers = make(map[string]string, 0) + } + if err := dec.Decode(&s.Headers); err != nil { + return err + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return err + } + + case "password": + if err := dec.Decode(&s.Password); err != nil { + return err + } + + case "socket_timeout": + if err := dec.Decode(&s.SocketTimeout); err != nil { + return err + } + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return err + } + + } + } + return nil } // NewRemoteSource returns a RemoteSource. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/removeaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/removeaction.go index e8cb3f74c..b09993a33 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/removeaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/removeaction.go @@ -16,19 +16,112 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RemoveAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/update_aliases/types.ts#L46-L53 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/update_aliases/types.ts#L97-L122 type RemoveAction struct { - Alias *string `json:"alias,omitempty"` - Aliases []string `json:"aliases,omitempty"` - Index *string `json:"index,omitempty"` - Indices []string `json:"indices,omitempty"` - MustExist *bool `json:"must_exist,omitempty"` + // Alias Alias for the action. + // Index alias names support date math. + Alias *string `json:"alias,omitempty"` + // Aliases Aliases for the action. + // Index alias names support date math. + Aliases []string `json:"aliases,omitempty"` + // Index Data stream or index for the action. + // Supports wildcards (`*`). + Index *string `json:"index,omitempty"` + // Indices Data streams or indices for the action. + // Supports wildcards (`*`). + Indices []string `json:"indices,omitempty"` + // MustExist If `true`, the alias must exist to perform the action. 
+ MustExist *bool `json:"must_exist,omitempty"` +} + +func (s *RemoveAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "alias": + if err := dec.Decode(&s.Alias); err != nil { + return err + } + + case "aliases": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Aliases = append(s.Aliases, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Aliases); err != nil { + return err + } + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return err + } + } + + case "must_exist": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.MustExist = &value + case bool: + s.MustExist = &v + } + + } + } + return nil } // NewRemoveAction returns a RemoveAction. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/removeduplicatestokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/removeduplicatestokenfilter.go index b3e888c62..219424b83 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/removeduplicatestokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/removeduplicatestokenfilter.go @@ -16,23 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // RemoveDuplicatesTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L300-L302 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L301-L303 type RemoveDuplicatesTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *RemoveDuplicatesTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s RemoveDuplicatesTokenFilter) MarshalJSON() ([]byte, error) { + type innerRemoveDuplicatesTokenFilter RemoveDuplicatesTokenFilter + tmp := innerRemoveDuplicatesTokenFilter{ + 
Type: s.Type, + Version: s.Version, + } + + tmp.Type = "remove_duplicates" + + return json.Marshal(tmp) +} + // NewRemoveDuplicatesTokenFilter returns a RemoveDuplicatesTokenFilter. func NewRemoveDuplicatesTokenFilter() *RemoveDuplicatesTokenFilter { r := &RemoveDuplicatesTokenFilter{} - r.Type = "remove_duplicates" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/removeindexaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/removeindexaction.go index 5c1716a06..23454a1c0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/removeindexaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/removeindexaction.go @@ -16,17 +16,85 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RemoveIndexAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/update_aliases/types.ts#L55-L60 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/update_aliases/types.ts#L124-L139 type RemoveIndexAction struct { - Index *string `json:"index,omitempty"` - Indices []string `json:"indices,omitempty"` - MustExist *bool `json:"must_exist,omitempty"` + // Index Data stream or index for the action. + // Supports wildcards (`*`). + Index *string `json:"index,omitempty"` + // Indices Data streams or indices for the action. + // Supports wildcards (`*`). + Indices []string `json:"indices,omitempty"` + // MustExist If `true`, the alias must exist to perform the action. 
+ MustExist *bool `json:"must_exist,omitempty"` +} + +func (s *RemoveIndexAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return err + } + } + + case "must_exist": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.MustExist = &value + case bool: + s.MustExist = &v + } + + } + } + return nil } // NewRemoveIndexAction returns a RemoveIndexAction. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/removeprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/removeprocessor.go index 1ff512c17..71bdc1792 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/removeprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/removeprocessor.go @@ -16,21 +16,144 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RemoveProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L311-L314 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L930-L940 type RemoveProcessor struct { - Description *string `json:"description,omitempty"` - Field []string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Tag *string `json:"tag,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field Fields to be removed. Supports template snippets. + Field []string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. 
+ Tag *string `json:"tag,omitempty"` +} + +func (s *RemoveProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Field = append(s.Field, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Field); err != nil { + return err + } + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil } // NewRemoveProcessor returns a RemoveProcessor. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/renameprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/renameprocessor.go index d231047f3..2159c59a7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/renameprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/renameprocessor.go @@ -16,22 +16,142 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RenameProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L316-L320 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L942-L958 type RenameProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetField string `json:"target_field"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to be renamed. + // Supports template snippets. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. 
+ IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The new name of the field. + // Supports template snippets. + TargetField string `json:"target_field"` +} + +func (s *RenameProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := 
dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewRenameProcessor returns a RenameProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reportingemailattachment.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reportingemailattachment.go index 99d0db05d..c656fc926 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reportingemailattachment.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reportingemailattachment.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ReportingEmailAttachment type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L224-L232 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L224-L232 type ReportingEmailAttachment struct { Inline *bool `json:"inline,omitempty"` Interval Duration `json:"interval,omitempty"` @@ -31,6 +39,78 @@ type ReportingEmailAttachment struct { Url string `json:"url"` } +func (s *ReportingEmailAttachment) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "inline": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Inline = &value + case bool: + s.Inline = &v + } + + case "interval": + if err := dec.Decode(&s.Interval); err != nil { + return err + } + + case "request": + if err := dec.Decode(&s.Request); err != nil { + return err + } + + case "retries": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Retries = &value + case float64: + f := int(v) + s.Retries = &f + } + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = o + + } + } + return nil +} + // NewReportingEmailAttachment returns a ReportingEmailAttachment. 
func NewReportingEmailAttachment() *ReportingEmailAttachment { r := &ReportingEmailAttachment{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositoriesrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositoriesrecord.go index 504e4c745..2003ff809 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositoriesrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositoriesrecord.go @@ -16,20 +16,72 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RepositoriesRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/repositories/types.ts#L20-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/repositories/types.ts#L20-L31 type RepositoriesRecord struct { - // Id unique repository id + // Id The unique repository identifier. Id *string `json:"id,omitempty"` - // Type repository type + // Type The repository type. 
Type *string `json:"type,omitempty"` } +func (s *RepositoriesRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id", "repoId": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = &o + + case "type", "t": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + } + } + return nil +} + // NewRepositoriesRecord returns a RepositoriesRecord. func NewRepositoriesRecord() *RepositoriesRecord { r := &RepositoriesRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repository.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repository.go index 5e322fd84..7c86bd2ea 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repository.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repository.go @@ -16,19 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Repository type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/_types/SnapshotRepository.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/_types/SnapshotRepository.ts#L23-L27 type Repository struct { Settings RepositorySettings `json:"settings"` Type string `json:"type"` Uuid *string `json:"uuid,omitempty"` } +func (s *Repository) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "settings": + if err := dec.Decode(&s.Settings); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + case "uuid": + if err := dec.Decode(&s.Uuid); err != nil { + return err + } + + } + } + return nil +} + // NewRepository returns a Repository. func NewRepository() *Repository { r := &Repository{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositoryintegrityindicator.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositoryintegrityindicator.go new file mode 100644 index 000000000..de886324b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositoryintegrityindicator.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indicatorhealthstatus" +) + +// RepositoryIntegrityIndicator type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L134-L138 +type RepositoryIntegrityIndicator struct { + Details *RepositoryIntegrityIndicatorDetails `json:"details,omitempty"` + Diagnosis []Diagnosis `json:"diagnosis,omitempty"` + Impacts []Impact `json:"impacts,omitempty"` + Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` + Symptom string `json:"symptom"` +} + +func (s *RepositoryIntegrityIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return err + } + + case "diagnosis": + if err := dec.Decode(&s.Diagnosis); err != nil { + return err + } + + case "impacts": + if err := dec.Decode(&s.Impacts); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "symptom": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { 
+ return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Symptom = o + + } + } + return nil +} + +// NewRepositoryIntegrityIndicator returns a RepositoryIntegrityIndicator. +func NewRepositoryIntegrityIndicator() *RepositoryIntegrityIndicator { + r := &RepositoryIntegrityIndicator{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositoryintegrityindicatordetails.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositoryintegrityindicatordetails.go new file mode 100644 index 000000000..3938eb167 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositoryintegrityindicatordetails.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// RepositoryIntegrityIndicatorDetails type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L139-L143 +type RepositoryIntegrityIndicatorDetails struct { + Corrupted []string `json:"corrupted,omitempty"` + CorruptedRepositories *int64 `json:"corrupted_repositories,omitempty"` + TotalRepositories *int64 `json:"total_repositories,omitempty"` +} + +func (s *RepositoryIntegrityIndicatorDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "corrupted": + if err := dec.Decode(&s.Corrupted); err != nil { + return err + } + + case "corrupted_repositories": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CorruptedRepositories = &value + case float64: + f := int64(v) + s.CorruptedRepositories = &f + } + + case "total_repositories": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalRepositories = &value + case float64: + f := int64(v) + s.TotalRepositories = &f + } + + } + } + return nil +} + +// NewRepositoryIntegrityIndicatorDetails returns a RepositoryIntegrityIndicatorDetails. 
+func NewRepositoryIntegrityIndicatorDetails() *RepositoryIntegrityIndicatorDetails { + r := &RepositoryIntegrityIndicatorDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositorylocation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositorylocation.go index 788f0b93a..008e9a3e3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositorylocation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositorylocation.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RepositoryLocation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/RepositoryMeteringInformation.ts#L68-L74 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/RepositoryMeteringInformation.ts#L68-L74 type RepositoryLocation struct { BasePath string `json:"base_path"` // Bucket Bucket name (GCP, S3) @@ -31,6 +39,62 @@ type RepositoryLocation struct { Container *string `json:"container,omitempty"` } +func (s *RepositoryLocation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "base_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BasePath = o + + case 
"bucket": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Bucket = &o + + case "container": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Container = &o + + } + } + return nil +} + // NewRepositoryLocation returns a RepositoryLocation. func NewRepositoryLocation() *RepositoryLocation { r := &RepositoryLocation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositorymeteringinformation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositorymeteringinformation.go index c72505674..b20315739 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositorymeteringinformation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositorymeteringinformation.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RepositoryMeteringInformation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/RepositoryMeteringInformation.ts#L24-L66 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/RepositoryMeteringInformation.ts#L24-L66 type RepositoryMeteringInformation struct { // Archived A flag that tells whether or not this object has been archived. 
When a // repository is closed or updated the @@ -58,6 +66,87 @@ type RepositoryMeteringInformation struct { RequestCounts RequestCounts `json:"request_counts"` } +func (s *RepositoryMeteringInformation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "archived": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Archived = value + case bool: + s.Archived = v + } + + case "cluster_version": + if err := dec.Decode(&s.ClusterVersion); err != nil { + return err + } + + case "repository_ephemeral_id": + if err := dec.Decode(&s.RepositoryEphemeralId); err != nil { + return err + } + + case "repository_location": + if err := dec.Decode(&s.RepositoryLocation); err != nil { + return err + } + + case "repository_name": + if err := dec.Decode(&s.RepositoryName); err != nil { + return err + } + + case "repository_started_at": + if err := dec.Decode(&s.RepositoryStartedAt); err != nil { + return err + } + + case "repository_stopped_at": + if err := dec.Decode(&s.RepositoryStoppedAt); err != nil { + return err + } + + case "repository_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RepositoryType = o + + case "request_counts": + if err := dec.Decode(&s.RequestCounts); err != nil { + return err + } + + } + } + return nil +} + // NewRepositoryMeteringInformation returns a RepositoryMeteringInformation. 
func NewRepositoryMeteringInformation() *RepositoryMeteringInformation { r := &RepositoryMeteringInformation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositorysettings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositorysettings.go index 30c18a870..4edc0929c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositorysettings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/repositorysettings.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RepositorySettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/_types/SnapshotRepository.ts#L29-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/_types/SnapshotRepository.ts#L29-L38 type RepositorySettings struct { ChunkSize *string `json:"chunk_size,omitempty"` Compress string `json:"compress,omitempty"` @@ -31,6 +39,86 @@ type RepositorySettings struct { ReadOnly string `json:"read_only,omitempty"` } +func (s *RepositorySettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "chunk_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ChunkSize = &o + + case "compress": + var tmp 
json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Compress = o + + case "concurrent_streams": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ConcurrentStreams = o + + case "location": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Location = o + + case "read_only", "readonly": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ReadOnly = o + + } + } + return nil +} + // NewRepositorySettings returns a RepositorySettings. func NewRepositorySettings() *RepositorySettings { r := &RepositorySettings{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/requestcachestats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/requestcachestats.go index 74f012b60..a02287494 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/requestcachestats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/requestcachestats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RequestCacheStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L177-L183 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L244-L250 type RequestCacheStats struct { Evictions int64 `json:"evictions"` HitCount int64 `json:"hit_count"` @@ -31,6 +39,98 @@ type RequestCacheStats struct { MissCount int64 `json:"miss_count"` } +func (s *RequestCacheStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "evictions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Evictions = value + case float64: + f := int64(v) + s.Evictions = f + } + + case "hit_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.HitCount = value + case float64: + f := int64(v) + s.HitCount = f + } + + case "memory_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MemorySize = &o + + case "memory_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MemorySizeInBytes = value + case float64: + f := int64(v) + s.MemorySizeInBytes = f + } + + case "miss_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MissCount = value + case float64: + f := int64(v) + 
s.MissCount = f + } + + } + } + return nil +} + // NewRequestCacheStats returns a RequestCacheStats. func NewRequestCacheStats() *RequestCacheStats { r := &RequestCacheStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/requestcounts.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/requestcounts.go index 5c0c3abed..010c7a9d4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/requestcounts.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/requestcounts.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RequestCounts type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/RepositoryMeteringInformation.ts#L76-L103 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/RepositoryMeteringInformation.ts#L76-L103 type RequestCounts struct { // GetBlob Number of Get Blob requests (Azure) GetBlob *int64 `json:"GetBlob,omitempty"` @@ -53,6 +61,191 @@ type RequestCounts struct { PutObject *int64 `json:"PutObject,omitempty"` } +func (s *RequestCounts) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "GetBlob": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.GetBlob = &value + case float64: + 
f := int64(v) + s.GetBlob = &f + } + + case "GetBlobProperties": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.GetBlobProperties = &value + case float64: + f := int64(v) + s.GetBlobProperties = &f + } + + case "GetObject": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.GetObject = &value + case float64: + f := int64(v) + s.GetObject = &f + } + + case "InsertObject": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.InsertObject = &value + case float64: + f := int64(v) + s.InsertObject = &f + } + + case "ListBlobs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ListBlobs = &value + case float64: + f := int64(v) + s.ListBlobs = &f + } + + case "ListObjects": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ListObjects = &value + case float64: + f := int64(v) + s.ListObjects = &f + } + + case "PutBlob": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PutBlob = &value + case float64: + f := int64(v) + s.PutBlob = &f + } + + case "PutBlock": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PutBlock = &value + case float64: + f := int64(v) + s.PutBlock = &f + } + + case "PutBlockList": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case 
string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PutBlockList = &value + case float64: + f := int64(v) + s.PutBlockList = &f + } + + case "PutMultipartObject": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PutMultipartObject = &value + case float64: + f := int64(v) + s.PutMultipartObject = &f + } + + case "PutObject": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PutObject = &value + case float64: + f := int64(v) + s.PutObject = &f + } + + } + } + return nil +} + // NewRequestCounts returns a RequestCounts. func NewRequestCounts() *RequestCounts { r := &RequestCounts{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/requestitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/requestitem.go new file mode 100644 index 000000000..5c615c286 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/requestitem.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +// RequestItem holds the union for the following types: +// +// MultisearchHeader +// TemplateConfig +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/msearch_template/types.ts#L25-L26 +type RequestItem interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reroutedecision.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reroutedecision.go index 5c7d92204..2669d5d21 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reroutedecision.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reroutedecision.go @@ -16,19 +16,83 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RerouteDecision type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/reroute/types.ts#L86-L90 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/reroute/types.ts#L86-L90 type RerouteDecision struct { Decider string `json:"decider"` Decision string `json:"decision"` Explanation string `json:"explanation"` } +func (s *RerouteDecision) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decider": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Decider = o + + case "decision": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Decision = o + + case "explanation": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Explanation = o + + } + } + return nil +} + // NewRerouteDecision returns a RerouteDecision. func NewRerouteDecision() *RerouteDecision { r := &RerouteDecision{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rerouteexplanation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rerouteexplanation.go index 11b51babe..1751347aa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rerouteexplanation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rerouteexplanation.go @@ -16,19 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RerouteExplanation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/reroute/types.ts#L92-L96 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/reroute/types.ts#L92-L96 type RerouteExplanation struct { Command string `json:"command"` Decisions []RerouteDecision `json:"decisions"` Parameters RerouteParameters `json:"parameters"` } +func (s *RerouteExplanation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "command": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Command = o + + case "decisions": + if err := dec.Decode(&s.Decisions); err != nil { + return err + } + + case "parameters": + if err := dec.Decode(&s.Parameters); err != nil { + return err + } + + } + } + return nil +} + // NewRerouteExplanation returns a RerouteExplanation. 
func NewRerouteExplanation() *RerouteExplanation { r := &RerouteExplanation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rerouteparameters.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rerouteparameters.go index 3c40fc6cb..c9645030e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rerouteparameters.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rerouteparameters.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RerouteParameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/reroute/types.ts#L98-L105 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/reroute/types.ts#L98-L105 type RerouteParameters struct { AllowPrimary bool `json:"allow_primary"` FromNode *string `json:"from_node,omitempty"` @@ -32,6 +40,76 @@ type RerouteParameters struct { ToNode *string `json:"to_node,omitempty"` } +func (s *RerouteParameters) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_primary": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowPrimary = value + case bool: + s.AllowPrimary = v + } + + case "from_node": + if err := dec.Decode(&s.FromNode); err != nil { + return err + } + 
+ case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return err + } + + case "shard": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Shard = value + case float64: + f := int(v) + s.Shard = f + } + + case "to_node": + if err := dec.Decode(&s.ToNode); err != nil { + return err + } + + } + } + return nil +} + // NewRerouteParameters returns a RerouteParameters. func NewRerouteParameters() *RerouteParameters { r := &RerouteParameters{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rescore.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rescore.go index 67755c633..12c04ecec 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rescore.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rescore.go @@ -16,18 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Rescore type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/rescoring.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/rescoring.ts#L23-L26 type Rescore struct { Query RescoreQuery `json:"query"` WindowSize *int `json:"window_size,omitempty"` } +func (s *Rescore) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "window_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.WindowSize = &value + case float64: + f := int(v) + s.WindowSize = &f + } + + } + } + return nil +} + // NewRescore returns a Rescore. func NewRescore() *Rescore { r := &Rescore{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rescorequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rescorequery.go index 52573ceb4..2408ac166 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rescorequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rescorequery.go @@ -16,22 +16,96 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scoremode" ) // RescoreQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/rescoring.ts#L28-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/rescoring.ts#L28-L50 type RescoreQuery struct { - Query Query `json:"rescore_query"` - QueryWeight *Float64 `json:"query_weight,omitempty"` - RescoreQueryWeight *Float64 `json:"rescore_query_weight,omitempty"` - ScoreMode *scoremode.ScoreMode `json:"score_mode,omitempty"` + // Query The query to use for rescoring. + // This query is only run on the Top-K results returned by the `query` and + // `post_filter` phases. + Query Query `json:"rescore_query"` + // QueryWeight Relative importance of the original query versus the rescore query. + QueryWeight *Float64 `json:"query_weight,omitempty"` + // RescoreQueryWeight Relative importance of the rescore query versus the original query. + RescoreQueryWeight *Float64 `json:"rescore_query_weight,omitempty"` + // ScoreMode Determines how scores are combined. 
+ ScoreMode *scoremode.ScoreMode `json:"score_mode,omitempty"` +} + +func (s *RescoreQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "rescore_query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "query_weight": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.QueryWeight = &f + case float64: + f := Float64(v) + s.QueryWeight = &f + } + + case "rescore_query_weight": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.RescoreQueryWeight = &f + case float64: + f := Float64(v) + s.RescoreQueryWeight = &f + } + + case "score_mode": + if err := dec.Decode(&s.ScoreMode); err != nil { + return err + } + + } + } + return nil } // NewRescoreQuery returns a RescoreQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reservedsize.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reservedsize.go index 79754bf42..72eae5a94 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reservedsize.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reservedsize.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ReservedSize type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/allocation_explain/types.ts#L71-L76 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/allocation_explain/types.ts#L71-L76 type ReservedSize struct { NodeId string `json:"node_id"` Path string `json:"path"` @@ -30,6 +38,63 @@ type ReservedSize struct { Total int64 `json:"total"` } +func (s *ReservedSize) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return err + } + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = o + + case "shards": + if err := dec.Decode(&s.Shards); err != nil { + return err + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + // NewReservedSize returns a ReservedSize. func NewReservedSize() *ReservedSize { r := &ReservedSize{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/resolveindexaliasitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/resolveindexaliasitem.go index 2c65dddb8..f534b2745 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/resolveindexaliasitem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/resolveindexaliasitem.go @@ -16,18 +16,66 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ResolveIndexAliasItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/resolve_index/ResolveIndexResponse.ts#L37-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/resolve_index/ResolveIndexResponse.ts#L37-L40 type ResolveIndexAliasItem struct { Indices []string `json:"indices"` Name string `json:"name"` } +func (s *ResolveIndexAliasItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Indices = append(s.Indices, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Indices); err != nil { + return err + } + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + // NewResolveIndexAliasItem returns a ResolveIndexAliasItem. 
func NewResolveIndexAliasItem() *ResolveIndexAliasItem { r := &ResolveIndexAliasItem{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/resolveindexdatastreamsitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/resolveindexdatastreamsitem.go index 8f6cf02d7..2856792db 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/resolveindexdatastreamsitem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/resolveindexdatastreamsitem.go @@ -16,19 +16,72 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ResolveIndexDataStreamsItem type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/resolve_index/ResolveIndexResponse.ts#L42-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/resolve_index/ResolveIndexResponse.ts#L42-L46 type ResolveIndexDataStreamsItem struct { BackingIndices []string `json:"backing_indices"` Name string `json:"name"` TimestampField string `json:"timestamp_field"` } +func (s *ResolveIndexDataStreamsItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "backing_indices": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.BackingIndices = append(s.BackingIndices, 
*o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.BackingIndices); err != nil { + return err + } + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "timestamp_field": + if err := dec.Decode(&s.TimestampField); err != nil { + return err + } + + } + } + return nil +} + // NewResolveIndexDataStreamsItem returns a ResolveIndexDataStreamsItem. func NewResolveIndexDataStreamsItem() *ResolveIndexDataStreamsItem { r := &ResolveIndexDataStreamsItem{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/resolveindexitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/resolveindexitem.go index 81a520ebc..6d799495d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/resolveindexitem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/resolveindexitem.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ResolveIndexItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/resolve_index/ResolveIndexResponse.ts#L30-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/resolve_index/ResolveIndexResponse.ts#L30-L35 type ResolveIndexItem struct { Aliases []string `json:"aliases,omitempty"` Attributes []string `json:"attributes"` @@ -30,6 +37,46 @@ type ResolveIndexItem struct { Name string `json:"name"` } +func (s *ResolveIndexItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aliases": + if err := dec.Decode(&s.Aliases); err != nil { + return err + } + + case "attributes": + if err := dec.Decode(&s.Attributes); err != nil { + return err + } + + case "data_stream": + if err := dec.Decode(&s.DataStream); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + // NewResolveIndexItem returns a ResolveIndexItem. func NewResolveIndexItem() *ResolveIndexItem { r := &ResolveIndexItem{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/resourceprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/resourceprivileges.go index 907871c9f..9f609683c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/resourceprivileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/resourceprivileges.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ResourcePrivileges type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/has_privileges/types.ts#L47-L47 -type ResourcePrivileges map[string]map[string]bool +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/has_privileges/types.ts#L47-L47 +type ResourcePrivileges map[string]Privileges diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/responsebody.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/responsebody.go index 5f28c50f3..70c99e927 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/responsebody.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/responsebody.go @@ -16,23 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - + "strconv" "strings" - - "encoding/json" ) // ResponseBody type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/SearchResponse.ts#L38-L54 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/SearchResponse.ts#L38-L54 type ResponseBody struct { Aggregations map[string]Aggregate `json:"aggregations,omitempty"` Clusters_ *ClusterStatistics `json:"_clusters,omitempty"` @@ -51,6 +50,7 @@ type ResponseBody struct { } func (s *ResponseBody) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -65,6 +65,10 @@ func (s *ResponseBody) UnmarshalJSON(data []byte) error { switch t { case "aggregations": + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + for dec.More() { tt, err := dec.Token() if err != nil { @@ -77,415 +81,494 @@ func (s *ResponseBody) UnmarshalJSON(data []byte) error { if strings.Contains(value, "#") { elems := strings.Split(value, "#") if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } switch elems[0] { + case "cardinality": o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "hdr_percentiles": o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "hdr_percentile_ranks": o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "tdigest_percentiles": o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "tdigest_percentile_ranks": o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); 
err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "percentiles_bucket": o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "median_absolute_deviation": o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "min": o := NewMinAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "max": o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sum": o := NewSumAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "avg": o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "weighted_avg": o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "value_count": o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "simple_value": o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "derivative": o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "bucket_metric_value": o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } 
s.Aggregations[elems[1]] = o + case "stats": o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "stats_bucket": o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "extended_stats": o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "extended_stats_bucket": o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_bounds": o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_centroid": o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "histogram": o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "date_histogram": o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "auto_date_histogram": o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "variable_width_histogram": o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sterms": o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } 
s.Aggregations[elems[1]] = o + case "lterms": o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "dterms": o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umterms": o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "lrareterms": o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "srareterms": o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umrareterms": o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "multi_terms": o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "missing": o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "nested": o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "reverse_nested": o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "global": o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "filter": o := NewFilterAggregate() - if err := 
dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "children": o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "parent": o := NewParentAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sampler": o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "unmapped_sampler": o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geohash_grid": o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geotile_grid": o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geohex_grid": o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "range": o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "date_range": o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_distance": o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "ip_range": o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } 
s.Aggregations[elems[1]] = o + case "ip_prefix": o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "filters": o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "adjacency_matrix": o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "siglterms": o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "sigsterms": o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "umsigterms": o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "composite": o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + case "scripted_metric": o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "top_hits": o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "inference": o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "string_stats": o := 
NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "box_plot": o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "top_metrics": o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "t_test": o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "rate": o := NewRateAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "simple_long_value": o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "matrix_stats": o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + case "geo_line": o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { + if err := dec.Decode(&o); err != nil { return err } s.Aggregations[elems[1]] = o + default: o := make(map[string]interface{}, 0) if err := dec.Decode(&o); err != nil { @@ -512,6 +595,9 @@ func (s *ResponseBody) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]json.RawMessage, 0) + } if err := dec.Decode(&s.Fields); err != nil { return err } @@ -522,13 +608,34 @@ func (s *ResponseBody) UnmarshalJSON(data []byte) error { } case "max_score": - if err := dec.Decode(&s.MaxScore); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + 
f := Float64(value) + s.MaxScore = &f + case float64: + f := Float64(v) + s.MaxScore = &f } case "num_reduce_phases": - if err := dec.Decode(&s.NumReducePhases); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumReducePhases = &value + case float64: + f := int64(v) + s.NumReducePhases = &f } case "pit_id": @@ -552,23 +659,109 @@ func (s *ResponseBody) UnmarshalJSON(data []byte) error { } case "suggest": - if err := dec.Decode(&s.Suggest); err != nil { - return err + if s.Suggest == nil { + s.Suggest = make(map[string][]Suggest, 0) + } + + for dec.More() { + tt, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + if value, ok := tt.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Suggest == nil { + s.Suggest = make(map[string][]Suggest, 0) + } + switch elems[0] { + + case "completion": + o := NewCompletionSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "phrase": + o := NewPhraseSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + case "term": + o := NewTermSuggest() + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[elems[1]] = append(s.Suggest[elems[1]], o) + } + } else { + return errors.New("cannot decode JSON for field Suggest") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Suggest[value] = append(s.Suggest[value], o) + } + } } case "terminated_early": - if err := dec.Decode(&s.TerminatedEarly); err != 
nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TerminatedEarly = &value + case bool: + s.TerminatedEarly = &v } case "timed_out": - if err := dec.Decode(&s.TimedOut); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimedOut = value + case bool: + s.TimedOut = v } case "took": - if err := dec.Decode(&s.Took); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Took = value + case float64: + f := int64(v) + s.Took = f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/responseitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/responseitem.go index ecd94a811..c9a6873c0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/responseitem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/responseitem.go @@ -16,14 +16,180 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types -// ResponseItem holds the union for the following types: -// -// GetResult -// MultiGetError +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// ResponseItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/mget/types.ts#L57-L60 -type ResponseItem interface{} +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/bulk/types.ts#L37-L81 +type ResponseItem struct { + // Error Contains additional information about the failed operation. + // The parameter is only returned for failed operations. + Error *ErrorCause `json:"error,omitempty"` + ForcedRefresh *bool `json:"forced_refresh,omitempty"` + Get *InlineGetDictUserDefined `json:"get,omitempty"` + // Id_ The document ID associated with the operation. + Id_ string `json:"_id,omitempty"` + // Index_ Name of the index associated with the operation. + // If the operation targeted a data stream, this is the backing index into which + // the document was written. + Index_ string `json:"_index"` + // PrimaryTerm_ The primary term assigned to the document for the operation. + PrimaryTerm_ *int64 `json:"_primary_term,omitempty"` + // Result Result of the operation. + // Successful values are `created`, `deleted`, and `updated`. + Result *string `json:"result,omitempty"` + // SeqNo_ The sequence number assigned to the document for the operation. + // Sequence numbers are used to ensure an older version of a document doesn’t + // overwrite a newer version. + SeqNo_ *int64 `json:"_seq_no,omitempty"` + // Shards_ Contains shard information for the operation. + Shards_ *ShardStatistics `json:"_shards,omitempty"` + // Status HTTP status code returned for the operation. + Status int `json:"status"` + // Version_ The document version associated with the operation. + // The document version is incremented each time the document is updated. 
+ Version_ *int64 `json:"_version,omitempty"` +} + +func (s *ResponseItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + if err := dec.Decode(&s.Error); err != nil { + return err + } + + case "forced_refresh": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.ForcedRefresh = &value + case bool: + s.ForcedRefresh = &v + } + + case "get": + if err := dec.Decode(&s.Get); err != nil { + return err + } + + case "_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id_ = o + + case "_index": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Index_ = o + + case "_primary_term": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PrimaryTerm_ = &value + case float64: + f := int64(v) + s.PrimaryTerm_ = &f + } + + case "result": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Result = &o + + case "_seq_no": + if err := dec.Decode(&s.SeqNo_); err != nil { + return err + } + + case "_shards": + if err := dec.Decode(&s.Shards_); err != nil { + return err + } + + case "status": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Status = value + case float64: + f := int(v) + 
s.Status = f + } + + case "_version": + if err := dec.Decode(&s.Version_); err != nil { + return err + } + + } + } + return nil +} + +// NewResponseItem returns a ResponseItem. +func NewResponseItem() *ResponseItem { + r := &ResponseItem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retention.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retention.go index b0848f900..997402259 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retention.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retention.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Retention type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/_types/SnapshotLifecycle.ts#L84-L97 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/_types/SnapshotLifecycle.ts#L84-L97 type Retention struct { // ExpireAfter Time period after which a snapshot is considered expired and eligible for // deletion. SLM deletes expired snapshots based on the slm.retention_schedule. 
@@ -35,6 +43,63 @@ type Retention struct { MinCount int `json:"min_count"` } +func (s *Retention) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expire_after": + if err := dec.Decode(&s.ExpireAfter); err != nil { + return err + } + + case "max_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxCount = value + case float64: + f := int(v) + s.MaxCount = f + } + + case "min_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinCount = value + case float64: + f := int(v) + s.MinCount = f + } + + } + } + return nil +} + // NewRetention returns a Retention. func NewRetention() *Retention { r := &Retention{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retentionlease.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retentionlease.go index eff04ac1b..c5f860c9c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retentionlease.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retentionlease.go @@ -16,17 +16,49 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // RetentionLease type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L65-L67 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L65-L67 type RetentionLease struct { Period Duration `json:"period"` } +func (s *RetentionLease) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "period": + if err := dec.Decode(&s.Period); err != nil { + return err + } + + } + } + return nil +} + // NewRetentionLease returns a RetentionLease. func NewRetentionLease() *RetentionLease { r := &RetentionLease{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retentionpolicy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retentionpolicy.go index 10dd33941..fb8829f1b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retentionpolicy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retentionpolicy.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // RetentionPolicy type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/_types/Transform.ts#L88-L96 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/_types/Transform.ts#L88-L96 type RetentionPolicy struct { // Field The date field that is used to calculate the age of the document. Field string `json:"field"` @@ -32,6 +39,36 @@ type RetentionPolicy struct { MaxAge Duration `json:"max_age"` } +func (s *RetentionPolicy) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "max_age": + if err := dec.Decode(&s.MaxAge); err != nil { + return err + } + + } + } + return nil +} + // NewRetentionPolicy returns a RetentionPolicy. func NewRetentionPolicy() *RetentionPolicy { r := &RetentionPolicy{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retentionpolicycontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retentionpolicycontainer.go index da5018b56..b368419a1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retentionpolicycontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retentionpolicycontainer.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // RetentionPolicyContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/_types/Transform.ts#L80-L86 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/_types/Transform.ts#L80-L86 type RetentionPolicyContainer struct { // Time Specifies that the transform uses a time field to set the retention policy. Time *RetentionPolicy `json:"time,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retries.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retries.go index f9c15b3a0..9e8bd6a0e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retries.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/retries.go @@ -16,18 +16,76 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Retries type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Retries.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Retries.ts#L22-L25 type Retries struct { Bulk int64 `json:"bulk"` Search int64 `json:"search"` } +func (s *Retries) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bulk": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Bulk = value + case float64: + f := int64(v) + s.Bulk = f + } + + case "search": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Search = value + case float64: + f := int64(v) + s.Search = f + } + + } + } + return nil +} + // NewRetries returns a Retries. func NewRetries() *Retries { r := &Retries{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reversenestedaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reversenestedaggregate.go index 295aea03b..4564f6197 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reversenestedaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reversenestedaggregate.go @@ -16,32 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // ReverseNestedAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L488-L489 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L489-L490 type ReverseNestedAggregate struct { - Aggregations map[string]Aggregate `json:"-"` - DocCount int64 `json:"doc_count"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` } func (s *ReverseNestedAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -55,451 +54,19 @@ func (s *ReverseNestedAggregate) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := 
NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); 
err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return 
err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "meta": @@ -507,6 +74,519 @@ 
func (s *ReverseNestedAggregate) UnmarshalJSON(data []byte) error { return err } + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o 
:= NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != 
nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err 
:= dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + 
if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } + } } return nil @@ -531,6 +611,7 @@ func (s ReverseNestedAggregate) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reversenestedaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reversenestedaggregation.go index 774f78942..84ada4aa7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reversenestedaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reversenestedaggregation.go @@ -16,21 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // ReverseNestedAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L314-L316 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L719-L725 type ReverseNestedAggregation struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Path *string `json:"path,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Path Defines the nested object field that should be joined back to. + // The default is empty, which means that it joins back to the root/main + // document level. + Path *string `json:"path,omitempty"` +} + +func (s *ReverseNestedAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "path": + if err := dec.Decode(&s.Path); err != nil { + return err + } + + } + } + return nil } // NewReverseNestedAggregation returns a ReverseNestedAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reversetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reversetokenfilter.go index 5c5c6e642..e81124a9f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reversetokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/reversetokenfilter.go @@ -16,23 +16,71 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ReverseTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L304-L306 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L305-L307 type ReverseTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *ReverseTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ReverseTokenFilter) MarshalJSON() ([]byte, error) { + type innerReverseTokenFilter ReverseTokenFilter + tmp := innerReverseTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "reverse" + + return json.Marshal(tmp) +} + // NewReverseTokenFilter returns a ReverseTokenFilter. 
func NewReverseTokenFilter() *ReverseTokenFilter { r := &ReverseTokenFilter{} - r.Type = "reverse" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/role.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/role.go index c2370bbdb..e81e98b95 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/role.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/role.go @@ -16,28 +16,94 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // Role type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_role/types.ts#L29-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_role/types.ts#L29-L42 type Role struct { Applications []ApplicationPrivileges `json:"applications"` Cluster []string `json:"cluster"` Global map[string]map[string]map[string][]string `json:"global,omitempty"` Indices []IndicesPrivileges `json:"indices"` - Metadata map[string]json.RawMessage `json:"metadata"` + Metadata Metadata `json:"metadata"` RoleTemplates []RoleTemplate `json:"role_templates,omitempty"` RunAs []string `json:"run_as"` TransientMetadata TransientMetadataConfig `json:"transient_metadata"` } +func (s *Role) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "applications": + if err := dec.Decode(&s.Applications); err != nil { + return err + } + + case "cluster": + 
if err := dec.Decode(&s.Cluster); err != nil { + return err + } + + case "global": + if s.Global == nil { + s.Global = make(map[string]map[string]map[string][]string, 0) + } + if err := dec.Decode(&s.Global); err != nil { + return err + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return err + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return err + } + + case "role_templates": + if err := dec.Decode(&s.RoleTemplates); err != nil { + return err + } + + case "run_as": + if err := dec.Decode(&s.RunAs); err != nil { + return err + } + + case "transient_metadata": + if err := dec.Decode(&s.TransientMetadata); err != nil { + return err + } + + } + } + return nil +} + // NewRole returns a Role. func NewRole() *Role { r := &Role{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roledescriptor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roledescriptor.go index ab85c7aaf..133f6c3df 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roledescriptor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roledescriptor.go @@ -16,25 +16,104 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // RoleDescriptor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/RoleDescriptor.ts#L27-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/RoleDescriptor.ts#L27-L55 type RoleDescriptor struct { - Applications []ApplicationPrivileges `json:"applications,omitempty"` - Cluster []string `json:"cluster,omitempty"` - Global []GlobalPrivilege `json:"global,omitempty"` - Indices []IndicesPrivileges `json:"indices,omitempty"` - Metadata map[string]json.RawMessage `json:"metadata,omitempty"` - RunAs []string `json:"run_as,omitempty"` - TransientMetadata *TransientMetadataConfig `json:"transient_metadata,omitempty"` + // Applications A list of application privilege entries + Applications []ApplicationPrivileges `json:"applications,omitempty"` + // Cluster A list of cluster privileges. These privileges define the cluster level + // actions that API keys are able to execute. + Cluster []string `json:"cluster,omitempty"` + // Global An object defining global privileges. A global privilege is a form of cluster + // privilege that is request-aware. Support for global privileges is currently + // limited to the management of application privileges. + Global []GlobalPrivilege `json:"global,omitempty"` + // Indices A list of indices permissions entries. + Indices []IndicesPrivileges `json:"indices,omitempty"` + // Metadata Optional meta-data. Within the metadata object, keys that begin with `_` are + // reserved for system usage. + Metadata Metadata `json:"metadata,omitempty"` + // RunAs A list of users that the API keys can impersonate. 
+ RunAs []string `json:"run_as,omitempty"` + TransientMetadata *TransientMetadataConfig `json:"transient_metadata,omitempty"` +} + +func (s *RoleDescriptor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "applications": + if err := dec.Decode(&s.Applications); err != nil { + return err + } + + case "cluster": + if err := dec.Decode(&s.Cluster); err != nil { + return err + } + + case "global": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewGlobalPrivilege() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Global = append(s.Global, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Global); err != nil { + return err + } + } + + case "indices", "index": + if err := dec.Decode(&s.Indices); err != nil { + return err + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return err + } + + case "run_as": + if err := dec.Decode(&s.RunAs); err != nil { + return err + } + + case "transient_metadata": + if err := dec.Decode(&s.TransientMetadata); err != nil { + return err + } + + } + } + return nil } // NewRoleDescriptor returns a RoleDescriptor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roledescriptorread.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roledescriptorread.go index 138ebe08d..34b1cabcf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roledescriptorread.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roledescriptorread.go @@ -16,25 +16,104 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // RoleDescriptorRead type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/RoleDescriptor.ts#L38-L47 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/RoleDescriptor.ts#L57-L85 type RoleDescriptorRead struct { - Applications []ApplicationPrivileges `json:"applications,omitempty"` - Cluster []string `json:"cluster"` - Global []GlobalPrivilege `json:"global,omitempty"` - Indices []IndicesPrivileges `json:"indices"` - Metadata map[string]json.RawMessage `json:"metadata,omitempty"` - RunAs []string `json:"run_as,omitempty"` - TransientMetadata *TransientMetadataConfig `json:"transient_metadata,omitempty"` + // Applications A list of application privilege entries + Applications []ApplicationPrivileges `json:"applications,omitempty"` + // Cluster A list of cluster privileges. These privileges define the cluster level + // actions that API keys are able to execute. + Cluster []string `json:"cluster"` + // Global An object defining global privileges. A global privilege is a form of cluster + // privilege that is request-aware. Support for global privileges is currently + // limited to the management of application privileges. + Global []GlobalPrivilege `json:"global,omitempty"` + // Indices A list of indices permissions entries. + Indices []IndicesPrivileges `json:"indices"` + // Metadata Optional meta-data. Within the metadata object, keys that begin with `_` are + // reserved for system usage. + Metadata Metadata `json:"metadata,omitempty"` + // RunAs A list of users that the API keys can impersonate. 
+ RunAs []string `json:"run_as,omitempty"` + TransientMetadata *TransientMetadataConfig `json:"transient_metadata,omitempty"` +} + +func (s *RoleDescriptorRead) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "applications": + if err := dec.Decode(&s.Applications); err != nil { + return err + } + + case "cluster": + if err := dec.Decode(&s.Cluster); err != nil { + return err + } + + case "global": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewGlobalPrivilege() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Global = append(s.Global, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Global); err != nil { + return err + } + } + + case "indices", "index": + if err := dec.Decode(&s.Indices); err != nil { + return err + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return err + } + + case "run_as": + if err := dec.Decode(&s.RunAs); err != nil { + return err + } + + case "transient_metadata": + if err := dec.Decode(&s.TransientMetadata); err != nil { + return err + } + + } + } + return nil } // NewRoleDescriptorRead returns a RoleDescriptorRead. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roledescriptorwrapper.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roledescriptorwrapper.go index 26fac6829..daeeec171 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roledescriptorwrapper.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roledescriptorwrapper.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // RoleDescriptorWrapper type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_service_accounts/types.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_service_accounts/types.ts#L22-L24 type RoleDescriptorWrapper struct { RoleDescriptor RoleDescriptorRead `json:"role_descriptor"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rolemappingrule.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rolemappingrule.go index c7a57df3d..b7b7e0af8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rolemappingrule.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rolemappingrule.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // RoleMappingRule type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/RoleMappingRule.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/RoleMappingRule.ts#L23-L31 type RoleMappingRule struct { All []RoleMappingRule `json:"all,omitempty"` Any []RoleMappingRule `json:"any,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplate.go index 33036304f..6c6c1eeae 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplate.go @@ -16,22 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/templateformat" ) // RoleTemplate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_role/types.ts#L47-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_role/types.ts#L50-L53 type RoleTemplate struct { Format *templateformat.TemplateFormat `json:"format,omitempty"` Template Script `json:"template"` } +func (s *RoleTemplate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "format": + if err := dec.Decode(&s.Format); err != nil { + return err + } + + case "template": + if err := dec.Decode(&s.Template); err != nil { + return err + } + + } + } + return nil +} + // NewRoleTemplate returns a RoleTemplate. func NewRoleTemplate() *RoleTemplate { r := &RoleTemplate{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplateinlinequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplateinlinequery.go index 01b1a2dd7..6b77a8207 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplateinlinequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplateinlinequery.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // string // Query // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/Privileges.ts#L159-L160 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/Privileges.ts#L160-L161 type RoleTemplateInlineQuery interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplateinlinescript.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplateinlinescript.go index 33045a39a..4edc31171 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplateinlinescript.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplateinlinescript.go @@ -16,31 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptlanguage" - "bytes" + "encoding/json" "errors" "io" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptlanguage" ) // RoleTemplateInlineScript type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/Privileges.ts#L152-L157 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/Privileges.ts#L153-L158 type RoleTemplateInlineScript struct { Lang *scriptlanguage.ScriptLanguage `json:"lang,omitempty"` Options map[string]string `json:"options,omitempty"` - Params map[string]json.RawMessage `json:"params,omitempty"` - Source RoleTemplateInlineQuery `json:"source"` + // Params Specifies any named parameters that are passed into the script as variables. + // Use parameters instead of hard-coded values to decrease compile time. + Params map[string]json.RawMessage `json:"params,omitempty"` + Source RoleTemplateInlineQuery `json:"source"` } func (s *RoleTemplateInlineScript) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Source) + return err + } + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -60,16 +67,23 @@ func (s *RoleTemplateInlineScript) UnmarshalJSON(data []byte) error { } case "options": + if s.Options == nil { + s.Options = make(map[string]string, 0) + } if err := dec.Decode(&s.Options); err != nil { return err } case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } if err := dec.Decode(&s.Params); err != nil { return err } case "source": + rawMsg := json.RawMessage{} dec.Decode(&rawMsg) source := bytes.NewReader(rawMsg) diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplatequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplatequery.go index 3e90a0c25..9073f300e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplatequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplatequery.go @@ -16,13 
+16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // RoleTemplateQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/Privileges.ts#L140-L150 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/Privileges.ts#L141-L151 type RoleTemplateQuery struct { // Template When you create a role, you can specify a query that defines the document // level security permissions. You can optionally @@ -35,6 +42,31 @@ type RoleTemplateQuery struct { Template RoleTemplateScript `json:"template,omitempty"` } +func (s *RoleTemplateQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "template": + if err := dec.Decode(&s.Template); err != nil { + return err + } + + } + } + return nil +} + // NewRoleTemplateQuery returns a RoleTemplateQuery. func NewRoleTemplateQuery() *RoleTemplateQuery { r := &RoleTemplateQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplatescript.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplatescript.go index 36a35384e..16f9b141a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplatescript.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/roletemplatescript.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // RoleTemplateInlineScript // StoredScriptId // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/Privileges.ts#L162-L163 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/Privileges.ts#L163-L164 type RoleTemplateScript interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rolloverconditions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rolloverconditions.go index aec09b6ae..8f3e120f6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rolloverconditions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rolloverconditions.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RolloverConditions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/rollover/types.ts#L24-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/rollover/types.ts#L24-L40 type RolloverConditions struct { MaxAge Duration `json:"max_age,omitempty"` MaxAgeMillis *int64 `json:"max_age_millis,omitempty"` @@ -41,6 +49,181 @@ type RolloverConditions struct { MinSizeBytes *int64 `json:"min_size_bytes,omitempty"` } +func (s *RolloverConditions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_age": + if err := dec.Decode(&s.MaxAge); err != nil { + return err + } + + case "max_age_millis": + if err := dec.Decode(&s.MaxAgeMillis); err != nil { + return err + } + + case "max_docs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MaxDocs = &value + case float64: + f := int64(v) + s.MaxDocs = &f + } + + case "max_primary_shard_docs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MaxPrimaryShardDocs = &value + case float64: + f := int64(v) + s.MaxPrimaryShardDocs = &f + } + + case "max_primary_shard_size": + if err := dec.Decode(&s.MaxPrimaryShardSize); err != nil { + return err + } + + case "max_primary_shard_size_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MaxPrimaryShardSizeBytes = &value + case float64: + f := int64(v) + s.MaxPrimaryShardSizeBytes = &f + } + + case "max_size": + if err := 
dec.Decode(&s.MaxSize); err != nil { + return err + } + + case "max_size_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MaxSizeBytes = &value + case float64: + f := int64(v) + s.MaxSizeBytes = &f + } + + case "min_age": + if err := dec.Decode(&s.MinAge); err != nil { + return err + } + + case "min_docs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MinDocs = &value + case float64: + f := int64(v) + s.MinDocs = &f + } + + case "min_primary_shard_docs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MinPrimaryShardDocs = &value + case float64: + f := int64(v) + s.MinPrimaryShardDocs = &f + } + + case "min_primary_shard_size": + if err := dec.Decode(&s.MinPrimaryShardSize); err != nil { + return err + } + + case "min_primary_shard_size_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MinPrimaryShardSizeBytes = &value + case float64: + f := int64(v) + s.MinPrimaryShardSizeBytes = &f + } + + case "min_size": + if err := dec.Decode(&s.MinSize); err != nil { + return err + } + + case "min_size_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MinSizeBytes = &value + case float64: + f := int64(v) + s.MinSizeBytes = &f + } + + } + } + return nil +} + // NewRolloverConditions returns a RolloverConditions. 
func NewRolloverConditions() *RolloverConditions { r := &RolloverConditions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupcapabilities.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupcapabilities.go index 40c9514c7..fc03aeef2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupcapabilities.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupcapabilities.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // RollupCapabilities type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/get_rollup_caps/types.ts#L25-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/get_rollup_caps/types.ts#L25-L27 type RollupCapabilities struct { RollupJobs []RollupCapabilitySummary `json:"rollup_jobs"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupcapabilitysummary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupcapabilitysummary.go index d53a27dac..aba4908c8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupcapabilitysummary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupcapabilitysummary.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RollupCapabilitySummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/get_rollup_caps/types.ts#L29-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/get_rollup_caps/types.ts#L29-L34 type RollupCapabilitySummary struct { Fields map[string][]RollupFieldSummary `json:"fields"` IndexPattern string `json:"index_pattern"` @@ -30,6 +38,70 @@ type RollupCapabilitySummary struct { RollupIndex string `json:"rollup_index"` } +func (s *RollupCapabilitySummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string][]RollupFieldSummary, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "index_pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexPattern = o + + case "job_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.JobId = o + + case "rollup_index": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RollupIndex = o + + } + } + return nil +} + // 
NewRollupCapabilitySummary returns a RollupCapabilitySummary. func NewRollupCapabilitySummary() *RollupCapabilitySummary { r := &RollupCapabilitySummary{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupfieldsummary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupfieldsummary.go index 325aef636..6a895d0f1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupfieldsummary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupfieldsummary.go @@ -16,19 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RollupFieldSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/get_rollup_caps/types.ts#L36-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/get_rollup_caps/types.ts#L36-L40 type RollupFieldSummary struct { Agg string `json:"agg"` CalendarInterval Duration `json:"calendar_interval,omitempty"` TimeZone *string `json:"time_zone,omitempty"` } +func (s *RollupFieldSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "agg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Agg = o + + case "calendar_interval": + if err := dec.Decode(&s.CalendarInterval); err 
!= nil { + return err + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return err + } + + } + } + return nil +} + // NewRollupFieldSummary returns a RollupFieldSummary. func NewRollupFieldSummary() *RollupFieldSummary { r := &RollupFieldSummary{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjob.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjob.go index 327c8f986..4250554d1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjob.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjob.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // RollupJob type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/get_jobs/types.ts#L28-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/get_jobs/types.ts#L28-L32 type RollupJob struct { Config RollupJobConfiguration `json:"config"` Stats RollupJobStats `json:"stats"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobconfiguration.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobconfiguration.go index 0fdb13066..0d77cc02b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobconfiguration.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobconfiguration.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RollupJobConfiguration type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/get_jobs/types.ts#L34-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/get_jobs/types.ts#L34-L43 type RollupJobConfiguration struct { Cron string `json:"cron"` Groups Groupings `json:"groups"` @@ -34,6 +42,90 @@ type RollupJobConfiguration struct { Timeout Duration `json:"timeout"` } +func (s *RollupJobConfiguration) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cron": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Cron = o + + case "groups": + if err := dec.Decode(&s.Groups); err != nil { + return err + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "index_pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexPattern = o + + case "metrics": + if err := dec.Decode(&s.Metrics); err != nil { + return err + } + + case "page_size": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PageSize = value + case float64: + f := int64(v) + 
s.PageSize = f + } + + case "rollup_index": + if err := dec.Decode(&s.RollupIndex); err != nil { + return err + } + + case "timeout": + if err := dec.Decode(&s.Timeout); err != nil { + return err + } + + } + } + return nil +} + // NewRollupJobConfiguration returns a RollupJobConfiguration. func NewRollupJobConfiguration() *RollupJobConfiguration { r := &RollupJobConfiguration{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobstats.go index ff426de8f..0b05c4c30 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RollupJobStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/get_jobs/types.ts#L45-L58 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/get_jobs/types.ts#L45-L58 type RollupJobStats struct { DocumentsProcessed int64 `json:"documents_processed"` IndexFailures int64 `json:"index_failures"` @@ -38,6 +46,176 @@ type RollupJobStats struct { TriggerCount int64 `json:"trigger_count"` } +func (s *RollupJobStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "documents_processed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocumentsProcessed = value + case float64: + f := int64(v) + s.DocumentsProcessed = f + } + + case "index_failures": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndexFailures = value + case float64: + f := int64(v) + s.IndexFailures = f + } + + case "index_time_in_ms": + if err := dec.Decode(&s.IndexTimeInMs); err != nil { + return err + } + + case "index_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndexTotal = value + case float64: + f := int64(v) + s.IndexTotal = f + } + + case "pages_processed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PagesProcessed = value + case float64: + f := int64(v) + s.PagesProcessed = f + } + + case "processing_time_in_ms": 
+ if err := dec.Decode(&s.ProcessingTimeInMs); err != nil { + return err + } + + case "processing_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ProcessingTotal = value + case float64: + f := int64(v) + s.ProcessingTotal = f + } + + case "rollups_indexed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.RollupsIndexed = value + case float64: + f := int64(v) + s.RollupsIndexed = f + } + + case "search_failures": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SearchFailures = value + case float64: + f := int64(v) + s.SearchFailures = f + } + + case "search_time_in_ms": + if err := dec.Decode(&s.SearchTimeInMs); err != nil { + return err + } + + case "search_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SearchTotal = value + case float64: + f := int64(v) + s.SearchTotal = f + } + + case "trigger_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TriggerCount = value + case float64: + f := int64(v) + s.TriggerCount = f + } + + } + } + return nil +} + // NewRollupJobStats returns a RollupJobStats. 
func NewRollupJobStats() *RollupJobStats { r := &RollupJobStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobstatus.go index 4c4004506..5ae49fdd6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobstatus.go @@ -16,25 +16,76 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexingjobstate" ) // RollupJobStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/get_jobs/types.ts#L60-L64 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/get_jobs/types.ts#L60-L64 type RollupJobStatus struct { CurrentPosition map[string]json.RawMessage `json:"current_position,omitempty"` JobState indexingjobstate.IndexingJobState `json:"job_state"` UpgradedDocId *bool `json:"upgraded_doc_id,omitempty"` } +func (s *RollupJobStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current_position": + if s.CurrentPosition == nil { + s.CurrentPosition = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.CurrentPosition); err != nil { + return err + } + + case "job_state": + if err := dec.Decode(&s.JobState); err != nil { + 
return err + } + + case "upgraded_doc_id": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.UpgradedDocId = &value + case bool: + s.UpgradedDocId = &v + } + + } + } + return nil +} + // NewRollupJobStatus returns a RollupJobStatus. func NewRollupJobStatus() *RollupJobStatus { r := &RollupJobStatus{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobsummary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobsummary.go index 8f61dc412..87681e480 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobsummary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobsummary.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RollupJobSummary type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/get_rollup_index_caps/types.ts#L28-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/get_rollup_index_caps/types.ts#L28-L33 type RollupJobSummary struct { Fields map[string][]RollupJobSummaryField `json:"fields"` IndexPattern string `json:"index_pattern"` @@ -30,6 +38,56 @@ type RollupJobSummary struct { RollupIndex string `json:"rollup_index"` } +func (s *RollupJobSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + if s.Fields == nil { + s.Fields = make(map[string][]RollupJobSummaryField, 0) + } + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "index_pattern": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexPattern = o + + case "job_id": + if err := dec.Decode(&s.JobId); err != nil { + return err + } + + case "rollup_index": + if err := dec.Decode(&s.RollupIndex); err != nil { + return err + } + + } + } + return nil +} + // NewRollupJobSummary returns a RollupJobSummary. func NewRollupJobSummary() *RollupJobSummary { r := &RollupJobSummary{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobsummaryfield.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobsummaryfield.go index bd70d96d3..fd6d16d65 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobsummaryfield.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rollupjobsummaryfield.go @@ -16,19 +16,69 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RollupJobSummaryField type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/get_rollup_index_caps/types.ts#L35-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/get_rollup_index_caps/types.ts#L35-L39 type RollupJobSummaryField struct { Agg string `json:"agg"` CalendarInterval Duration `json:"calendar_interval,omitempty"` TimeZone *string `json:"time_zone,omitempty"` } +func (s *RollupJobSummaryField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "agg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Agg = o + + case "calendar_interval": + if err := dec.Decode(&s.CalendarInterval); err != nil { + return err + } + + case "time_zone": + if err := dec.Decode(&s.TimeZone); err != nil { + return err + } + + } + } + return nil +} + // NewRollupJobSummaryField returns a RollupJobSummaryField. 
func NewRollupJobSummaryField() *RollupJobSummaryField { r := &RollupJobSummaryField{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/routingfield.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/routingfield.go index 6e727ddc0..1f717d0b8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/routingfield.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/routingfield.go @@ -16,17 +16,59 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RoutingField type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/meta-fields.ts#L50-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/meta-fields.ts#L50-L52 type RoutingField struct { Required bool `json:"required"` } +func (s *RoutingField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "required": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Required = value + case bool: + s.Required = v + } + + } + } + return nil +} + // NewRoutingField returns a RoutingField. 
func NewRoutingField() *RoutingField { r := &RoutingField{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/row.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/row.go deleted file mode 100644 index be7c6e1de..000000000 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/row.go +++ /dev/null @@ -1,28 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 - -package types - -import "encoding/json" - -// Row type alias. -// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/sql/types.ts#L28-L28 -type Row []json.RawMessage diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rrfrank.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rrfrank.go new file mode 100644 index 000000000..512c69205 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rrfrank.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// RrfRank type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Rank.ts#L32-L37 +type RrfRank struct { + // RankConstant How much influence documents in individual result sets per query have over + // the final ranked result set + RankConstant *int64 `json:"rank_constant,omitempty"` + // WindowSize Size of the individual result sets per query + WindowSize *int64 `json:"window_size,omitempty"` +} + +func (s *RrfRank) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "rank_constant": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.RankConstant = &value + case float64: + f := int64(v) + s.RankConstant = &f + } + + case "window_size": + var 
tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.WindowSize = &value + case float64: + f := int64(v) + s.WindowSize = &f + } + + } + } + return nil +} + +// NewRrfRank returns a RrfRank. +func NewRrfRank() *RrfRank { + r := &RrfRank{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rulecondition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rulecondition.go index 2c5aaacd0..776dc8959 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rulecondition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rulecondition.go @@ -16,18 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/appliesto" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditionoperator" ) // RuleCondition type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Rule.ts#L52-L65 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Rule.ts#L52-L65 type RuleCondition struct { // AppliesTo Specifies the result property to which the condition applies. 
If your // detector uses `lat_long`, `metric`, `rare`, or `freq_rare` functions, you can @@ -40,6 +46,52 @@ type RuleCondition struct { Value Float64 `json:"value"` } +func (s *RuleCondition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "applies_to": + if err := dec.Decode(&s.AppliesTo); err != nil { + return err + } + + case "operator": + if err := dec.Decode(&s.Operator); err != nil { + return err + } + + case "value": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Value = f + case float64: + f := Float64(v) + s.Value = f + } + + } + } + return nil +} + // NewRuleCondition returns a RuleCondition. func NewRuleCondition() *RuleCondition { r := &RuleCondition{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rulequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rulequery.go new file mode 100644 index 000000000..b910d4a77 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/rulequery.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// RuleQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L369-L373 +type RuleQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
+ Boost *float32 `json:"boost,omitempty"` + MatchCriteria json.RawMessage `json:"match_criteria,omitempty"` + Organic *Query `json:"organic,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + RulesetId string `json:"ruleset_id"` +} + +func (s *RuleQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "match_criteria": + if err := dec.Decode(&s.MatchCriteria); err != nil { + return err + } + + case "organic": + if err := dec.Decode(&s.Organic); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "ruleset_id": + if err := dec.Decode(&s.RulesetId); err != nil { + return err + } + + } + } + return nil +} + +// NewRuleQuery returns a RuleQuery. +func NewRuleQuery() *RuleQuery { + r := &RuleQuery{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runningstatesearchinterval.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runningstatesearchinterval.go index 2c8a3b893..52457fe53 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runningstatesearchinterval.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runningstatesearchinterval.go @@ -16,18 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // RunningStateSearchInterval type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Datafeed.ts#L164-L169 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Datafeed.ts#L214-L231 type RunningStateSearchInterval struct { - End Duration `json:"end,omitempty"` - EndMs int64 `json:"end_ms"` - Start Duration `json:"start,omitempty"` - StartMs int64 `json:"start_ms"` + // End The end time. + End Duration `json:"end,omitempty"` + // EndMs The end time as an epoch in milliseconds. + EndMs int64 `json:"end_ms"` + // Start The start time. + Start Duration `json:"start,omitempty"` + // StartMs The start time as an epoch in milliseconds. + StartMs int64 `json:"start_ms"` +} + +func (s *RunningStateSearchInterval) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "end": + if err := dec.Decode(&s.End); err != nil { + return err + } + + case "end_ms": + if err := dec.Decode(&s.EndMs); err != nil { + return err + } + + case "start": + if err := dec.Decode(&s.Start); err != nil { + return err + } + + case "start_ms": + if err := dec.Decode(&s.StartMs); err != nil { + return err + } + + } + } + return nil } // NewRunningStateSearchInterval returns a RunningStateSearchInterval. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runtimefield.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runtimefield.go index c5d8efc67..f0394facb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runtimefield.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runtimefield.go @@ -16,29 +16,101 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/runtimefieldtype" ) // RuntimeField type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/RuntimeFields.ts#L26-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/RuntimeFields.ts#L26-L48 type RuntimeField struct { // FetchFields For type `lookup` FetchFields []RuntimeFieldFetchFields `json:"fetch_fields,omitempty"` - Format *string `json:"format,omitempty"` + // Format A custom format for `date` type runtime fields. + Format *string `json:"format,omitempty"` // InputField For type `lookup` InputField *string `json:"input_field,omitempty"` - Script Script `json:"script,omitempty"` + // Script Painless script executed at query time. 
+ Script Script `json:"script,omitempty"` // TargetField For type `lookup` TargetField *string `json:"target_field,omitempty"` // TargetIndex For type `lookup` - TargetIndex *string `json:"target_index,omitempty"` - Type runtimefieldtype.RuntimeFieldType `json:"type"` + TargetIndex *string `json:"target_index,omitempty"` + // Type Field type, which can be: `boolean`, `composite`, `date`, `double`, + // `geo_point`, `ip`,`keyword`, `long`, or `lookup`. + Type runtimefieldtype.RuntimeFieldType `json:"type"` +} + +func (s *RuntimeField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fetch_fields": + if err := dec.Decode(&s.FetchFields); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "input_field": + if err := dec.Decode(&s.InputField); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + case "target_index": + if err := dec.Decode(&s.TargetIndex); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil } // NewRuntimeField returns a RuntimeField. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runtimefieldfetchfields.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runtimefieldfetchfields.go index 004a9130a..b61b768a5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runtimefieldfetchfields.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runtimefieldfetchfields.go @@ -16,18 +16,68 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RuntimeFieldFetchFields type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/RuntimeFields.ts#L40-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/RuntimeFields.ts#L50-L54 type RuntimeFieldFetchFields struct { Field string `json:"field"` Format *string `json:"format,omitempty"` } +func (s *RuntimeFieldFetchFields) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Field) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + } + } + return nil +} + // 
NewRuntimeFieldFetchFields returns a RuntimeFieldFetchFields. func NewRuntimeFieldFetchFields() *RuntimeFieldFetchFields { r := &RuntimeFieldFetchFields{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runtimefields.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runtimefields.go index 9bfc08a28..8c2a14896 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runtimefields.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runtimefields.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // RuntimeFields type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/RuntimeFields.ts#L24-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/RuntimeFields.ts#L24-L24 type RuntimeFields map[string]RuntimeField diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runtimefieldstype.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runtimefieldstype.go index 90e624dff..d1ff4bd9f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runtimefieldstype.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/runtimefieldstype.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // RuntimeFieldsType type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L273-L288 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L279-L294 type RuntimeFieldsType struct { CharsMax int64 `json:"chars_max"` CharsTotal int64 `json:"chars_total"` @@ -40,6 +48,216 @@ type RuntimeFieldsType struct { SourceTotal int64 `json:"source_total"` } +func (s *RuntimeFieldsType) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "chars_max": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CharsMax = value + case float64: + f := int64(v) + s.CharsMax = f + } + + case "chars_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CharsTotal = value + case float64: + f := int64(v) + s.CharsTotal = f + } + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "doc_max": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { 
+ return err + } + s.DocMax = value + case float64: + f := int64(v) + s.DocMax = f + } + + case "doc_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocTotal = value + case float64: + f := int64(v) + s.DocTotal = f + } + + case "index_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndexCount = value + case float64: + f := int64(v) + s.IndexCount = f + } + + case "lang": + if err := dec.Decode(&s.Lang); err != nil { + return err + } + + case "lines_max": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LinesMax = value + case float64: + f := int64(v) + s.LinesMax = f + } + + case "lines_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LinesTotal = value + case float64: + f := int64(v) + s.LinesTotal = f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "scriptless_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ScriptlessCount = value + case float64: + f := int64(v) + s.ScriptlessCount = f + } + + case "shadowed_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ShadowedCount = value + case float64: + f := int64(v) + s.ShadowedCount = f + } + + case "source_max": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if 
err != nil { + return err + } + s.SourceMax = value + case float64: + f := int64(v) + s.SourceMax = f + } + + case "source_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SourceTotal = value + case float64: + f := int64(v) + s.SourceTotal = f + } + + } + } + return nil +} + // NewRuntimeFieldsType returns a RuntimeFieldsType. func NewRuntimeFieldsType() *RuntimeFieldsType { r := &RuntimeFieldsType{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/samplediversity.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/samplediversity.go index 59fdf63f5..b388cdb0e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/samplediversity.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/samplediversity.go @@ -16,18 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SampleDiversity type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/graph/_types/ExploreControls.ts#L31-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/graph/_types/ExploreControls.ts#L51-L54 type SampleDiversity struct { Field string `json:"field"` MaxDocsPerValue int `json:"max_docs_per_value"` } +func (s *SampleDiversity) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "max_docs_per_value": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxDocsPerValue = value + case float64: + f := int(v) + s.MaxDocsPerValue = f + } + + } + } + return nil +} + // NewSampleDiversity returns a SampleDiversity. func NewSampleDiversity() *SampleDiversity { r := &SampleDiversity{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sampleraggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sampleraggregate.go index 71de454ac..286896f80 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sampleraggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sampleraggregate.go @@ -16,32 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // SamplerAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L497-L498 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L498-L499 type SamplerAggregate struct { - Aggregations map[string]Aggregate `json:"-"` - DocCount int64 `json:"doc_count"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` } func (s *SamplerAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -55,451 +54,19 @@ func (s *SamplerAggregate) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - 
if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := 
NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case 
"geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = 
o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "meta": @@ -507,6 +74,519 @@ func (s *SamplerAggregate) 
UnmarshalJSON(data []byte) error { return err } + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if 
err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != 
nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); 
err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := 
NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } + } } return nil @@ -531,6 +611,7 @@ func (s SamplerAggregate) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sampleraggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sampleraggregation.go index e72affddc..5ab853110 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sampleraggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sampleraggregation.go @@ -16,21 +16,80 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // SamplerAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L318-L320 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L727-L733 type SamplerAggregation struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - ShardSize *int `json:"shard_size,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // ShardSize Limits how many top-scoring documents are collected in the sample processed + // on each shard. + ShardSize *int `json:"shard_size,omitempty"` +} + +func (s *SamplerAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "shard_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + } + } + return nil } // NewSamplerAggregation returns a SamplerAggregation. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scaledfloatnumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scaledfloatnumberproperty.go index 20ee20702..c243b4cff 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scaledfloatnumberproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scaledfloatnumberproperty.go @@ -16,25 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // ScaledFloatNumberProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L171-L175 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L174-L178 type ScaledFloatNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -64,6 +64,7 @@ type ScaledFloatNumberProperty struct { } func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -78,23 +79,63 @@ func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + 
s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -103,6 +144,9 @@ func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -111,7 +155,9 @@ func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -390,35 +436,80 @@ func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "null_value": - if err 
:= dec.Decode(&s.NullValue); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.NullValue = &f + case float64: + f := Float64(v) + s.NullValue = &f } case "on_script_error": @@ -427,6 +518,9 @@ func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -435,7 +529,9 @@ func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -714,15 +810,28 @@ func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "scaling_factor": - if err := dec.Decode(&s.ScalingFactor); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.ScalingFactor = &f + case float64: + f := Float64(v) + s.ScalingFactor = &f } case "script": @@ -731,18 +840,43 @@ func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != 
nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "time_series_dimension": - if err := dec.Decode(&s.TimeSeriesDimension); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v } case "time_series_metric": @@ -760,6 +894,37 @@ func (s *ScaledFloatNumberProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s ScaledFloatNumberProperty) MarshalJSON() ([]byte, error) { + type innerScaledFloatNumberProperty ScaledFloatNumberProperty + tmp := innerScaledFloatNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + ScalingFactor: s.ScalingFactor, + Script: s.Script, + Similarity: s.Similarity, + Store: s.Store, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "scaled_float" + + return json.Marshal(tmp) +} + // NewScaledFloatNumberProperty returns a ScaledFloatNumberProperty. 
func NewScaledFloatNumberProperty() *ScaledFloatNumberProperty { r := &ScaledFloatNumberProperty{ @@ -768,7 +933,5 @@ func NewScaledFloatNumberProperty() *ScaledFloatNumberProperty { Properties: make(map[string]Property, 0), } - r.Type = "scaled_float" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/schedulecontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/schedulecontainer.go index d1b454dee..4352544c5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/schedulecontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/schedulecontainer.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ScheduleContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Schedule.ts#L85-L96 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Schedule.ts#L85-L96 type ScheduleContainer struct { Cron *string `json:"cron,omitempty"` Daily *DailySchedule `json:"daily,omitempty"` @@ -33,6 +40,94 @@ type ScheduleContainer struct { Yearly []TimeOfYear `json:"yearly,omitempty"` } +func (s *ScheduleContainer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cron": + if err := dec.Decode(&s.Cron); err != nil { + return err + } + + case "daily": + if err := dec.Decode(&s.Daily); err != nil { + return err + } + + case "hourly": + if err := dec.Decode(&s.Hourly); err != nil { + return err + } + + case "interval": + if err := dec.Decode(&s.Interval); err != nil { + return err + } + + case "monthly": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewTimeOfMonth() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Monthly = append(s.Monthly, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Monthly); err != nil { + return err + } + } + + case "weekly": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewTimeOfWeek() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Weekly = append(s.Weekly, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Weekly); err != nil { + return err + } + } + + case "yearly": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o 
:= NewTimeOfYear() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Yearly = append(s.Yearly, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Yearly); err != nil { + return err + } + } + + } + } + return nil +} + // NewScheduleContainer returns a ScheduleContainer. func NewScheduleContainer() *ScheduleContainer { r := &ScheduleContainer{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scheduletimeofday.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scheduletimeofday.go index 38c48f291..64656d678 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scheduletimeofday.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scheduletimeofday.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // string // HourAndMinute // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Schedule.ts#L103-L108 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Schedule.ts#L103-L108 type ScheduleTimeOfDay interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scheduletriggerevent.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scheduletriggerevent.go index 6fb693026..0ad2b20ea 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scheduletriggerevent.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scheduletriggerevent.go @@ -16,18 +16,55 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ScheduleTriggerEvent type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Schedule.ts#L98-L101 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Schedule.ts#L98-L101 type ScheduleTriggerEvent struct { ScheduledTime DateTime `json:"scheduled_time"` TriggeredTime DateTime `json:"triggered_time,omitempty"` } +func (s *ScheduleTriggerEvent) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "scheduled_time": + if err := dec.Decode(&s.ScheduledTime); err != nil { + return err + } + + case "triggered_time": + if err := dec.Decode(&s.TriggeredTime); err != nil { + return err + } + + } + } + return nil +} + // NewScheduleTriggerEvent returns a ScheduleTriggerEvent. func NewScheduleTriggerEvent() *ScheduleTriggerEvent { r := &ScheduleTriggerEvent{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scoresort.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scoresort.go index 115b84fbd..00ea4c403 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scoresort.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scoresort.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // ScoreSort type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/sort.ts#L55-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/sort.ts#L55-L57 type ScoreSort struct { Order *sortorder.SortOrder `json:"order,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/script.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/script.go index e2cda1a95..185639dfa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/script.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/script.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // InlineScript // StoredScriptId // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Scripting.ts#L56-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Scripting.ts#L88-L89 type Script interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptcache.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptcache.go index 7e2f2f313..9b0dc8463 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptcache.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptcache.go @@ -16,18 +16,107 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ScriptCache type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L413-L418 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L1031-L1045 type ScriptCache struct { - CacheEvictions *int64 `json:"cache_evictions,omitempty"` - CompilationLimitTriggered *int64 `json:"compilation_limit_triggered,omitempty"` - Compilations *int64 `json:"compilations,omitempty"` - Context *string `json:"context,omitempty"` + // CacheEvictions Total number of times the script cache has evicted old data. + CacheEvictions *int64 `json:"cache_evictions,omitempty"` + // CompilationLimitTriggered Total number of times the script compilation circuit breaker has limited + // inline script compilations. + CompilationLimitTriggered *int64 `json:"compilation_limit_triggered,omitempty"` + // Compilations Total number of inline script compilations performed by the node. 
+ Compilations *int64 `json:"compilations,omitempty"` + Context *string `json:"context,omitempty"` +} + +func (s *ScriptCache) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cache_evictions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CacheEvictions = &value + case float64: + f := int64(v) + s.CacheEvictions = &f + } + + case "compilation_limit_triggered": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CompilationLimitTriggered = &value + case float64: + f := int64(v) + s.CompilationLimitTriggered = &f + } + + case "compilations": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Compilations = &value + case float64: + f := int64(v) + s.Compilations = &f + } + + case "context": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Context = &o + + } + } + return nil } // NewScriptCache returns a ScriptCache. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptcondition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptcondition.go index 61eb89111..604b0d496 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptcondition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptcondition.go @@ -16,17 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // ScriptCondition type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Conditions.ts#L76-L84 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Conditions.ts#L76-L84 type ScriptCondition struct { Id *string `json:"id,omitempty"` Lang *string `json:"lang,omitempty"` @@ -34,6 +38,70 @@ type ScriptCondition struct { Source *string `json:"source,omitempty"` } +func (s *ScriptCondition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = &o + + case "lang": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Lang = &o + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return err + } + + case "source": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Source = &o + + } + } + return nil +} + // NewScriptCondition returns a ScriptCondition. 
func NewScriptCondition() *ScriptCondition { r := &ScriptCondition{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptedheuristic.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptedheuristic.go index dd86acef3..b28ed8566 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptedheuristic.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptedheuristic.go @@ -16,17 +16,49 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ScriptedHeuristic type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L338-L340 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L766-L768 type ScriptedHeuristic struct { Script Script `json:"script"` } +func (s *ScriptedHeuristic) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil +} + // NewScriptedHeuristic returns a ScriptedHeuristic. 
func NewScriptedHeuristic() *ScriptedHeuristic { r := &ScriptedHeuristic{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptedmetricaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptedmetricaggregate.go index 4637328ec..463b339dd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptedmetricaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptedmetricaggregate.go @@ -16,20 +16,53 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // ScriptedMetricAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L640-L643 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L649-L652 type ScriptedMetricAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Value json.RawMessage `json:"value,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Value json.RawMessage `json:"value,omitempty"` +} + +func (s *ScriptedMetricAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + } + } + return nil } // NewScriptedMetricAggregate returns a ScriptedMetricAggregate. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptedmetricaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptedmetricaggregation.go index 9d0349f57..4b7f22cf8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptedmetricaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptedmetricaggregation.go @@ -16,26 +16,109 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // ScriptedMetricAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L137-L143 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L254-L280 type ScriptedMetricAggregation struct { - CombineScript Script `json:"combine_script,omitempty"` - Field *string `json:"field,omitempty"` - InitScript Script `json:"init_script,omitempty"` - MapScript Script `json:"map_script,omitempty"` - Missing Missing `json:"missing,omitempty"` - Params map[string]json.RawMessage `json:"params,omitempty"` - ReduceScript Script `json:"reduce_script,omitempty"` - Script Script `json:"script,omitempty"` + // CombineScript Runs once on each shard after document collection is complete. + // Allows the aggregation to consolidate the state returned from each shard. + CombineScript Script `json:"combine_script,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + // InitScript Runs prior to any collection of documents. 
+ // Allows the aggregation to set up any initial state. + InitScript Script `json:"init_script,omitempty"` + // MapScript Run once per document collected. + // If no `combine_script` is specified, the resulting state needs to be stored + // in the `state` object. + MapScript Script `json:"map_script,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + // Params A global object with script parameters for `init`, `map` and `combine` + // scripts. + // It is shared between the scripts. + Params map[string]json.RawMessage `json:"params,omitempty"` + // ReduceScript Runs once on the coordinating node after all shards have returned their + // results. + // The script is provided with access to a variable `states`, which is an array + // of the result of the `combine_script` on each shard. + ReduceScript Script `json:"reduce_script,omitempty"` + Script Script `json:"script,omitempty"` +} + +func (s *ScriptedMetricAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "combine_script": + if err := dec.Decode(&s.CombineScript); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "init_script": + if err := dec.Decode(&s.InitScript); err != nil { + return err + } + + case "map_script": + if err := dec.Decode(&s.MapScript); err != nil { + return err + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return err + } + + case "reduce_script": + if err := dec.Decode(&s.ReduceScript); err != nil { + return err + } + + case 
"script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil } // NewScriptedMetricAggregation returns a ScriptedMetricAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptfield.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptfield.go index 014b03a01..c4cad2b9b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptfield.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptfield.go @@ -16,18 +16,65 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ScriptField type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Scripting.ts#L59-L62 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Scripting.ts#L91-L94 type ScriptField struct { IgnoreFailure *bool `json:"ignore_failure,omitempty"` Script Script `json:"script"` } +func (s *ScriptField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil +} + // NewScriptField returns a 
ScriptField. func NewScriptField() *ScriptField { r := &ScriptField{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scripting.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scripting.go index 6ae9dff9c..8a7835936 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scripting.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scripting.go @@ -16,19 +16,110 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Scripting type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L389-L395 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L977-L995 type Scripting struct { - CacheEvictions *int64 `json:"cache_evictions,omitempty"` - CompilationLimitTriggered *int64 `json:"compilation_limit_triggered,omitempty"` - Compilations *int64 `json:"compilations,omitempty"` - CompilationsHistory map[string]int64 `json:"compilations_history,omitempty"` - Contexts []NodesContext `json:"contexts,omitempty"` + // CacheEvictions Total number of times the script cache has evicted old data. + CacheEvictions *int64 `json:"cache_evictions,omitempty"` + // CompilationLimitTriggered Total number of times the script compilation circuit breaker has limited + // inline script compilations. + CompilationLimitTriggered *int64 `json:"compilation_limit_triggered,omitempty"` + // Compilations Total number of inline script compilations performed by the node. 
+ Compilations *int64 `json:"compilations,omitempty"` + // CompilationsHistory Contains this recent history of script compilations. + CompilationsHistory map[string]int64 `json:"compilations_history,omitempty"` + Contexts []NodesContext `json:"contexts,omitempty"` +} + +func (s *Scripting) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cache_evictions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CacheEvictions = &value + case float64: + f := int64(v) + s.CacheEvictions = &f + } + + case "compilation_limit_triggered": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CompilationLimitTriggered = &value + case float64: + f := int64(v) + s.CompilationLimitTriggered = &f + } + + case "compilations": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Compilations = &value + case float64: + f := int64(v) + s.Compilations = &f + } + + case "compilations_history": + if s.CompilationsHistory == nil { + s.CompilationsHistory = make(map[string]int64, 0) + } + if err := dec.Decode(&s.CompilationsHistory); err != nil { + return err + } + + case "contexts": + if err := dec.Decode(&s.Contexts); err != nil { + return err + } + + } + } + return nil } // NewScripting returns a Scripting. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptquery.go index e7dae360b..aa64c5fa5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptquery.go @@ -16,17 +16,85 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ScriptQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L164-L166 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L318-L324 type ScriptQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. Boost *float32 `json:"boost,omitempty"` QueryName_ *string `json:"_name,omitempty"` - Script Script `json:"script"` + // Script Contains a script to run as a query. + // This script must return a boolean value, `true` or `false`. 
+ Script Script `json:"script"` +} + +func (s *ScriptQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil } // NewScriptQuery returns a ScriptQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptscorefunction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptscorefunction.go index 8915a5c75..74a05b294 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptscorefunction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptscorefunction.go @@ -16,17 +16,50 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ScriptScoreFunction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/compound.ts#L61-L63 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/compound.ts#L120-L125 type ScriptScoreFunction struct { + // Script A script that computes a score. Script Script `json:"script"` } +func (s *ScriptScoreFunction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil +} + // NewScriptScoreFunction returns a ScriptScoreFunction. func NewScriptScoreFunction() *ScriptScoreFunction { r := &ScriptScoreFunction{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptscorequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptscorequery.go index 24da89077..c23c45688 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptscorequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptscorequery.go @@ -16,19 +16,112 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ScriptScoreQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L168-L172 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L326-L340 type ScriptScoreQuery struct { - Boost *float32 `json:"boost,omitempty"` - MinScore *float32 `json:"min_score,omitempty"` - Query *Query `json:"query,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Script Script `json:"script"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // MinScore Documents with a score lower than this floating point number are excluded + // from the search results. + MinScore *float32 `json:"min_score,omitempty"` + // Query Query used to return documents. + Query *Query `json:"query,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Script Script used to compute the score of documents returned by the query. + // Important: final relevance scores from the `script_score` query cannot be + // negative. 
+ Script Script `json:"script"` +} + +func (s *ScriptScoreQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "min_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.MinScore = &f + case float64: + f := float32(v) + s.MinScore = &f + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil } // NewScriptScoreQuery returns a ScriptScoreQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptsort.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptsort.go index 8605605e4..2719d20a4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptsort.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scriptsort.go @@ -16,11 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptsorttype" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortmode" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" @@ -28,7 +33,7 @@ import ( // ScriptSort type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/sort.ts#L68-L74 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/sort.ts#L68-L74 type ScriptSort struct { Mode *sortmode.SortMode `json:"mode,omitempty"` Nested *NestedSortValue `json:"nested,omitempty"` @@ -37,6 +42,51 @@ type ScriptSort struct { Type *scriptsorttype.ScriptSortType `json:"type,omitempty"` } +func (s *ScriptSort) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return err + } + + case "nested": + if err := dec.Decode(&s.Nested); err != nil { + return err + } + + case "order": + if err := dec.Decode(&s.Order); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil +} + // NewScriptSort returns a ScriptSort. 
func NewScriptSort() *ScriptSort { r := &ScriptSort{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scripttransform.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scripttransform.go index 8f65a1594..ba72fe2a2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scripttransform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scripttransform.go @@ -16,17 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // ScriptTransform type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Transform.ts#L36-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Transform.ts#L36-L44 type ScriptTransform struct { Id *string `json:"id,omitempty"` Lang *string `json:"lang,omitempty"` @@ -34,6 +38,70 @@ type ScriptTransform struct { Source *string `json:"source,omitempty"` } +func (s *ScriptTransform) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = &o + + case "lang": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + 
s.Lang = &o + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return err + } + + case "source": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Source = &o + + } + } + return nil +} + // NewScriptTransform returns a ScriptTransform. func NewScriptTransform() *ScriptTransform { r := &ScriptTransform{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scrollids.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scrollids.go index a1dce3cb7..7c3ee1825 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scrollids.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/scrollids.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ScrollIds type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L50-L50 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L50-L50 type ScrollIds []string diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchablesnapshots.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchablesnapshots.go index 98965b27d..63825aea1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchablesnapshots.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchablesnapshots.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SearchableSnapshots type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L419-L423 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L428-L432 type SearchableSnapshots struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -31,6 +39,102 @@ type SearchableSnapshots struct { SharedCacheIndicesCount *int `json:"shared_cache_indices_count,omitempty"` } +func (s *SearchableSnapshots) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "full_copy_indices_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FullCopyIndicesCount = &value + case float64: + f := int(v) + s.FullCopyIndicesCount = &f + } + + case "indices_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IndicesCount = value + case float64: + f := int(v) + s.IndicesCount = f + } + + case "shared_cache_indices_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SharedCacheIndicesCount 
= &value + case float64: + f := int(v) + s.SharedCacheIndicesCount = &f + } + + } + } + return nil +} + // NewSearchableSnapshots returns a SearchableSnapshots. func NewSearchableSnapshots() *SearchableSnapshots { r := &SearchableSnapshots{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchapplication.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchapplication.go new file mode 100644 index 000000000..60bb27cac --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchapplication.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + +// SearchApplication type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/search_application/_types/SearchApplication.ts#L24-L45 +type SearchApplication struct { + // AnalyticsCollectionName Analytics collection associated to the Search Application. 
+ AnalyticsCollectionName *string `json:"analytics_collection_name,omitempty"` + // Indices Indices that are part of the Search Application. + Indices []string `json:"indices"` + // Name Search Application name. + Name string `json:"name"` + // Template Search template to use on search operations. + Template *SearchApplicationTemplate `json:"template,omitempty"` + // UpdatedAtMillis Last time the Search Application was updated. + UpdatedAtMillis int64 `json:"updated_at_millis"` +} + +func (s *SearchApplication) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analytics_collection_name": + if err := dec.Decode(&s.AnalyticsCollectionName); err != nil { + return err + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "template": + if err := dec.Decode(&s.Template); err != nil { + return err + } + + case "updated_at_millis": + if err := dec.Decode(&s.UpdatedAtMillis); err != nil { + return err + } + + } + } + return nil +} + +// NewSearchApplication returns a SearchApplication. +func NewSearchApplication() *SearchApplication { + r := &SearchApplication{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchapplicationlistitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchapplicationlistitem.go new file mode 100644 index 000000000..4c070e328 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchapplicationlistitem.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + +// SearchApplicationListItem type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/search_application/list/SearchApplicationsListResponse.ts#L31-L48 +type SearchApplicationListItem struct { + // AnalyticsCollectionName Analytics collection associated to the Search Application + AnalyticsCollectionName *string `json:"analytics_collection_name,omitempty"` + // Indices Indices that are part of the Search Application + Indices []string `json:"indices"` + // Name Search Application name + Name string `json:"name"` + // UpdatedAtMillis Last time the Search Application was updated + UpdatedAtMillis int64 `json:"updated_at_millis"` +} + +func (s *SearchApplicationListItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analytics_collection_name": + if err := dec.Decode(&s.AnalyticsCollectionName); err != nil { + return err + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return 
err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "updated_at_millis": + if err := dec.Decode(&s.UpdatedAtMillis); err != nil { + return err + } + + } + } + return nil +} + +// NewSearchApplicationListItem returns a SearchApplicationListItem. +func NewSearchApplicationListItem() *SearchApplicationListItem { + r := &SearchApplicationListItem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchapplicationtemplate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchapplicationtemplate.go new file mode 100644 index 000000000..e9339b8c3 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchapplicationtemplate.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +// SearchApplicationTemplate type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/search_application/_types/SearchApplication.ts#L47-L52 +type SearchApplicationTemplate struct { + // Script The associated mustache template. + Script InlineScript `json:"script"` +} + +// NewSearchApplicationTemplate returns a SearchApplicationTemplate. +func NewSearchApplicationTemplate() *SearchApplicationTemplate { + r := &SearchApplicationTemplate{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchasyoutypeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchasyoutypeproperty.go index 0030f16fb..1f7db3384 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchasyoutypeproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchasyoutypeproperty.go @@ -16,25 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption" ) // SearchAsYouTypeProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L190-L200 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L197-L207 type SearchAsYouTypeProperty struct { Analyzer *string `json:"analyzer,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -57,6 +57,7 @@ type SearchAsYouTypeProperty struct { } func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -71,13 +72,31 @@ func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { switch t { case "analyzer": - if err := dec.Decode(&s.Analyzer); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "dynamic": @@ -86,6 +105,9 @@ func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -94,7 +116,9 @@ func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() 
@@ -373,20 +397,42 @@ func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "index_options": @@ -395,21 +441,47 @@ func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { } case "max_shingle_size": - if err := dec.Decode(&s.MaxShingleSize); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxShingleSize = &value + case float64: + f := int(v) + s.MaxShingleSize = &f } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "norms": - if err := dec.Decode(&s.Norms); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Norms = &value + case bool: + s.Norms = &v } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -418,7 +490,9 @@ func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) 
error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -697,30 +771,62 @@ func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "search_analyzer": - if err := dec.Decode(&s.SearchAnalyzer); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchAnalyzer = &o case "search_quote_analyzer": - if err := dec.Decode(&s.SearchQuoteAnalyzer); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQuoteAnalyzer = &o case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "term_vector": @@ -738,6 +844,34 @@ func (s *SearchAsYouTypeProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s SearchAsYouTypeProperty) MarshalJSON() ([]byte, error) { + type innerSearchAsYouTypeProperty SearchAsYouTypeProperty + tmp := innerSearchAsYouTypeProperty{ + Analyzer: 
s.Analyzer, + CopyTo: s.CopyTo, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + IndexOptions: s.IndexOptions, + MaxShingleSize: s.MaxShingleSize, + Meta: s.Meta, + Norms: s.Norms, + Properties: s.Properties, + SearchAnalyzer: s.SearchAnalyzer, + SearchQuoteAnalyzer: s.SearchQuoteAnalyzer, + Similarity: s.Similarity, + Store: s.Store, + TermVector: s.TermVector, + Type: s.Type, + } + + tmp.Type = "search_as_you_type" + + return json.Marshal(tmp) +} + // NewSearchAsYouTypeProperty returns a SearchAsYouTypeProperty. func NewSearchAsYouTypeProperty() *SearchAsYouTypeProperty { r := &SearchAsYouTypeProperty{ @@ -746,7 +880,5 @@ func NewSearchAsYouTypeProperty() *SearchAsYouTypeProperty { Properties: make(map[string]Property, 0), } - r.Type = "search_as_you_type" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchidle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchidle.go index 16307a6d5..cfff46d41 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchidle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchidle.go @@ -16,17 +16,49 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // SearchIdle type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L236-L239 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L236-L239 type SearchIdle struct { After Duration `json:"after,omitempty"` } +func (s *SearchIdle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "after": + if err := dec.Decode(&s.After); err != nil { + return err + } + + } + } + return nil +} + // NewSearchIdle returns a SearchIdle. func NewSearchIdle() *SearchIdle { r := &SearchIdle{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchinput.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchinput.go index f46ca85eb..3973f5faa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchinput.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchinput.go @@ -16,19 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // SearchInput type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Input.ts#L112-L116 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Input.ts#L112-L116 type SearchInput struct { Extract []string `json:"extract,omitempty"` Request SearchInputRequestDefinition `json:"request"` Timeout Duration `json:"timeout,omitempty"` } +func (s *SearchInput) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "extract": + if err := dec.Decode(&s.Extract); err != nil { + return err + } + + case "request": + if err := dec.Decode(&s.Request); err != nil { + return err + } + + case "timeout": + if err := dec.Decode(&s.Timeout); err != nil { + return err + } + + } + } + return nil +} + // NewSearchInput returns a SearchInput. func NewSearchInput() *SearchInput { r := &SearchInput{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchinputrequestbody.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchinputrequestbody.go index ce01ff6c7..281253999 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchinputrequestbody.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchinputrequestbody.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // SearchInputRequestBody type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Input.ts#L147-L149 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Input.ts#L147-L149 type SearchInputRequestBody struct { Query Query `json:"query"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchinputrequestdefinition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchinputrequestdefinition.go index 71ad6bd91..f44bf427d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchinputrequestdefinition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchinputrequestdefinition.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype" ) // SearchInputRequestDefinition type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Input.ts#L118-L125 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Input.ts#L118-L125 type SearchInputRequestDefinition struct { Body *SearchInputRequestBody `json:"body,omitempty"` Indices []string `json:"indices,omitempty"` @@ -36,6 +42,65 @@ type SearchInputRequestDefinition struct { Template *SearchTemplateRequestBody `json:"template,omitempty"` } +func (s *SearchInputRequestDefinition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "body": + if err := dec.Decode(&s.Body); err != nil { + return err + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return err + } + + case "indices_options": + if err := dec.Decode(&s.IndicesOptions); err != nil { + return err + } + + case "rest_total_hits_as_int": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.RestTotalHitsAsInt = &value + case bool: + s.RestTotalHitsAsInt = &v + } + + case "search_type": + if err := dec.Decode(&s.SearchType); err != nil { + return err + } + + case "template": + if err := dec.Decode(&s.Template); err != nil { + return err + } + + } + } + return nil +} + // NewSearchInputRequestDefinition returns a SearchInputRequestDefinition. 
func NewSearchInputRequestDefinition() *SearchInputRequestDefinition { r := &SearchInputRequestDefinition{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchprofile.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchprofile.go index 4ab686df0..1e2894070 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchprofile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchprofile.go @@ -16,19 +16,72 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SearchProfile type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/profile.ts#L126-L130 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/profile.ts#L126-L130 type SearchProfile struct { Collector []Collector `json:"collector"` Query []QueryProfile `json:"query"` RewriteTime int64 `json:"rewrite_time"` } +func (s *SearchProfile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collector": + if err := dec.Decode(&s.Collector); err != nil { + return err + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "rewrite_time": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.RewriteTime = 
value + case float64: + f := int64(v) + s.RewriteTime = f + } + + } + } + return nil +} + // NewSearchProfile returns a SearchProfile. func NewSearchProfile() *SearchProfile { r := &SearchProfile{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchstats.go index 9f261bdf7..ecee92289 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SearchStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L185-L204 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L252-L271 type SearchStats struct { FetchCurrent int64 `json:"fetch_current"` FetchTime Duration `json:"fetch_time,omitempty"` @@ -44,6 +52,209 @@ type SearchStats struct { SuggestTotal int64 `json:"suggest_total"` } +func (s *SearchStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fetch_current": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.FetchCurrent = value + case float64: + f := int64(v) + s.FetchCurrent = f + } + + 
case "fetch_time": + if err := dec.Decode(&s.FetchTime); err != nil { + return err + } + + case "fetch_time_in_millis": + if err := dec.Decode(&s.FetchTimeInMillis); err != nil { + return err + } + + case "fetch_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.FetchTotal = value + case float64: + f := int64(v) + s.FetchTotal = f + } + + case "groups": + if s.Groups == nil { + s.Groups = make(map[string]SearchStats, 0) + } + if err := dec.Decode(&s.Groups); err != nil { + return err + } + + case "open_contexts": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.OpenContexts = &value + case float64: + f := int64(v) + s.OpenContexts = &f + } + + case "query_current": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.QueryCurrent = value + case float64: + f := int64(v) + s.QueryCurrent = f + } + + case "query_time": + if err := dec.Decode(&s.QueryTime); err != nil { + return err + } + + case "query_time_in_millis": + if err := dec.Decode(&s.QueryTimeInMillis); err != nil { + return err + } + + case "query_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.QueryTotal = value + case float64: + f := int64(v) + s.QueryTotal = f + } + + case "scroll_current": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ScrollCurrent = value + case float64: + f := int64(v) + s.ScrollCurrent = f + } + + case "scroll_time": + if err := dec.Decode(&s.ScrollTime); err != nil { + return err + } + + case 
"scroll_time_in_millis": + if err := dec.Decode(&s.ScrollTimeInMillis); err != nil { + return err + } + + case "scroll_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ScrollTotal = value + case float64: + f := int64(v) + s.ScrollTotal = f + } + + case "suggest_current": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SuggestCurrent = value + case float64: + f := int64(v) + s.SuggestCurrent = f + } + + case "suggest_time": + if err := dec.Decode(&s.SuggestTime); err != nil { + return err + } + + case "suggest_time_in_millis": + if err := dec.Decode(&s.SuggestTimeInMillis); err != nil { + return err + } + + case "suggest_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SuggestTotal = value + case float64: + f := int64(v) + s.SuggestTotal = f + } + + } + } + return nil +} + // NewSearchStats returns a SearchStats. func NewSearchStats() *SearchStats { r := &SearchStats{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchtemplaterequestbody.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchtemplaterequestbody.go index 8f06241d1..b62180d91 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchtemplaterequestbody.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchtemplaterequestbody.go @@ -16,17 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // SearchTemplateRequestBody type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Input.ts#L128-L145 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Input.ts#L128-L145 type SearchTemplateRequestBody struct { Explain *bool `json:"explain,omitempty"` // Id ID of the search template to use. If no source is specified, @@ -40,6 +44,79 @@ type SearchTemplateRequestBody struct { Source *string `json:"source,omitempty"` } +func (s *SearchTemplateRequestBody) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "explain": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Explain = &value + case bool: + s.Explain = &v + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return err + } + + case "profile": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Profile = &value + case bool: + s.Profile = &v + } + + case "source": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != 
nil { + o = string(tmp[:]) + } + s.Source = &o + + } + } + return nil +} + // NewSearchTemplateRequestBody returns a SearchTemplateRequestBody. func NewSearchTemplateRequestBody() *SearchTemplateRequestBody { r := &SearchTemplateRequestBody{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchtransform.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchtransform.go index c447689fc..7f3c256a3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchtransform.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/searchtransform.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // SearchTransform type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Transform.ts#L46-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Transform.ts#L46-L49 type SearchTransform struct { Request SearchInputRequestDefinition `json:"request"` Timeout Duration `json:"timeout"` } +func (s *SearchTransform) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "request": + if err := dec.Decode(&s.Request); err != nil { + return err + } + + case "timeout": + if err := dec.Decode(&s.Timeout); err != nil { + return err + } + + } + } + return nil +} + // NewSearchTransform returns a SearchTransform. 
func NewSearchTransform() *SearchTransform { r := &SearchTransform{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/security.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/security.go index 7b24e5b7b..22285bc60 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/security.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/security.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Security type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L425-L438 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L434-L447 type Security struct { Anonymous FeatureToggle `json:"anonymous"` ApiKeyService FeatureToggle `json:"api_key_service"` @@ -40,6 +48,120 @@ type Security struct { TokenService FeatureToggle `json:"token_service"` } +func (s *Security) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "anonymous": + if err := dec.Decode(&s.Anonymous); err != nil { + return err + } + + case "api_key_service": + if err := dec.Decode(&s.ApiKeyService); err != nil { + return err + } + + case "audit": + if err := dec.Decode(&s.Audit); err != nil { + return err + } + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "fips_140": + if err := dec.Decode(&s.Fips140); err != nil { + return err + } + + case "ipfilter": + if err := dec.Decode(&s.Ipfilter); err != nil { + return err + } + + case "operator_privileges": + if err := dec.Decode(&s.OperatorPrivileges); err != nil { + return err + } + + case "realms": + if s.Realms == nil { + s.Realms = make(map[string]XpackRealm, 0) + } + if err := dec.Decode(&s.Realms); err != nil { + return err + } + + case "role_mapping": + if s.RoleMapping == nil { + s.RoleMapping = make(map[string]XpackRoleMapping, 0) + } + if err := dec.Decode(&s.RoleMapping); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "ssl": + if err := dec.Decode(&s.Ssl); err != nil { + return err + } + + case "system_key": + if err := dec.Decode(&s.SystemKey); err != nil { + return err + } + + case "token_service": + if err := dec.Decode(&s.TokenService); err != nil { + return err + } + + } + } + return nil +} + // NewSecurity returns a Security. func NewSecurity() *Security { r := &Security{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrealm.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrealm.go index 0808ae5df..c45df3b9c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrealm.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrealm.go @@ -16,17 +16,49 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // SecurityRealm type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/RoleMappingRule.ts#L44-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/RoleMappingRule.ts#L44-L46 type SecurityRealm struct { Name string `json:"name"` } +func (s *SecurityRealm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + // NewSecurityRealm returns a SecurityRealm. func NewSecurityRealm() *SecurityRealm { r := &SecurityRealm{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolemapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolemapping.go index 41e8a73d3..b8fa01b49 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolemapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolemapping.go @@ -16,23 +16,81 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // SecurityRoleMapping type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/RoleMapping.ts#L25-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/RoleMapping.ts#L25-L31 type SecurityRoleMapping struct { - Enabled bool `json:"enabled"` - Metadata map[string]json.RawMessage `json:"metadata"` - RoleTemplates []RoleTemplate `json:"role_templates,omitempty"` - Roles []string `json:"roles"` - Rules RoleMappingRule `json:"rules"` + Enabled bool `json:"enabled"` + Metadata Metadata `json:"metadata"` + RoleTemplates []RoleTemplate `json:"role_templates,omitempty"` + Roles []string `json:"roles"` + Rules RoleMappingRule `json:"rules"` +} + +func (s *SecurityRoleMapping) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return err + } + + case "role_templates": + if err := dec.Decode(&s.RoleTemplates); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "rules": + if err := dec.Decode(&s.Rules); err != nil { + return err + } + + } + } + return nil } // NewSecurityRoleMapping returns a SecurityRoleMapping. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityroles.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityroles.go index 580319b73..f97c0f756 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityroles.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityroles.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // SecurityRoles type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L290-L294 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L296-L300 type SecurityRoles struct { Dls SecurityRolesDls `json:"dls"` File SecurityRolesFile `json:"file"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolesdls.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolesdls.go index 18715b996..8424b245e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolesdls.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolesdls.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // SecurityRolesDls type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L302-L304 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L308-L310 type SecurityRolesDls struct { BitSetCache SecurityRolesDlsBitSetCache `json:"bit_set_cache"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolesdlsbitsetcache.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolesdlsbitsetcache.go index 83c6fdddc..828deac71 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolesdlsbitsetcache.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolesdlsbitsetcache.go @@ -16,19 +16,73 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SecurityRolesDlsBitSetCache type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L306-L310 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L312-L316 type SecurityRolesDlsBitSetCache struct { Count int `json:"count"` Memory ByteSize `json:"memory,omitempty"` MemoryInBytes uint64 `json:"memory_in_bytes"` } +func (s *SecurityRolesDlsBitSetCache) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "memory": + if err := dec.Decode(&s.Memory); err != nil { + return err + } + + case "memory_in_bytes": + if err := dec.Decode(&s.MemoryInBytes); err != nil { + return err + } + + } + } + return nil +} + // NewSecurityRolesDlsBitSetCache returns a SecurityRolesDlsBitSetCache. func NewSecurityRolesDlsBitSetCache() *SecurityRolesDlsBitSetCache { r := &SecurityRolesDlsBitSetCache{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolesfile.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolesfile.go index ffea3937a..a2a2cda4b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolesfile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolesfile.go @@ -16,19 +16,90 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SecurityRolesFile type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L312-L316 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L318-L322 type SecurityRolesFile struct { Dls bool `json:"dls"` Fls bool `json:"fls"` Size int64 `json:"size"` } +func (s *SecurityRolesFile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dls": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Dls = value + case bool: + s.Dls = v + } + + case "fls": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Fls = value + case bool: + s.Fls = v + } + + case "size": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Size = value + case float64: + f := int64(v) + s.Size = f + } + + } + } + return nil +} + // NewSecurityRolesFile returns a SecurityRolesFile. 
func NewSecurityRolesFile() *SecurityRolesFile { r := &SecurityRolesFile{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolesnative.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolesnative.go index 4a54068c1..602fc8514 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolesnative.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/securityrolesnative.go @@ -16,19 +16,90 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SecurityRolesNative type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L296-L300 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L302-L306 type SecurityRolesNative struct { Dls bool `json:"dls"` Fls bool `json:"fls"` Size int64 `json:"size"` } +func (s *SecurityRolesNative) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dls": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Dls = value + case bool: + s.Dls = v + } + + case "fls": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Fls = value + case bool: + s.Fls = v + } + + case 
"size": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Size = value + case float64: + f := int64(v) + s.Size = f + } + + } + } + return nil +} + // NewSecurityRolesNative returns a SecurityRolesNative. func NewSecurityRolesNative() *SecurityRolesNative { r := &SecurityRolesNative{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/segment.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/segment.go index 8f42109f0..8ab3c190f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/segment.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/segment.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Segment type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/segments/types.ts#L28-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/segments/types.ts#L28-L38 type Segment struct { Attributes map[string]string `json:"attributes"` Committed bool `json:"committed"` @@ -35,6 +43,143 @@ type Segment struct { Version string `json:"version"` } +func (s *Segment) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return err + } + + case "committed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Committed = value + case bool: + s.Committed = v + } + + case "compound": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Compound = value + case bool: + s.Compound = v + } + + case "deleted_docs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DeletedDocs = value + case float64: + f := int64(v) + s.DeletedDocs = f + } + + case "generation": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Generation = value + case float64: + f := int(v) + s.Generation = f + } + + case "num_docs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumDocs = value + case float64: + f := int64(v) + s.NumDocs = f + } + + case "search": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Search = value + case bool: + s.Search = v + } + + case "size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.SizeInBytes = f + case float64: + f := Float64(v) + s.SizeInBytes = f + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewSegment returns a Segment. func NewSegment() *Segment { r := &Segment{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/segmentsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/segmentsrecord.go index e2b718880..b957cadab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/segmentsrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/segmentsrecord.go @@ -16,46 +16,242 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SegmentsRecord type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/segments/types.ts#L22-L96 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/segments/types.ts#L22-L107 type SegmentsRecord struct { - // Committed is segment committed + // Committed If `true`, the segment is synced to disk. + // Segments that are synced can survive a hard reboot. + // If `false`, the data from uncommitted segments is also stored in the + // transaction log so that Elasticsearch is able to replay changes on the next + // start. Committed *string `json:"committed,omitempty"` - // Compound is segment compound + // Compound If `true`, the segment is stored in a compound file. + // This means Lucene merged all files from the segment in a single file to save + // file descriptors. Compound *string `json:"compound,omitempty"` - // DocsCount number of docs in segment + // DocsCount The number of documents in the segment. + // This excludes deleted documents and counts any nested documents separately + // from their parents. + // It also excludes documents which were indexed recently and do not yet belong + // to a segment. DocsCount *string `json:"docs.count,omitempty"` - // DocsDeleted number of deleted docs in segment + // DocsDeleted The number of deleted documents in the segment, which might be higher or + // lower than the number of delete operations you have performed. + // This number excludes deletes that were performed recently and do not yet + // belong to a segment. + // Deleted documents are cleaned up by the automatic merge process if it makes + // sense to do so. + // Also, Elasticsearch creates extra deleted documents to internally track the + // recent history of operations on a shard. DocsDeleted *string `json:"docs.deleted,omitempty"` - // Generation segment generation + // Generation The segment generation number. 
+ // Elasticsearch increments this generation number for each segment written then + // uses this number to derive the segment name. Generation *string `json:"generation,omitempty"` - // Id unique id of node where it lives + // Id The unique identifier of the node where it lives. Id *string `json:"id,omitempty"` - // Index index name + // Index The index name. Index *string `json:"index,omitempty"` - // Ip ip of node where it lives + // Ip The IP address of the node where it lives. Ip *string `json:"ip,omitempty"` - // Prirep primary or replica + // Prirep The shard type: `primary` or `replica`. Prirep *string `json:"prirep,omitempty"` - // Searchable is segment searched + // Searchable If `true`, the segment is searchable. + // If `false`, the segment has most likely been written to disk but needs a + // refresh to be searchable. Searchable *string `json:"searchable,omitempty"` - // Segment segment name + // Segment The segment name, which is derived from the segment generation and used + // internally to create file names in the directory of the shard. Segment *string `json:"segment,omitempty"` - // Shard shard name + // Shard The shard name. Shard *string `json:"shard,omitempty"` - // Size segment size in bytes + // Size The segment size in bytes. Size ByteSize `json:"size,omitempty"` - // SizeMemory segment memory in bytes + // SizeMemory The segment memory in bytes. + // A value of `-1` indicates Elasticsearch was unable to compute this number. SizeMemory ByteSize `json:"size.memory,omitempty"` - // Version version + // Version The version of Lucene used to write the segment. 
Version *string `json:"version,omitempty"` } +func (s *SegmentsRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "committed", "ic", "isCommitted": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Committed = &o + + case "compound", "ico", "isCompound": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Compound = &o + + case "docs.count", "dc", "docsCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DocsCount = &o + + case "docs.deleted", "dd", "docsDeleted": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DocsDeleted = &o + + case "generation", "g", "gen": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Generation = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "index", "i", "idx": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "ip": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Ip = &o + + case "prirep", "p", "pr", "primaryOrReplica": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return 
err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Prirep = &o + + case "searchable", "is", "isSearchable": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Searchable = &o + + case "segment", "seg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Segment = &o + + case "shard", "s", "sh": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Shard = &o + + case "size", "si": + if err := dec.Decode(&s.Size); err != nil { + return err + } + + case "size.memory", "sm", "sizeMemory": + if err := dec.Decode(&s.SizeMemory); err != nil { + return err + } + + case "version", "v": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewSegmentsRecord returns a SegmentsRecord. func NewSegmentsRecord() *SegmentsRecord { r := &SegmentsRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/segmentsstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/segmentsstats.go index 183513f15..9ba284f0a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/segmentsstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/segmentsstats.go @@ -16,38 +16,365 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SegmentsStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L206-L231 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L273-L366 type SegmentsStats struct { - Count int `json:"count"` - DocValuesMemory ByteSize `json:"doc_values_memory,omitempty"` - DocValuesMemoryInBytes int `json:"doc_values_memory_in_bytes"` - FileSizes map[string]ShardFileSizeInfo `json:"file_sizes"` - FixedBitSet ByteSize `json:"fixed_bit_set,omitempty"` - FixedBitSetMemoryInBytes int `json:"fixed_bit_set_memory_in_bytes"` - IndexWriterMaxMemoryInBytes *int `json:"index_writer_max_memory_in_bytes,omitempty"` - IndexWriterMemory ByteSize `json:"index_writer_memory,omitempty"` - IndexWriterMemoryInBytes int `json:"index_writer_memory_in_bytes"` - MaxUnsafeAutoIdTimestamp int64 `json:"max_unsafe_auto_id_timestamp"` - Memory ByteSize `json:"memory,omitempty"` - MemoryInBytes int `json:"memory_in_bytes"` - NormsMemory ByteSize `json:"norms_memory,omitempty"` - NormsMemoryInBytes int `json:"norms_memory_in_bytes"` - PointsMemory ByteSize `json:"points_memory,omitempty"` - PointsMemoryInBytes int `json:"points_memory_in_bytes"` - StoredFieldsMemoryInBytes int `json:"stored_fields_memory_in_bytes"` - StoredMemory ByteSize `json:"stored_memory,omitempty"` - TermVectorsMemoryInBytes int `json:"term_vectors_memory_in_bytes"` - TermVectoryMemory ByteSize `json:"term_vectory_memory,omitempty"` - TermsMemory ByteSize `json:"terms_memory,omitempty"` - TermsMemoryInBytes int `json:"terms_memory_in_bytes"` - VersionMapMemory ByteSize 
`json:"version_map_memory,omitempty"` - VersionMapMemoryInBytes int `json:"version_map_memory_in_bytes"` + // Count Total number of segments across all shards assigned to selected nodes. + Count int `json:"count"` + // DocValuesMemory Total amount of memory used for doc values across all shards assigned to + // selected nodes. + DocValuesMemory ByteSize `json:"doc_values_memory,omitempty"` + // DocValuesMemoryInBytes Total amount, in bytes, of memory used for doc values across all shards + // assigned to selected nodes. + DocValuesMemoryInBytes int64 `json:"doc_values_memory_in_bytes"` + // FileSizes This object is not populated by the cluster stats API. + // To get information on segment files, use the node stats API. + FileSizes map[string]ShardFileSizeInfo `json:"file_sizes"` + // FixedBitSet Total amount of memory used by fixed bit sets across all shards assigned to + // selected nodes. + // Fixed bit sets are used for nested object field types and type filters for + // join fields. + FixedBitSet ByteSize `json:"fixed_bit_set,omitempty"` + // FixedBitSetMemoryInBytes Total amount of memory, in bytes, used by fixed bit sets across all shards + // assigned to selected nodes. + FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes"` + IndexWriterMaxMemoryInBytes *int64 `json:"index_writer_max_memory_in_bytes,omitempty"` + // IndexWriterMemory Total amount of memory used by all index writers across all shards assigned + // to selected nodes. + IndexWriterMemory ByteSize `json:"index_writer_memory,omitempty"` + // IndexWriterMemoryInBytes Total amount, in bytes, of memory used by all index writers across all shards + // assigned to selected nodes. + IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"` + // MaxUnsafeAutoIdTimestamp Unix timestamp, in milliseconds, of the most recently retried indexing + // request. 
+ MaxUnsafeAutoIdTimestamp int64 `json:"max_unsafe_auto_id_timestamp"` + // Memory Total amount of memory used for segments across all shards assigned to + // selected nodes. + Memory ByteSize `json:"memory,omitempty"` + // MemoryInBytes Total amount, in bytes, of memory used for segments across all shards + // assigned to selected nodes. + MemoryInBytes int64 `json:"memory_in_bytes"` + // NormsMemory Total amount of memory used for normalization factors across all shards + // assigned to selected nodes. + NormsMemory ByteSize `json:"norms_memory,omitempty"` + // NormsMemoryInBytes Total amount, in bytes, of memory used for normalization factors across all + // shards assigned to selected nodes. + NormsMemoryInBytes int64 `json:"norms_memory_in_bytes"` + // PointsMemory Total amount of memory used for points across all shards assigned to selected + // nodes. + PointsMemory ByteSize `json:"points_memory,omitempty"` + // PointsMemoryInBytes Total amount, in bytes, of memory used for points across all shards assigned + // to selected nodes. + PointsMemoryInBytes int64 `json:"points_memory_in_bytes"` + // StoredFieldsMemoryInBytes Total amount, in bytes, of memory used for stored fields across all shards + // assigned to selected nodes. + StoredFieldsMemoryInBytes int64 `json:"stored_fields_memory_in_bytes"` + StoredMemory ByteSize `json:"stored_memory,omitempty"` + // TermVectorsMemoryInBytes Total amount, in bytes, of memory used for term vectors across all shards + // assigned to selected nodes. + TermVectorsMemoryInBytes int64 `json:"term_vectors_memory_in_bytes"` + // TermVectoryMemory Total amount of memory used for term vectors across all shards assigned to + // selected nodes. + TermVectoryMemory ByteSize `json:"term_vectory_memory,omitempty"` + // TermsMemory Total amount of memory used for terms across all shards assigned to selected + // nodes. 
+ TermsMemory ByteSize `json:"terms_memory,omitempty"` + // TermsMemoryInBytes Total amount, in bytes, of memory used for terms across all shards assigned + // to selected nodes. + TermsMemoryInBytes int64 `json:"terms_memory_in_bytes"` + // VersionMapMemory Total amount of memory used by all version maps across all shards assigned to + // selected nodes. + VersionMapMemory ByteSize `json:"version_map_memory,omitempty"` + // VersionMapMemoryInBytes Total amount, in bytes, of memory used by all version maps across all shards + // assigned to selected nodes. + VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"` +} + +func (s *SegmentsStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "doc_values_memory": + if err := dec.Decode(&s.DocValuesMemory); err != nil { + return err + } + + case "doc_values_memory_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocValuesMemoryInBytes = value + case float64: + f := int64(v) + s.DocValuesMemoryInBytes = f + } + + case "file_sizes": + if s.FileSizes == nil { + s.FileSizes = make(map[string]ShardFileSizeInfo, 0) + } + if err := dec.Decode(&s.FileSizes); err != nil { + return err + } + + case "fixed_bit_set": + if err := dec.Decode(&s.FixedBitSet); err != nil { + return err + } + + case "fixed_bit_set_memory_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + 
s.FixedBitSetMemoryInBytes = value + case float64: + f := int64(v) + s.FixedBitSetMemoryInBytes = f + } + + case "index_writer_max_memory_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndexWriterMaxMemoryInBytes = &value + case float64: + f := int64(v) + s.IndexWriterMaxMemoryInBytes = &f + } + + case "index_writer_memory": + if err := dec.Decode(&s.IndexWriterMemory); err != nil { + return err + } + + case "index_writer_memory_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndexWriterMemoryInBytes = value + case float64: + f := int64(v) + s.IndexWriterMemoryInBytes = f + } + + case "max_unsafe_auto_id_timestamp": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MaxUnsafeAutoIdTimestamp = value + case float64: + f := int64(v) + s.MaxUnsafeAutoIdTimestamp = f + } + + case "memory": + if err := dec.Decode(&s.Memory); err != nil { + return err + } + + case "memory_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MemoryInBytes = value + case float64: + f := int64(v) + s.MemoryInBytes = f + } + + case "norms_memory": + if err := dec.Decode(&s.NormsMemory); err != nil { + return err + } + + case "norms_memory_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NormsMemoryInBytes = value + case float64: + f := int64(v) + s.NormsMemoryInBytes = f + } + + case "points_memory": + if err := dec.Decode(&s.PointsMemory); err != nil { + return err + } + + case 
"points_memory_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PointsMemoryInBytes = value + case float64: + f := int64(v) + s.PointsMemoryInBytes = f + } + + case "stored_fields_memory_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.StoredFieldsMemoryInBytes = value + case float64: + f := int64(v) + s.StoredFieldsMemoryInBytes = f + } + + case "stored_memory": + if err := dec.Decode(&s.StoredMemory); err != nil { + return err + } + + case "term_vectors_memory_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TermVectorsMemoryInBytes = value + case float64: + f := int64(v) + s.TermVectorsMemoryInBytes = f + } + + case "term_vectory_memory": + if err := dec.Decode(&s.TermVectoryMemory); err != nil { + return err + } + + case "terms_memory": + if err := dec.Decode(&s.TermsMemory); err != nil { + return err + } + + case "terms_memory_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TermsMemoryInBytes = value + case float64: + f := int64(v) + s.TermsMemoryInBytes = f + } + + case "version_map_memory": + if err := dec.Decode(&s.VersionMapMemory); err != nil { + return err + } + + case "version_map_memory_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.VersionMapMemoryInBytes = value + case float64: + f := int64(v) + s.VersionMapMemoryInBytes = f + } + + } + } + return nil } // NewSegmentsStats returns a SegmentsStats. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/serialdifferencingaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/serialdifferencingaggregation.go index 68d08c136..bc271c827 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/serialdifferencingaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/serialdifferencingaggregation.go @@ -16,34 +16,41 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // SerialDifferencingAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L280-L282 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L363-L369 type SerialDifferencingAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Lag *int `json:"lag,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. 
+ // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + // Lag The historical bucket to subtract from the current value. + // Must be a positive, non-zero integer. + Lag *int `json:"lag,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` } func (s *SerialDifferencingAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -63,9 +70,16 @@ func (s *SerialDifferencingAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -73,8 +87,19 @@ func (s *SerialDifferencingAggregation) UnmarshalJSON(data []byte) error { } case "lag": - if err := dec.Decode(&s.Lag); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Lag = &value + case float64: + f := int(v) + s.Lag = &f } case "meta": @@ -83,9 +108,16 @@ func (s *SerialDifferencingAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/serializedclusterstate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/serializedclusterstate.go index 
d51dfe690..508aaf8fb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/serializedclusterstate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/serializedclusterstate.go @@ -16,15 +16,16 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // SerializedClusterState type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L101-L104 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L232-L238 type SerializedClusterState struct { - Diffs *SerializedClusterStateDetail `json:"diffs,omitempty"` + Diffs *SerializedClusterStateDetail `json:"diffs,omitempty"` + // FullStates Number of published cluster states. FullStates *SerializedClusterStateDetail `json:"full_states,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/serializedclusterstatedetail.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/serializedclusterstatedetail.go index 6dfc10247..17756e200 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/serializedclusterstatedetail.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/serializedclusterstatedetail.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SerializedClusterStateDetail type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L106-L112 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L240-L246 type SerializedClusterStateDetail struct { CompressedSize *string `json:"compressed_size,omitempty"` CompressedSizeInBytes *int64 `json:"compressed_size_in_bytes,omitempty"` @@ -31,6 +39,95 @@ type SerializedClusterStateDetail struct { UncompressedSizeInBytes *int64 `json:"uncompressed_size_in_bytes,omitempty"` } +func (s *SerializedClusterStateDetail) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compressed_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CompressedSize = &o + + case "compressed_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CompressedSizeInBytes = &value + case float64: + f := int64(v) + s.CompressedSizeInBytes = &f + } + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = &value + case float64: + f := int64(v) + s.Count = &f + } + + case 
"uncompressed_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UncompressedSize = &o + + case "uncompressed_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.UncompressedSizeInBytes = &value + case float64: + f := int64(v) + s.UncompressedSizeInBytes = &f + } + + } + } + return nil +} + // NewSerializedClusterStateDetail returns a SerializedClusterStateDetail. func NewSerializedClusterStateDetail() *SerializedClusterStateDetail { r := &SerializedClusterStateDetail{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/servicetoken.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/servicetoken.go index b2afbb4c5..2474bd5f1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/servicetoken.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/servicetoken.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ServiceToken type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/create_service_token/types.ts#L22-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/create_service_token/types.ts#L22-L25 type ServiceToken struct { Name string `json:"name"` Value string `json:"value"` } +func (s *ServiceToken) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Value = o + + } + } + return nil +} + // NewServiceToken returns a ServiceToken. func NewServiceToken() *ServiceToken { r := &ServiceToken{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/setprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/setprocessor.go index d0086e6d5..555d7b7a0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/setprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/setprocessor.go @@ -16,29 +16,188 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // SetProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L329-L336 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L982-L1016 type SetProcessor struct { - CopyFrom *string `json:"copy_from,omitempty"` - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreEmptyValue *bool `json:"ignore_empty_value,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - MediaType *string `json:"media_type,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Override *bool `json:"override,omitempty"` - Tag *string `json:"tag,omitempty"` - Value json.RawMessage `json:"value,omitempty"` + // CopyFrom The origin field which will be copied to `field`, cannot set `value` + // simultaneously. + // Supported data types are `boolean`, `number`, `array`, `object`, `string`, + // `date`, etc. + CopyFrom *string `json:"copy_from,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to insert, upsert, or update. + // Supports template snippets. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreEmptyValue If `true` and `value` is a template snippet that evaluates to `null` or the + // empty string, the processor quietly exits without modifying the document. + IgnoreEmptyValue *bool `json:"ignore_empty_value,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // MediaType The media type for encoding `value`. + // Applies only when value is a template snippet. 
+ // Must be one of `application/json`, `text/plain`, or + // `application/x-www-form-urlencoded`. + MediaType *string `json:"media_type,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Override If `true` processor will update fields with pre-existing non-null-valued + // field. + // When set to `false`, such fields will not be touched. + Override *bool `json:"override,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // Value The value to be set for the field. + // Supports template snippets. + // May specify only one of `value` or `copy_from`. + Value json.RawMessage `json:"value,omitempty"` +} + +func (s *SetProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "copy_from": + if err := dec.Decode(&s.CopyFrom); err != nil { + return err + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_empty_value": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreEmptyValue = &value + case bool: + s.IgnoreEmptyValue = &v + } + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "media_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MediaType = &o + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "override": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Override = &value + case bool: + s.Override = &v + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + } + } + return nil } // NewSetProcessor returns a SetProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/setsecurityuserprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/setsecurityuserprocessor.go index 878132bf1..31d52bddc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/setsecurityuserprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/setsecurityuserprocessor.go @@ -16,21 +16,123 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SetSecurityUserProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L338-L341 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L1018-L1027 type SetSecurityUserProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Properties []string `json:"properties,omitempty"` - Tag *string `json:"tag,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to store the user information into. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Properties Controls what user related properties are added to the field. + Properties []string `json:"properties,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. 
+ Tag *string `json:"tag,omitempty"` +} + +func (s *SetSecurityUserProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "properties": + if err := dec.Decode(&s.Properties); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + } + } + return nil } // NewSetSecurityUserProcessor returns a SetSecurityUserProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settings.go index 90449d505..1b04e3b3b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settings.go @@ -16,13 +16,21 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Settings type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/_types/Transform.ts#L98-L143 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/_types/Transform.ts#L98-L144 type Settings struct { // AlignCheckpoints Specifies whether the transform checkpoint ranges should be optimized for // performance. Such optimization can align @@ -59,6 +67,114 @@ type Settings struct { Unattended *bool `json:"unattended,omitempty"` } +func (s *Settings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "align_checkpoints": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AlignCheckpoints = &value + case bool: + s.AlignCheckpoints = &v + } + + case "dates_as_epoch_millis": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DatesAsEpochMillis = &value + case bool: + s.DatesAsEpochMillis = &v + } + + case "deduce_mappings": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DeduceMappings = &value + case bool: + s.DeduceMappings = &v + } + + case "docs_per_second": + var tmp 
interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.DocsPerSecond = &f + case float64: + f := float32(v) + s.DocsPerSecond = &f + } + + case "max_page_search_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxPageSearchSize = &value + case float64: + f := int(v) + s.MaxPageSearchSize = &f + } + + case "unattended": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Unattended = &value + case bool: + s.Unattended = &v + } + + } + } + return nil +} + // NewSettings returns a Settings. func NewSettings() *Settings { r := &Settings{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingsanalyze.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingsanalyze.go index 7beef005e..db07f37b9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingsanalyze.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingsanalyze.go @@ -16,15 +16,47 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // SettingsAnalyze type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L226-L229 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L226-L229 type SettingsAnalyze struct { - MaxTokenCount *int `json:"max_token_count,omitempty"` + MaxTokenCount Stringifiedinteger `json:"max_token_count,omitempty"` +} + +func (s *SettingsAnalyze) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_token_count": + if err := dec.Decode(&s.MaxTokenCount); err != nil { + return err + } + + } + } + return nil } // NewSettingsAnalyze returns a SettingsAnalyze. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingshighlight.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingshighlight.go index 1b64053aa..4ccd2ce9e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingshighlight.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingshighlight.go @@ -16,17 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SettingsHighlight type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L221-L224 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L221-L224 type SettingsHighlight struct { MaxAnalyzedOffset *int `json:"max_analyzed_offset,omitempty"` } +func (s *SettingsHighlight) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_analyzed_offset": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxAnalyzedOffset = &value + case float64: + f := int(v) + s.MaxAnalyzedOffset = &f + } + + } + } + return nil +} + // NewSettingsHighlight returns a SettingsHighlight. func NewSettingsHighlight() *SettingsHighlight { r := &SettingsHighlight{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingsquerystring.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingsquerystring.go index e318d8651..fd3c03893 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingsquerystring.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingsquerystring.go @@ -16,15 +16,47 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // SettingsQueryString type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L241-L243 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L241-L243 type SettingsQueryString struct { - Lenient bool `json:"lenient"` + Lenient Stringifiedboolean `json:"lenient"` +} + +func (s *SettingsQueryString) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lenient": + if err := dec.Decode(&s.Lenient); err != nil { + return err + } + + } + } + return nil } // NewSettingsQueryString returns a SettingsQueryString. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssearch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssearch.go index 6718c7a64..84d19fb85 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssearch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssearch.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // SettingsSearch type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L231-L234 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L231-L234 type SettingsSearch struct { Idle *SearchIdle `json:"idle,omitempty"` Slowlog *SlowlogSettings `json:"slowlog,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilarity.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilarity.go index d5a444453..031eb9955 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilarity.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilarity.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // SettingsSimilarity type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L170-L178 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L170-L178 type SettingsSimilarity struct { Bm25 *SettingsSimilarityBm25 `json:"bm25,omitempty"` Dfi *SettingsSimilarityDfi `json:"dfi,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritybm25.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritybm25.go index 02df6fd9c..e33a2f106 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritybm25.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritybm25.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SettingsSimilarityBm25 type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L180-L185 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L180-L185 type SettingsSimilarityBm25 struct { B Float64 `json:"b"` DiscountOverlaps bool `json:"discount_overlaps"` @@ -30,11 +38,95 @@ type SettingsSimilarityBm25 struct { Type string `json:"type,omitempty"` } +func (s *SettingsSimilarityBm25) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "b": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.B = f + case float64: + f := Float64(v) + s.B = f + } + + case "discount_overlaps": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DiscountOverlaps = value + case bool: + s.DiscountOverlaps = v + } + + case "k1": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.K1 = f + case float64: + f := Float64(v) + s.K1 = f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SettingsSimilarityBm25) MarshalJSON() ([]byte, error) { + type innerSettingsSimilarityBm25 SettingsSimilarityBm25 + tmp := innerSettingsSimilarityBm25{ + B: s.B, + DiscountOverlaps: s.DiscountOverlaps, + K1: s.K1, + Type: s.Type, + } + + tmp.Type = "BM25" + + return 
json.Marshal(tmp) +} + // NewSettingsSimilarityBm25 returns a SettingsSimilarityBm25. func NewSettingsSimilarityBm25() *SettingsSimilarityBm25 { r := &SettingsSimilarityBm25{} - r.Type = "BM25" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritydfi.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritydfi.go index ed577afd3..39362072b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritydfi.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritydfi.go @@ -16,27 +16,40 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfiindependencemeasure" ) // SettingsSimilarityDfi type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L187-L190 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L187-L190 type SettingsSimilarityDfi struct { IndependenceMeasure dfiindependencemeasure.DFIIndependenceMeasure `json:"independence_measure"` Type string `json:"type,omitempty"` } +// MarshalJSON override marshalling to include literal value +func (s SettingsSimilarityDfi) MarshalJSON() ([]byte, error) { + type innerSettingsSimilarityDfi SettingsSimilarityDfi + tmp := innerSettingsSimilarityDfi{ + IndependenceMeasure: s.IndependenceMeasure, + Type: s.Type, + } + + tmp.Type = "DFI" + + return json.Marshal(tmp) +} + // NewSettingsSimilarityDfi returns a SettingsSimilarityDfi. 
func NewSettingsSimilarityDfi() *SettingsSimilarityDfi { r := &SettingsSimilarityDfi{} - r.Type = "DFI" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritydfr.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritydfr.go index ca68d4071..1a3a8a7f2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritydfr.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritydfr.go @@ -16,11 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfraftereffect" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dfrbasicmodel" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalization" @@ -28,7 +30,7 @@ import ( // SettingsSimilarityDfr type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L192-L197 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L192-L197 type SettingsSimilarityDfr struct { AfterEffect dfraftereffect.DFRAfterEffect `json:"after_effect"` BasicModel dfrbasicmodel.DFRBasicModel `json:"basic_model"` @@ -36,11 +38,24 @@ type SettingsSimilarityDfr struct { Type string `json:"type,omitempty"` } +// MarshalJSON override marshalling to include literal value +func (s SettingsSimilarityDfr) MarshalJSON() ([]byte, error) { + type innerSettingsSimilarityDfr SettingsSimilarityDfr + tmp := innerSettingsSimilarityDfr{ + AfterEffect: s.AfterEffect, + BasicModel: s.BasicModel, + Normalization: s.Normalization, + Type: s.Type, + } + + tmp.Type = "DFR" + + return json.Marshal(tmp) +} + // NewSettingsSimilarityDfr returns a SettingsSimilarityDfr. func NewSettingsSimilarityDfr() *SettingsSimilarityDfr { r := &SettingsSimilarityDfr{} - r.Type = "DFR" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilarityib.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilarityib.go index ea0fee444..200a98a32 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilarityib.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilarityib.go @@ -16,11 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ibdistribution" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/iblambda" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalization" @@ -28,7 +30,7 @@ import ( // SettingsSimilarityIb type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L199-L204 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L199-L204 type SettingsSimilarityIb struct { Distribution ibdistribution.IBDistribution `json:"distribution"` Lambda iblambda.IBLambda `json:"lambda"` @@ -36,11 +38,24 @@ type SettingsSimilarityIb struct { Type string `json:"type,omitempty"` } +// MarshalJSON override marshalling to include literal value +func (s SettingsSimilarityIb) MarshalJSON() ([]byte, error) { + type innerSettingsSimilarityIb SettingsSimilarityIb + tmp := innerSettingsSimilarityIb{ + Distribution: s.Distribution, + Lambda: s.Lambda, + Normalization: s.Normalization, + Type: s.Type, + } + + tmp.Type = "IB" + + return json.Marshal(tmp) +} + // NewSettingsSimilarityIb returns a SettingsSimilarityIb. 
func NewSettingsSimilarityIb() *SettingsSimilarityIb { r := &SettingsSimilarityIb{} - r.Type = "IB" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritylmd.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritylmd.go index c1dbd9e99..90b6e0f12 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritylmd.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritylmd.go @@ -16,23 +16,83 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SettingsSimilarityLmd type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L206-L209 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L206-L209 type SettingsSimilarityLmd struct { Mu int `json:"mu"` Type string `json:"type,omitempty"` } +func (s *SettingsSimilarityLmd) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "mu": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Mu = value + case float64: + f := int(v) + s.Mu = f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to 
include literal value +func (s SettingsSimilarityLmd) MarshalJSON() ([]byte, error) { + type innerSettingsSimilarityLmd SettingsSimilarityLmd + tmp := innerSettingsSimilarityLmd{ + Mu: s.Mu, + Type: s.Type, + } + + tmp.Type = "LMDirichlet" + + return json.Marshal(tmp) +} + // NewSettingsSimilarityLmd returns a SettingsSimilarityLmd. func NewSettingsSimilarityLmd() *SettingsSimilarityLmd { r := &SettingsSimilarityLmd{} - r.Type = "LMDirichlet" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritylmj.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritylmj.go index 0209efad7..342a1a2c3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritylmj.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilaritylmj.go @@ -16,23 +16,83 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SettingsSimilarityLmj type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L211-L214 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L211-L214 type SettingsSimilarityLmj struct { Lambda Float64 `json:"lambda"` Type string `json:"type,omitempty"` } +func (s *SettingsSimilarityLmj) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lambda": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Lambda = f + case float64: + f := Float64(v) + s.Lambda = f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SettingsSimilarityLmj) MarshalJSON() ([]byte, error) { + type innerSettingsSimilarityLmj SettingsSimilarityLmj + tmp := innerSettingsSimilarityLmj{ + Lambda: s.Lambda, + Type: s.Type, + } + + tmp.Type = "LMJelinekMercer" + + return json.Marshal(tmp) +} + // NewSettingsSimilarityLmj returns a SettingsSimilarityLmj. 
func NewSettingsSimilarityLmj() *SettingsSimilarityLmj { r := &SettingsSimilarityLmj{} - r.Type = "LMJelinekMercer" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilarityscriptedtfidf.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilarityscriptedtfidf.go index 06bdabff7..69e551951 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilarityscriptedtfidf.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/settingssimilarityscriptedtfidf.go @@ -16,23 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // SettingsSimilarityScriptedTfidf type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L216-L219 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L216-L219 type SettingsSimilarityScriptedTfidf struct { Script Script `json:"script"` Type string `json:"type,omitempty"` } +func (s *SettingsSimilarityScriptedTfidf) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SettingsSimilarityScriptedTfidf) MarshalJSON() ([]byte, error) { + type innerSettingsSimilarityScriptedTfidf SettingsSimilarityScriptedTfidf + tmp := innerSettingsSimilarityScriptedTfidf{ + Script: s.Script, + Type: s.Type, + } + + tmp.Type = "scripted" + + return json.Marshal(tmp) +} + // NewSettingsSimilarityScriptedTfidf returns a SettingsSimilarityScriptedTfidf. func NewSettingsSimilarityScriptedTfidf() *SettingsSimilarityScriptedTfidf { r := &SettingsSimilarityScriptedTfidf{} - r.Type = "scripted" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shapefieldquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shapefieldquery.go index 6cc4cca1f..7b56bfb49 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shapefieldquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shapefieldquery.go @@ -16,23 +16,65 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoshaperelation" ) // ShapeFieldQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L183-L187 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L354-L367 type ShapeFieldQuery struct { - IndexedShape *FieldLookup `json:"indexed_shape,omitempty"` - Relation *geoshaperelation.GeoShapeRelation `json:"relation,omitempty"` - Shape json.RawMessage `json:"shape,omitempty"` + // IndexedShape Queries using a pre-indexed shape. + IndexedShape *FieldLookup `json:"indexed_shape,omitempty"` + // Relation Spatial relation between the query shape and the document shape. + Relation *geoshaperelation.GeoShapeRelation `json:"relation,omitempty"` + // Shape Queries using an inline shape definition in GeoJSON or Well Known Text (WKT) + // format. + Shape json.RawMessage `json:"shape,omitempty"` +} + +func (s *ShapeFieldQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "indexed_shape": + if err := dec.Decode(&s.IndexedShape); err != nil { + return err + } + + case "relation": + if err := dec.Decode(&s.Relation); err != nil { + return err + } + + case "shape": + if err := dec.Decode(&s.Shape); err != nil { + return err + } + + } + } + return nil } // NewShapeFieldQuery returns a ShapeFieldQuery. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shapeproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shapeproperty.go index b6e0f0421..9c548b9da 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shapeproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shapeproperty.go @@ -16,24 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoorientation" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/geoorientation" ) // ShapeProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/geo.ts#L69-L81 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/geo.ts#L69-L81 type ShapeProperty struct { Coerce *bool `json:"coerce,omitempty"` CopyTo []string `json:"copy_to,omitempty"` @@ -53,6 +53,7 @@ type ShapeProperty struct { } func (s *ShapeProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -67,18 +68,47 @@ func (s *ShapeProperty) UnmarshalJSON(data []byte) error { switch t { case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -87,6 +117,9 @@ func (s *ShapeProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -95,7 +128,9 @@ func (s *ShapeProperty) 
UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -374,28 +409,62 @@ func (s *ShapeProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "ignore_z_value": - if err := dec.Decode(&s.IgnoreZValue); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreZValue = &value + case bool: + s.IgnoreZValue = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } @@ -406,6 +475,9 @@ func (s *ShapeProperty) UnmarshalJSON(data []byte) error { } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -414,7 +486,9 @@ func (s *ShapeProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) 
buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -693,20 +767,38 @@ func (s *ShapeProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -719,6 +811,31 @@ func (s *ShapeProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s ShapeProperty) MarshalJSON() ([]byte, error) { + type innerShapeProperty ShapeProperty + tmp := innerShapeProperty{ + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + IgnoreZValue: s.IgnoreZValue, + Meta: s.Meta, + Orientation: s.Orientation, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + tmp.Type = "shape" + + return json.Marshal(tmp) +} + // NewShapeProperty returns a ShapeProperty. 
func NewShapeProperty() *ShapeProperty { r := &ShapeProperty{ @@ -727,7 +844,5 @@ func NewShapeProperty() *ShapeProperty { Properties: make(map[string]Property, 0), } - r.Type = "shape" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shapequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shapequery.go index 98d51542a..656a3e545 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shapequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shapequery.go @@ -16,23 +16,106 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" ) // ShapeQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/specialized.ts#L176-L181 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/specialized.ts#L344-L352 type ShapeQuery struct { - Boost *float32 `json:"boost,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // IgnoreUnmapped When set to `true` the query ignores an unmapped field and will not match any + // documents. 
IgnoreUnmapped *bool `json:"ignore_unmapped,omitempty"` QueryName_ *string `json:"_name,omitempty"` - ShapeQuery map[string]ShapeFieldQuery `json:"-"` + ShapeQuery map[string]ShapeFieldQuery `json:"ShapeQuery,omitempty"` +} + +func (s *ShapeQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "ignore_unmapped": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreUnmapped = &value + case bool: + s.IgnoreUnmapped = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "ShapeQuery": + if s.ShapeQuery == nil { + s.ShapeQuery = make(map[string]ShapeFieldQuery, 0) + } + if err := dec.Decode(&s.ShapeQuery); err != nil { + return err + } + + default: + + } + } + return nil } // MarhsalJSON overrides marshalling for types with additional properties @@ -54,6 +137,7 @@ func (s ShapeQuery) MarshalJSON() ([]byte, error) { for key, value := range s.ShapeQuery { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "ShapeQuery") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardcommit.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardcommit.go index 44c03b1f7..2c67f4d25 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardcommit.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardcommit.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardCommit type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/stats/types.ts#L103-L108 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/stats/types.ts#L112-L117 type ShardCommit struct { Generation int `json:"generation"` Id string `json:"id"` @@ -30,6 +38,70 @@ type ShardCommit struct { UserData map[string]string `json:"user_data"` } +func (s *ShardCommit) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "generation": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Generation = value + case float64: + f := int(v) + s.Generation = f + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "num_docs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumDocs = value + case float64: + f := int64(v) + s.NumDocs = f + } + + case "user_data": + if s.UserData == nil { + s.UserData = make(map[string]string, 0) + } + if err := dec.Decode(&s.UserData); err != nil { + return err + } + + } + } + return nil +} + // 
NewShardCommit returns a ShardCommit. func NewShardCommit() *ShardCommit { r := &ShardCommit{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardfailure.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardfailure.go index b0310ccba..90d87d3e6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardfailure.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardfailure.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardFailure type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Errors.ts#L50-L56 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Errors.ts#L50-L56 type ShardFailure struct { Index *string `json:"index,omitempty"` Node *string `json:"node,omitempty"` @@ -31,6 +39,76 @@ type ShardFailure struct { Status *string `json:"status,omitempty"` } +func (s *ShardFailure) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = &o + + case "reason": + if err := dec.Decode(&s.Reason); err != nil { + return err + } + + case 
"shard": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Shard = value + case float64: + f := int(v) + s.Shard = f + } + + case "status": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Status = &o + + } + } + return nil +} + // NewShardFailure returns a ShardFailure. func NewShardFailure() *ShardFailure { r := &ShardFailure{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardfilesizeinfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardfilesizeinfo.go index a1832eabf..fc74d02f3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardfilesizeinfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardfilesizeinfo.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardFileSizeInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/stats/types.ts#L115-L122 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/stats/types.ts#L124-L131 type ShardFileSizeInfo struct { AverageSizeInBytes *int64 `json:"average_size_in_bytes,omitempty"` Count *int64 `json:"count,omitempty"` @@ -32,6 +40,113 @@ type ShardFileSizeInfo struct { SizeInBytes int64 `json:"size_in_bytes"` } +func (s *ShardFileSizeInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "average_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.AverageSizeInBytes = &value + case float64: + f := int64(v) + s.AverageSizeInBytes = &f + } + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = &value + case float64: + f := int64(v) + s.Count = &f + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = o + + case "max_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MaxSizeInBytes = &value + case float64: + f := int64(v) + s.MaxSizeInBytes = &f + } + + case "min_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil 
{ + return err + } + s.MinSizeInBytes = &value + case float64: + f := int64(v) + s.MinSizeInBytes = &f + } + + case "size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + } + } + return nil +} + // NewShardFileSizeInfo returns a ShardFileSizeInfo. func NewShardFileSizeInfo() *ShardFileSizeInfo { r := &ShardFileSizeInfo{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardhealthstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardhealthstats.go index 2fe2f8d8f..931a71447 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardhealthstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardhealthstats.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/healthstatus" ) // ShardHealthStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/health/types.ts#L36-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/health/types.ts#L36-L43 type ShardHealthStats struct { ActiveShards int `json:"active_shards"` InitializingShards int `json:"initializing_shards"` @@ -36,6 +42,109 @@ type ShardHealthStats struct { UnassignedShards int `json:"unassigned_shards"` } +func (s *ShardHealthStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ActiveShards = value + case float64: + f := int(v) + s.ActiveShards = f + } + + case "initializing_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.InitializingShards = value + case float64: + f := int(v) + s.InitializingShards = f + } + + case "primary_active": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.PrimaryActive = value + case bool: + s.PrimaryActive = v + } + + case "relocating_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.RelocatingShards = value + case float64: + f := int(v) + s.RelocatingShards = f + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "unassigned_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := 
tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.UnassignedShards = value + case float64: + f := int(v) + s.UnassignedShards = f + } + + } + } + return nil +} + // NewShardHealthStats returns a ShardHealthStats. func NewShardHealthStats() *ShardHealthStats { r := &ShardHealthStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardlease.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardlease.go index 745370990..9bbdb1bbe 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardlease.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardlease.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardLease type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/stats/types.ts#L124-L129 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/stats/types.ts#L133-L138 type ShardLease struct { Id string `json:"id"` RetainingSeqNo int64 `json:"retaining_seq_no"` @@ -30,6 +38,63 @@ type ShardLease struct { Timestamp int64 `json:"timestamp"` } +func (s *ShardLease) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "retaining_seq_no": + if err := dec.Decode(&s.RetainingSeqNo); err != nil { + return err + } + + case "source": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Source = o + + case "timestamp": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Timestamp = value + case float64: + f := int64(v) + s.Timestamp = f + } + + } + } + return nil +} + // NewShardLease returns a ShardLease. func NewShardLease() *ShardLease { r := &ShardLease{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardmigrationstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardmigrationstatus.go index b8e908a7e..9d42e9171 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardmigrationstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardmigrationstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // ShardMigrationStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L52-L54 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/shutdown/get_node/ShutdownGetNodeResponse.ts#L52-L54 type ShardMigrationStatus struct { Status shutdownstatus.ShutdownStatus `json:"status"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardpath.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardpath.go index 3c44c2252..e2194e333 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardpath.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardpath.go @@ -16,19 +16,85 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardPath type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/stats/types.ts#L131-L135 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/stats/types.ts#L140-L144 type ShardPath struct { DataPath string `json:"data_path"` IsCustomDataPath bool `json:"is_custom_data_path"` StatePath string `json:"state_path"` } +func (s *ShardPath) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataPath = o + + case "is_custom_data_path": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IsCustomDataPath = value + case bool: + s.IsCustomDataPath = v + } + + case "state_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StatePath = o + + } + } + return nil +} + // NewShardPath returns a ShardPath. func NewShardPath() *ShardPath { r := &ShardPath{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardprofile.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardprofile.go index 18b6e1ada..342343b31 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardprofile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardprofile.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardProfile type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/profile.ts#L132-L137 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/profile.ts#L132-L137 type ShardProfile struct { Aggregations []AggregationProfile `json:"aggregations"` Fetch *FetchProfile `json:"fetch,omitempty"` @@ -30,6 +38,53 @@ type ShardProfile struct { Searches []SearchProfile `json:"searches"` } +func (s *ShardProfile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aggregations": + if err := dec.Decode(&s.Aggregations); err != nil { + return err + } + + case "fetch": + if err := dec.Decode(&s.Fetch); err != nil { + return err + } + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = o + + case "searches": + if err := dec.Decode(&s.Searches); err != nil { + return err + } + + } + } + return nil +} + // NewShardProfile returns a ShardProfile. 
func NewShardProfile() *ShardProfile { r := &ShardProfile{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardquerycache.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardquerycache.go index caf8a4c67..48698fe88 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardquerycache.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardquerycache.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardQueryCache type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/stats/types.ts#L137-L145 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/stats/types.ts#L146-L154 type ShardQueryCache struct { CacheCount int64 `json:"cache_count"` CacheSize int64 `json:"cache_size"` @@ -33,6 +41,131 @@ type ShardQueryCache struct { TotalCount int64 `json:"total_count"` } +func (s *ShardQueryCache) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cache_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CacheCount = value + case float64: + f := int64(v) + s.CacheCount = f + } + + case "cache_size": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CacheSize = value + case float64: + f := int64(v) + s.CacheSize = f + } + + case "evictions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Evictions = value + case float64: + f := int64(v) + s.Evictions = f + } + + case "hit_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.HitCount = value + case float64: + f := int64(v) + s.HitCount = f + } + + case "memory_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MemorySizeInBytes = value + case float64: + f := int64(v) + s.MemorySizeInBytes = f + } + + case "miss_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MissCount = value + case float64: + f := int64(v) + s.MissCount = f + } + + case "total_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalCount = value + case float64: + f := int64(v) + s.TotalCount = f + } + + } + } + return nil +} + // NewShardQueryCache returns a ShardQueryCache. 
func NewShardQueryCache() *ShardQueryCache { r := &ShardQueryCache{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardrecovery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardrecovery.go index e05dfcc81..1ac23ea36 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardrecovery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardrecovery.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardRecovery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/recovery/types.ts#L118-L135 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/recovery/types.ts#L118-L135 type ShardRecovery struct { Id int64 `json:"id"` Index RecoveryIndexStatus `json:"index"` @@ -42,6 +50,139 @@ type ShardRecovery struct { VerifyIndex VerifyIndex `json:"verify_index"` } +func (s *ShardRecovery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Id = value + case float64: + f := int64(v) + s.Id = f + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "primary": + var tmp interface{} + dec.Decode(&tmp) + switch v 
:= tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Primary = value + case bool: + s.Primary = v + } + + case "source": + if err := dec.Decode(&s.Source); err != nil { + return err + } + + case "stage": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Stage = o + + case "start": + if err := dec.Decode(&s.Start); err != nil { + return err + } + + case "start_time": + if err := dec.Decode(&s.StartTime); err != nil { + return err + } + + case "start_time_in_millis": + if err := dec.Decode(&s.StartTimeInMillis); err != nil { + return err + } + + case "stop_time": + if err := dec.Decode(&s.StopTime); err != nil { + return err + } + + case "stop_time_in_millis": + if err := dec.Decode(&s.StopTimeInMillis); err != nil { + return err + } + + case "target": + if err := dec.Decode(&s.Target); err != nil { + return err + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return err + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return err + } + + case "translog": + if err := dec.Decode(&s.Translog); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + case "verify_index": + if err := dec.Decode(&s.VerifyIndex); err != nil { + return err + } + + } + } + return nil +} + // NewShardRecovery returns a ShardRecovery. 
func NewShardRecovery() *ShardRecovery { r := &ShardRecovery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardretentionleases.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardretentionleases.go index 72081aca7..e7add2092 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardretentionleases.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardretentionleases.go @@ -16,19 +16,72 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardRetentionLeases type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/stats/types.ts#L147-L151 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/stats/types.ts#L156-L160 type ShardRetentionLeases struct { Leases []ShardLease `json:"leases"` PrimaryTerm int64 `json:"primary_term"` Version int64 `json:"version"` } +func (s *ShardRetentionLeases) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "leases": + if err := dec.Decode(&s.Leases); err != nil { + return err + } + + case "primary_term": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PrimaryTerm = value + case float64: + f := int64(v) + s.PrimaryTerm = f + } + + case "version": + if err := 
dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewShardRetentionLeases returns a ShardRetentionLeases. func NewShardRetentionLeases() *ShardRetentionLeases { r := &ShardRetentionLeases{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardrouting.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardrouting.go index 3e0b262e6..337358ad8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardrouting.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardrouting.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardroutingstate" ) // ShardRouting type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/stats/types.ts#L153-L158 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/stats/types.ts#L162-L167 type ShardRouting struct { Node string `json:"node"` Primary bool `json:"primary"` @@ -34,6 +40,69 @@ type ShardRouting struct { State shardroutingstate.ShardRoutingState `json:"state"` } +func (s *ShardRouting) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = o + + case "primary": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Primary = value + case bool: + s.Primary = v + } + + case "relocating_node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RelocatingNode = o + + case "state": + if err := dec.Decode(&s.State); err != nil { + return err + } + + } + } + return nil +} + // NewShardRouting returns a ShardRouting. 
func NewShardRouting() *ShardRouting { r := &ShardRouting{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsavailabilityindicator.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsavailabilityindicator.go new file mode 100644 index 000000000..3abb92637 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsavailabilityindicator.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indicatorhealthstatus" +) + +// ShardsAvailabilityIndicator type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L104-L108 +type ShardsAvailabilityIndicator struct { + Details *ShardsAvailabilityIndicatorDetails `json:"details,omitempty"` + Diagnosis []Diagnosis `json:"diagnosis,omitempty"` + Impacts []Impact `json:"impacts,omitempty"` + Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` + Symptom string `json:"symptom"` +} + +func (s *ShardsAvailabilityIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return err + } + + case "diagnosis": + if err := dec.Decode(&s.Diagnosis); err != nil { + return err + } + + case "impacts": + if err := dec.Decode(&s.Impacts); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "symptom": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Symptom = o + + } + } + return nil +} + +// NewShardsAvailabilityIndicator returns a ShardsAvailabilityIndicator. +func NewShardsAvailabilityIndicator() *ShardsAvailabilityIndicator { + r := &ShardsAvailabilityIndicator{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsavailabilityindicatordetails.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsavailabilityindicatordetails.go new file mode 100644 index 000000000..0985b2f05 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsavailabilityindicatordetails.go @@ -0,0 +1,206 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// ShardsAvailabilityIndicatorDetails type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L109-L119 +type ShardsAvailabilityIndicatorDetails struct { + CreatingPrimaries int64 `json:"creating_primaries"` + InitializingPrimaries int64 `json:"initializing_primaries"` + InitializingReplicas int64 `json:"initializing_replicas"` + RestartingPrimaries int64 `json:"restarting_primaries"` + RestartingReplicas int64 `json:"restarting_replicas"` + StartedPrimaries int64 `json:"started_primaries"` + StartedReplicas int64 `json:"started_replicas"` + UnassignedPrimaries int64 `json:"unassigned_primaries"` + UnassignedReplicas int64 `json:"unassigned_replicas"` +} + +func (s *ShardsAvailabilityIndicatorDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "creating_primaries": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.CreatingPrimaries = value + case float64: + f := int64(v) + s.CreatingPrimaries = f + } + + case "initializing_primaries": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.InitializingPrimaries = value + case float64: + f := int64(v) + s.InitializingPrimaries = f + } + + case "initializing_replicas": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.InitializingReplicas = value + case float64: + f := int64(v) + s.InitializingReplicas = f + } + + case "restarting_primaries": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.RestartingPrimaries = value + case float64: + f := int64(v) + s.RestartingPrimaries = f + } + + case "restarting_replicas": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.RestartingReplicas = value + case float64: + f := int64(v) + s.RestartingReplicas = f + } + + case "started_primaries": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.StartedPrimaries = value + case float64: + f := int64(v) + s.StartedPrimaries = f + } + + case "started_replicas": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.StartedReplicas = value + case float64: + f := int64(v) + s.StartedReplicas = f + } + + case "unassigned_primaries": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.UnassignedPrimaries = value + case float64: + f := int64(v) + s.UnassignedPrimaries = f + } + + case "unassigned_replicas": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.UnassignedReplicas = value + case float64: + f := int64(v) + s.UnassignedReplicas = f + } + + } + } + return nil +} + +// NewShardsAvailabilityIndicatorDetails returns a ShardsAvailabilityIndicatorDetails. 
+func NewShardsAvailabilityIndicatorDetails() *ShardsAvailabilityIndicatorDetails { + r := &ShardsAvailabilityIndicatorDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardscapacityindicator.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardscapacityindicator.go new file mode 100644 index 000000000..886e1dcb0 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardscapacityindicator.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indicatorhealthstatus" +) + +// ShardsCapacityIndicator type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L171-L175 +type ShardsCapacityIndicator struct { + Details *ShardsCapacityIndicatorDetails `json:"details,omitempty"` + Diagnosis []Diagnosis `json:"diagnosis,omitempty"` + Impacts []Impact `json:"impacts,omitempty"` + Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` + Symptom string `json:"symptom"` +} + +func (s *ShardsCapacityIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return err + } + + case "diagnosis": + if err := dec.Decode(&s.Diagnosis); err != nil { + return err + } + + case "impacts": + if err := dec.Decode(&s.Impacts); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "symptom": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Symptom = o + + } + } + return nil +} + +// NewShardsCapacityIndicator returns a ShardsCapacityIndicator. +func NewShardsCapacityIndicator() *ShardsCapacityIndicator { + r := &ShardsCapacityIndicator{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardscapacityindicatordetails.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardscapacityindicatordetails.go new file mode 100644 index 000000000..316732501 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardscapacityindicatordetails.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +// ShardsCapacityIndicatorDetails type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L177-L180 +type ShardsCapacityIndicatorDetails struct { + Data ShardsCapacityIndicatorTierDetail `json:"data"` + Frozen ShardsCapacityIndicatorTierDetail `json:"frozen"` +} + +// NewShardsCapacityIndicatorDetails returns a ShardsCapacityIndicatorDetails. +func NewShardsCapacityIndicatorDetails() *ShardsCapacityIndicatorDetails { + r := &ShardsCapacityIndicatorDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardscapacityindicatortierdetail.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardscapacityindicatortierdetail.go new file mode 100644 index 000000000..bd5435f0d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardscapacityindicatortierdetail.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// ShardsCapacityIndicatorTierDetail type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L182-L185 +type ShardsCapacityIndicatorTierDetail struct { + CurrentUsedShards *int `json:"current_used_shards,omitempty"` + MaxShardsInCluster int `json:"max_shards_in_cluster"` +} + +func (s *ShardsCapacityIndicatorTierDetail) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current_used_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CurrentUsedShards = &value + case float64: + f := int(v) + s.CurrentUsedShards = &f + } + + case "max_shards_in_cluster": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxShardsInCluster = value + case float64: + f := int(v) + s.MaxShardsInCluster = f + } + + } + } + return nil +} + +// NewShardsCapacityIndicatorTierDetail returns a ShardsCapacityIndicatorTierDetail. +func NewShardsCapacityIndicatorTierDetail() *ShardsCapacityIndicatorTierDetail { + r := &ShardsCapacityIndicatorTierDetail{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsegmentrouting.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsegmentrouting.go index 6b406dae0..431cc651b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsegmentrouting.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsegmentrouting.go @@ -16,19 +16,85 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardSegmentRouting type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/segments/types.ts#L40-L44 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/segments/types.ts#L40-L44 type ShardSegmentRouting struct { Node string `json:"node"` Primary bool `json:"primary"` State string `json:"state"` } +func (s *ShardSegmentRouting) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = o + + case "primary": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Primary = value + case bool: + s.Primary = v + } + + case "state": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.State = o + + } + } + return nil +} + // NewShardSegmentRouting returns a ShardSegmentRouting. 
func NewShardSegmentRouting() *ShardSegmentRouting { r := &ShardSegmentRouting{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsequencenumber.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsequencenumber.go index 5d68a26cd..5780503af 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsequencenumber.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsequencenumber.go @@ -16,19 +16,82 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardSequenceNumber type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/stats/types.ts#L167-L171 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/stats/types.ts#L176-L180 type ShardSequenceNumber struct { GlobalCheckpoint int64 `json:"global_checkpoint"` LocalCheckpoint int64 `json:"local_checkpoint"` MaxSeqNo int64 `json:"max_seq_no"` } +func (s *ShardSequenceNumber) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "global_checkpoint": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.GlobalCheckpoint = value + case float64: + f := int64(v) + s.GlobalCheckpoint = f + } + + case "local_checkpoint": + var tmp interface{} + dec.Decode(&tmp) + switch v := 
tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LocalCheckpoint = value + case float64: + f := int64(v) + s.LocalCheckpoint = f + } + + case "max_seq_no": + if err := dec.Decode(&s.MaxSeqNo); err != nil { + return err + } + + } + } + return nil +} + // NewShardSequenceNumber returns a ShardSequenceNumber. func NewShardSequenceNumber() *ShardSequenceNumber { r := &ShardSequenceNumber{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsrecord.go index 651ae3c99..14eddb8c6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsrecord.go @@ -16,169 +16,1148 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/shards/types.ts#L20-L396 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/shards/types.ts#L20-L421 type ShardsRecord struct { - // BulkAvgSizeInBytes avg size in bytes of shard bulk + // BulkAvgSizeInBytes The average size in bytes of shard bulk operations. BulkAvgSizeInBytes *string `json:"bulk.avg_size_in_bytes,omitempty"` - // BulkAvgTime average time spend in shard bulk + // BulkAvgTime The average time spent in shard bulk operations. 
BulkAvgTime *string `json:"bulk.avg_time,omitempty"` - // BulkTotalOperations number of bulk shard ops + // BulkTotalOperations The number of bulk shard operations. BulkTotalOperations *string `json:"bulk.total_operations,omitempty"` - // BulkTotalSizeInBytes total size in bytes of shard bulk + // BulkTotalSizeInBytes The total size in bytes of shard bulk operations. BulkTotalSizeInBytes *string `json:"bulk.total_size_in_bytes,omitempty"` - // BulkTotalTime time spend in shard bulk + // BulkTotalTime The time spent in shard bulk operations. BulkTotalTime *string `json:"bulk.total_time,omitempty"` - // CompletionSize size of completion + // CompletionSize The size of completion. CompletionSize *string `json:"completion.size,omitempty"` - // Docs number of docs in shard + // Docs The number of documents in the shard. Docs string `json:"docs,omitempty"` - // FielddataEvictions fielddata evictions + // FielddataEvictions The fielddata cache evictions. FielddataEvictions *string `json:"fielddata.evictions,omitempty"` - // FielddataMemorySize used fielddata cache + // FielddataMemorySize The used fielddata cache memory. FielddataMemorySize *string `json:"fielddata.memory_size,omitempty"` - // FlushTotal number of flushes + // FlushTotal The number of flushes. FlushTotal *string `json:"flush.total,omitempty"` - // FlushTotalTime time spent in flush + // FlushTotalTime The time spent in flush. FlushTotalTime *string `json:"flush.total_time,omitempty"` - // GetCurrent number of current get ops + // GetCurrent The number of current get operations. GetCurrent *string `json:"get.current,omitempty"` - // GetExistsTime time spent in successful gets + // GetExistsTime The time spent in successful get operations. GetExistsTime *string `json:"get.exists_time,omitempty"` - // GetExistsTotal number of successful gets + // GetExistsTotal The number of successful get operations. 
GetExistsTotal *string `json:"get.exists_total,omitempty"` - // GetMissingTime time spent in failed gets + // GetMissingTime The time spent in failed get operations. GetMissingTime *string `json:"get.missing_time,omitempty"` - // GetMissingTotal number of failed gets + // GetMissingTotal The number of failed get operations. GetMissingTotal *string `json:"get.missing_total,omitempty"` - // GetTime time spent in get + // GetTime The time spent in get operations. GetTime *string `json:"get.time,omitempty"` - // GetTotal number of get ops + // GetTotal The number of get operations. GetTotal *string `json:"get.total,omitempty"` - // Id unique id of node where it lives + // Id The unique identifier for the node. Id *string `json:"id,omitempty"` - // Index index name + // Index The index name. Index *string `json:"index,omitempty"` - // IndexingDeleteCurrent number of current deletions + // IndexingDeleteCurrent The number of current deletion operations. IndexingDeleteCurrent *string `json:"indexing.delete_current,omitempty"` - // IndexingDeleteTime time spent in deletions + // IndexingDeleteTime The time spent in deletion operations. IndexingDeleteTime *string `json:"indexing.delete_time,omitempty"` - // IndexingDeleteTotal number of delete ops + // IndexingDeleteTotal The number of delete operations. IndexingDeleteTotal *string `json:"indexing.delete_total,omitempty"` - // IndexingIndexCurrent number of current indexing ops + // IndexingIndexCurrent The number of current indexing operations. IndexingIndexCurrent *string `json:"indexing.index_current,omitempty"` - // IndexingIndexFailed number of failed indexing ops + // IndexingIndexFailed The number of failed indexing operations. IndexingIndexFailed *string `json:"indexing.index_failed,omitempty"` - // IndexingIndexTime time spent in indexing + // IndexingIndexTime The time spent in indexing operations. 
IndexingIndexTime *string `json:"indexing.index_time,omitempty"` - // IndexingIndexTotal number of indexing ops + // IndexingIndexTotal The number of indexing operations. IndexingIndexTotal *string `json:"indexing.index_total,omitempty"` - // Ip ip of node where it lives + // Ip The IP address of the node. Ip string `json:"ip,omitempty"` - // MergesCurrent number of current merges + // MergesCurrent The number of current merge operations. MergesCurrent *string `json:"merges.current,omitempty"` - // MergesCurrentDocs number of current merging docs + // MergesCurrentDocs The number of current merging documents. MergesCurrentDocs *string `json:"merges.current_docs,omitempty"` - // MergesCurrentSize size of current merges + // MergesCurrentSize The size of current merge operations. MergesCurrentSize *string `json:"merges.current_size,omitempty"` - // MergesTotal number of completed merge ops + // MergesTotal The number of completed merge operations. MergesTotal *string `json:"merges.total,omitempty"` - // MergesTotalDocs docs merged + // MergesTotalDocs The number of merged documents. MergesTotalDocs *string `json:"merges.total_docs,omitempty"` - // MergesTotalSize size merged + // MergesTotalSize The size of current merges. MergesTotalSize *string `json:"merges.total_size,omitempty"` - // MergesTotalTime time spent in merges + // MergesTotalTime The time spent merging documents. MergesTotalTime *string `json:"merges.total_time,omitempty"` - // Node name of node where it lives + // Node The name of node. Node string `json:"node,omitempty"` - // PathData shard data path + // PathData The shard data path. PathData *string `json:"path.data,omitempty"` - // PathState shard state path + // PathState The shard state path. PathState *string `json:"path.state,omitempty"` - // Prirep primary or replica + // Prirep The shard type: `primary` or `replica`.
Prirep *string `json:"prirep,omitempty"` - // QueryCacheEvictions query cache evictions + // QueryCacheEvictions The query cache evictions. QueryCacheEvictions *string `json:"query_cache.evictions,omitempty"` - // QueryCacheMemorySize used query cache + // QueryCacheMemorySize The used query cache memory. QueryCacheMemorySize *string `json:"query_cache.memory_size,omitempty"` - // RecoverysourceType recovery source type + // RecoverysourceType The type of recovery source. RecoverysourceType *string `json:"recoverysource.type,omitempty"` - // RefreshExternalTime time spent in external refreshes + // RefreshExternalTime The time spent in external refreshes. RefreshExternalTime *string `json:"refresh.external_time,omitempty"` - // RefreshExternalTotal total external refreshes + // RefreshExternalTotal The total number of external refreshes. RefreshExternalTotal *string `json:"refresh.external_total,omitempty"` - // RefreshListeners number of pending refresh listeners + // RefreshListeners The number of pending refresh listeners. RefreshListeners *string `json:"refresh.listeners,omitempty"` - // RefreshTime time spent in refreshes + // RefreshTime The time spent in refreshes. RefreshTime *string `json:"refresh.time,omitempty"` - // RefreshTotal total refreshes + // RefreshTotal The total number of refreshes. RefreshTotal *string `json:"refresh.total,omitempty"` - // SearchFetchCurrent current fetch phase ops + // SearchFetchCurrent The current fetch phase operations. SearchFetchCurrent *string `json:"search.fetch_current,omitempty"` - // SearchFetchTime time spent in fetch phase + // SearchFetchTime The time spent in fetch phase. SearchFetchTime *string `json:"search.fetch_time,omitempty"` - // SearchFetchTotal total fetch ops + // SearchFetchTotal The total number of fetch operations. SearchFetchTotal *string `json:"search.fetch_total,omitempty"` - // SearchOpenContexts open search contexts + // SearchOpenContexts The number of open search contexts.
SearchOpenContexts *string `json:"search.open_contexts,omitempty"` - // SearchQueryCurrent current query phase ops + // SearchQueryCurrent The current query phase operations. SearchQueryCurrent *string `json:"search.query_current,omitempty"` - // SearchQueryTime time spent in query phase + // SearchQueryTime The time spent in query phase. SearchQueryTime *string `json:"search.query_time,omitempty"` - // SearchQueryTotal total query phase ops + // SearchQueryTotal The total number of query phase operations. SearchQueryTotal *string `json:"search.query_total,omitempty"` - // SearchScrollCurrent open scroll contexts + // SearchScrollCurrent The open scroll contexts. SearchScrollCurrent *string `json:"search.scroll_current,omitempty"` - // SearchScrollTime time scroll contexts held open + // SearchScrollTime The time scroll contexts were held open. SearchScrollTime *string `json:"search.scroll_time,omitempty"` - // SearchScrollTotal completed scroll contexts + // SearchScrollTotal The number of completed scroll contexts. SearchScrollTotal *string `json:"search.scroll_total,omitempty"` - // SegmentsCount number of segments + // SegmentsCount The number of segments. SegmentsCount *string `json:"segments.count,omitempty"` - // SegmentsFixedBitsetMemory memory used by fixed bit sets for nested object field types and export type - // filters for types referred in _parent fields + // SegmentsFixedBitsetMemory The memory used by fixed bit sets for nested object field types and export + // type filters for types referred in `_parent` fields. SegmentsFixedBitsetMemory *string `json:"segments.fixed_bitset_memory,omitempty"` - // SegmentsIndexWriterMemory memory used by index writer + // SegmentsIndexWriterMemory The memory used by the index writer. SegmentsIndexWriterMemory *string `json:"segments.index_writer_memory,omitempty"` - // SegmentsMemory memory used by segments + // SegmentsMemory The memory used by segments. 
SegmentsMemory *string `json:"segments.memory,omitempty"` - // SegmentsVersionMapMemory memory used by version map + // SegmentsVersionMapMemory The memory used by the version map. SegmentsVersionMapMemory *string `json:"segments.version_map_memory,omitempty"` - // SeqNoGlobalCheckpoint global checkpoint + // SeqNoGlobalCheckpoint The global checkpoint. SeqNoGlobalCheckpoint *string `json:"seq_no.global_checkpoint,omitempty"` - // SeqNoLocalCheckpoint local checkpoint + // SeqNoLocalCheckpoint The local checkpoint. SeqNoLocalCheckpoint *string `json:"seq_no.local_checkpoint,omitempty"` - // SeqNoMax max sequence number + // SeqNoMax The maximum sequence number. SeqNoMax *string `json:"seq_no.max,omitempty"` - // Shard shard name + // Shard The shard name. Shard *string `json:"shard,omitempty"` - // State shard state + // State The shard state. + // Returned values include: + // `INITIALIZING`: The shard is recovering from a peer shard or gateway. + // `RELOCATING`: The shard is relocating. + // `STARTED`: The shard has started. + // `UNASSIGNED`: The shard is not assigned to any node. State *string `json:"state,omitempty"` - // Store store size of shard (how much disk it uses) + // Store The disk space used by the shard. Store string `json:"store,omitempty"` - // SyncId sync id + // SyncId The sync identifier. SyncId *string `json:"sync_id,omitempty"` - // UnassignedAt time shard became unassigned (UTC) + // UnassignedAt The time at which the shard became unassigned in Coordinated Universal Time + // (UTC). UnassignedAt *string `json:"unassigned.at,omitempty"` - // UnassignedDetails additional details as to why the shard became unassigned + // UnassignedDetails Additional details as to why the shard became unassigned. + // It does not explain why the shard is not assigned; use the cluster allocation + // explain API for that information. 
UnassignedDetails *string `json:"unassigned.details,omitempty"` - // UnassignedFor time has been unassigned + // UnassignedFor The time at which the shard was requested to be unassigned in Coordinated + // Universal Time (UTC). UnassignedFor *string `json:"unassigned.for,omitempty"` - // UnassignedReason reason shard is unassigned + // UnassignedReason The reason for the last change to the state of an unassigned shard. + // It does not explain why the shard is currently unassigned; use the cluster + // allocation explain API for that information. + // Returned values include: + // `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the + // shard. + // `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery. + // `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling + // index. + // `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed + // index. + // `FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing + // an empty primary using the cluster reroute API. + // `INDEX_CLOSED`: Unassigned because the index was closed. + // `INDEX_CREATED`: Unassigned as a result of an API creation of an index. + // `INDEX_REOPENED`: Unassigned as a result of opening a closed index. + // `MANUAL_ALLOCATION`: The shard’s allocation was last modified by the cluster + // reroute API. + // `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index. + // `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the + // cluster. + // `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was + // registered as restarting using the node shutdown API. + // `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary + // shard failed before the initialization completed. + // `REALLOCATED_REPLICA`: A better replica location is identified and causes the + // existing replica allocation to be cancelled. 
+ // `REINITIALIZED`: When a shard moves from started back to initializing. + // `REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica. + // `REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute + // command. UnassignedReason *string `json:"unassigned.reason,omitempty"` - // WarmerCurrent current warmer ops + // WarmerCurrent The number of current warmer operations. WarmerCurrent *string `json:"warmer.current,omitempty"` - // WarmerTotal total warmer ops + // WarmerTotal The total number of warmer operations. WarmerTotal *string `json:"warmer.total,omitempty"` - // WarmerTotalTime time spent in warmers + // WarmerTotalTime The time spent in warmer operations. WarmerTotalTime *string `json:"warmer.total_time,omitempty"` } +func (s *ShardsRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bulk.avg_size_in_bytes", "basi", "bulkAvgSizeInBytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkAvgSizeInBytes = &o + + case "bulk.avg_time", "bati", "bulkAvgTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkAvgTime = &o + + case "bulk.total_operations", "bto", "bulkTotalOperations": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalOperations = &o + + case "bulk.total_size_in_bytes", "btsi", "bulkTotalSizeInBytes": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalSizeInBytes = &o + + case "bulk.total_time", "btti", "bulkTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.BulkTotalTime = &o + + case "completion.size", "cs", "completionSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CompletionSize = &o + + case "docs", "d", "dc": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Docs = o + + case "fielddata.evictions", "fe", "fielddataEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FielddataEvictions = &o + + case "fielddata.memory_size", "fm", "fielddataMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FielddataMemorySize = &o + + case "flush.total", "ft", "flushTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FlushTotal = &o + + case "flush.total_time", "ftt", "flushTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FlushTotalTime = &o + + case "get.current", "gc", "getCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } 
+ o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetCurrent = &o + + case "get.exists_time", "geti", "getExistsTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetExistsTime = &o + + case "get.exists_total", "geto", "getExistsTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetExistsTotal = &o + + case "get.missing_time", "gmti", "getMissingTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetMissingTime = &o + + case "get.missing_total", "gmto", "getMissingTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetMissingTotal = &o + + case "get.time", "gti", "getTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetTime = &o + + case "get.total", "gto", "getTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.GetTotal = &o + + case "id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = &o + + case "index", "i", "idx": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Index = &o + + case "indexing.delete_current", "idc", "indexingDeleteCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteCurrent = &o + + case "indexing.delete_time", "idti", "indexingDeleteTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteTime = &o + + case "indexing.delete_total", "idto", "indexingDeleteTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingDeleteTotal = &o + + case "indexing.index_current", "iic", "indexingIndexCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexCurrent = &o + + case "indexing.index_failed", "iif", "indexingIndexFailed": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexFailed = &o + + case "indexing.index_time", "iiti", "indexingIndexTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexTime = &o + + case "indexing.index_total", "iito", "indexingIndexTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexingIndexTotal = &o + + 
case "ip": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Ip = o + + case "merges.current", "mc", "mergesCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrent = &o + + case "merges.current_docs", "mcd", "mergesCurrentDocs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrentDocs = &o + + case "merges.current_size", "mcs", "mergesCurrentSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesCurrentSize = &o + + case "merges.total", "mt", "mergesTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotal = &o + + case "merges.total_docs", "mtd", "mergesTotalDocs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotalDocs = &o + + case "merges.total_size", "mts", "mergesTotalSize": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MergesTotalSize = &o + + case "merges.total_time", "mtt", "mergesTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + 
} + s.MergesTotalTime = &o + + case "node", "n": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = o + + case "path.data", "pd", "dataPath": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PathData = &o + + case "path.state", "ps", "statsPath": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PathState = &o + + case "prirep", "p", "pr", "primaryOrReplica": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Prirep = &o + + case "query_cache.evictions", "qce", "queryCacheEvictions": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryCacheEvictions = &o + + case "query_cache.memory_size", "qcm", "queryCacheMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryCacheMemorySize = &o + + case "recoverysource.type", "rs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RecoverysourceType = &o + + case "refresh.external_time", "rti", "refreshTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) 
+ } + s.RefreshExternalTime = &o + + case "refresh.external_total", "rto", "refreshTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshExternalTotal = &o + + case "refresh.listeners", "rli", "refreshListeners": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshListeners = &o + + case "refresh.time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshTime = &o + + case "refresh.total": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RefreshTotal = &o + + case "search.fetch_current", "sfc", "searchFetchCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchCurrent = &o + + case "search.fetch_time", "sfti", "searchFetchTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchTime = &o + + case "search.fetch_total", "sfto", "searchFetchTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFetchTotal = &o + + case "search.open_contexts", "so", "searchOpenContexts": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err 
= strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchOpenContexts = &o + + case "search.query_current", "sqc", "searchQueryCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQueryCurrent = &o + + case "search.query_time", "sqti", "searchQueryTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQueryTime = &o + + case "search.query_total", "sqto", "searchQueryTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQueryTotal = &o + + case "search.scroll_current", "scc", "searchScrollCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollCurrent = &o + + case "search.scroll_time", "scti", "searchScrollTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollTime = &o + + case "search.scroll_total", "scto", "searchScrollTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchScrollTotal = &o + + case "segments.count", "sc", "segmentsCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsCount = &o + + case "segments.fixed_bitset_memory", 
"sfbm", "fixedBitsetMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsFixedBitsetMemory = &o + + case "segments.index_writer_memory", "siwm", "segmentsIndexWriterMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsIndexWriterMemory = &o + + case "segments.memory", "sm", "segmentsMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsMemory = &o + + case "segments.version_map_memory", "svmm", "segmentsVersionMapMemory": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SegmentsVersionMapMemory = &o + + case "seq_no.global_checkpoint", "sqg", "globalCheckpoint": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SeqNoGlobalCheckpoint = &o + + case "seq_no.local_checkpoint", "sql", "localCheckpoint": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SeqNoLocalCheckpoint = &o + + case "seq_no.max", "sqm", "maxSeqNo": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SeqNoMax = &o + + case "shard", "s", "sh": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := 
string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Shard = &o + + case "state", "st": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.State = &o + + case "store", "sto": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Store = o + + case "sync_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SyncId = &o + + case "unassigned.at", "ua": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UnassignedAt = &o + + case "unassigned.details", "ud": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UnassignedDetails = &o + + case "unassigned.for", "uf": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UnassignedFor = &o + + case "unassigned.reason", "ur": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UnassignedReason = &o + + case "warmer.current", "wc", "warmerCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WarmerCurrent = &o + + case "warmer.total", 
"wto", "warmerTotal": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WarmerTotal = &o + + case "warmer.total_time", "wtt", "warmerTotalTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.WarmerTotalTime = &o + + } + } + return nil +} + // NewShardsRecord returns a ShardsRecord. func NewShardsRecord() *ShardsRecord { r := &ShardsRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardssegment.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardssegment.go index 81c09288f..02bf06692 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardssegment.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardssegment.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardsSegment type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/segments/types.ts#L46-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/segments/types.ts#L46-L51 type ShardsSegment struct { NumCommittedSegments int `json:"num_committed_segments"` NumSearchSegments int `json:"num_search_segments"` @@ -30,6 +38,71 @@ type ShardsSegment struct { Segments map[string]Segment `json:"segments"` } +func (s *ShardsSegment) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "num_committed_segments": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumCommittedSegments = value + case float64: + f := int(v) + s.NumCommittedSegments = f + } + + case "num_search_segments": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumSearchSegments = value + case float64: + f := int(v) + s.NumSearchSegments = f + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + case "segments": + if s.Segments == nil { + s.Segments = make(map[string]Segment, 0) + } + if err := dec.Decode(&s.Segments); err != nil { + return err + } + + } + } + return nil +} + // NewShardsSegment returns a ShardsSegment. 
func NewShardsSegment() *ShardsSegment { r := &ShardsSegment{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsstatssummary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsstatssummary.go index 97135d8c3..b6e9d8be4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsstatssummary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsstatssummary.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // ShardsStatsSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/_types/SnapshotShardsStatus.ts#L29-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/_types/SnapshotShardsStatus.ts#L29-L35 type ShardsStatsSummary struct { Incremental ShardsStatsSummaryItem `json:"incremental"` StartTimeInMillis int64 `json:"start_time_in_millis"` @@ -31,6 +38,51 @@ type ShardsStatsSummary struct { Total ShardsStatsSummaryItem `json:"total"` } +func (s *ShardsStatsSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "incremental": + if err := dec.Decode(&s.Incremental); err != nil { + return err + } + + case "start_time_in_millis": + if err := dec.Decode(&s.StartTimeInMillis); err != nil { + return err + } + + case "time": + if err := dec.Decode(&s.Time); err != nil { + return err + } + + case 
"time_in_millis": + if err := dec.Decode(&s.TimeInMillis); err != nil { + return err + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return err + } + + } + } + return nil +} + // NewShardsStatsSummary returns a ShardsStatsSummary. func NewShardsStatsSummary() *ShardsStatsSummary { r := &ShardsStatsSummary{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsstatssummaryitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsstatssummaryitem.go index fc347c729..3be26e5ba 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsstatssummaryitem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardsstatssummaryitem.go @@ -16,18 +16,76 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardsStatsSummaryItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/_types/SnapshotShardsStatus.ts#L37-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/_types/SnapshotShardsStatus.ts#L37-L40 type ShardsStatsSummaryItem struct { FileCount int64 `json:"file_count"` SizeInBytes int64 `json:"size_in_bytes"` } +func (s *ShardsStatsSummaryItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "file_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.FileCount = value + case float64: + f := int64(v) + s.FileCount = f + } + + case "size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + } + } + return nil +} + // NewShardsStatsSummaryItem returns a ShardsStatsSummaryItem. func NewShardsStatsSummaryItem() *ShardsStatsSummaryItem { r := &ShardsStatsSummaryItem{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstatistics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstatistics.go index bb4c77bac..034877098 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstatistics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstatistics.go @@ -16,19 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ShardStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L33-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L54-L66 type ShardStatistics struct { - Failed uint `json:"failed"` - Failures []ShardFailure `json:"failures,omitempty"` - Skipped *uint `json:"skipped,omitempty"` - Successful uint `json:"successful"` - Total uint `json:"total"` + Failed uint `json:"failed"` + Failures []ShardFailure `json:"failures,omitempty"` + Skipped *uint `json:"skipped,omitempty"` + // Successful Indicates how many shards have successfully run the search. + Successful uint `json:"successful"` + // Total Indicates how many shards the search will run on overall. + Total uint `json:"total"` } // NewShardStatistics returns a ShardStatistics. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstore.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstore.go index d578d5674..ff3071f3d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstore.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstore.go @@ -16,27 +16,75 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardstoreallocation" - + "bytes" "encoding/json" + "errors" "fmt" + "io" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardstoreallocation" ) // ShardStore type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/shard_stores/types.ts#L30-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/shard_stores/types.ts#L30-L34 type ShardStore struct { Allocation shardstoreallocation.ShardStoreAllocation `json:"allocation"` AllocationId *string `json:"allocation_id,omitempty"` - ShardStore map[string]ShardStoreNode `json:"-"` + ShardStore map[string]ShardStoreNode `json:"ShardStore,omitempty"` StoreException *ShardStoreException `json:"store_exception,omitempty"` } +func (s *ShardStore) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allocation": + if err := dec.Decode(&s.Allocation); err != nil { + return err + } + + case "allocation_id": + if err := dec.Decode(&s.AllocationId); err != nil { + return err + } + + case "ShardStore": + if s.ShardStore == nil { + s.ShardStore = make(map[string]ShardStoreNode, 0) + } + if err := dec.Decode(&s.ShardStore); err != nil { + return err + } + + case "store_exception": + if err := dec.Decode(&s.StoreException); err != nil { + return err + } + + default: + + } + } + return nil +} + // MarhsalJSON overrides marshalling for types with additional properties func (s ShardStore) MarshalJSON() ([]byte, 
error) { type opt ShardStore @@ -56,6 +104,7 @@ func (s ShardStore) MarshalJSON() ([]byte, error) { for key, value := range s.ShardStore { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "ShardStore") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstoreexception.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstoreexception.go index 7eb093653..2e0ca5773 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstoreexception.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstoreexception.go @@ -16,18 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardStoreException type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/shard_stores/types.ts#L51-L54 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/shard_stores/types.ts#L51-L54 type ShardStoreException struct { Reason string `json:"reason"` Type string `json:"type"` } +func (s *ShardStoreException) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewShardStoreException returns a ShardStoreException. func NewShardStoreException() *ShardStoreException { r := &ShardStoreException{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstoreindex.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstoreindex.go index 47d2cf712..decce0c2a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstoreindex.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstoreindex.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ShardStoreIndex type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search_shards/SearchShardsResponse.ts#L33-L36 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search_shards/SearchShardsResponse.ts#L33-L36 type ShardStoreIndex struct { Aliases []string `json:"aliases,omitempty"` Filter *Query `json:"filter,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstorenode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstorenode.go index 44dcb2a7e..1d36d7af2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstorenode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstorenode.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardStoreNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/shard_stores/types.ts#L36-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/shard_stores/types.ts#L36-L43 type ShardStoreNode struct { Attributes map[string]string `json:"attributes"` EphemeralId *string `json:"ephemeral_id,omitempty"` @@ -32,6 +40,73 @@ type ShardStoreNode struct { TransportAddress string `json:"transport_address"` } +func (s *ShardStoreNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return err + } + + case "ephemeral_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.EphemeralId = &o + + case "external_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ExternalId = &o + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return err + } + + } + } + return nil +} + // NewShardStoreNode returns a ShardStoreNode. 
func NewShardStoreNode() *ShardStoreNode { r := &ShardStoreNode{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstorewrapper.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstorewrapper.go index d168c46d8..fdbe1fbb5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstorewrapper.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstorewrapper.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // ShardStoreWrapper type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/shard_stores/types.ts#L56-L58 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/shard_stores/types.ts#L56-L58 type ShardStoreWrapper struct { Stores []ShardStore `json:"stores"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstotalstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstotalstats.go index ee4646c18..877464ee5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstotalstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shardstotalstats.go @@ -16,17 +16,60 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShardsTotalStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/stats/types.ts#L173-L175 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/stats/types.ts#L182-L184 type ShardsTotalStats struct { TotalCount int64 `json:"total_count"` } +func (s *ShardsTotalStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "total_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalCount = value + case float64: + f := int64(v) + s.TotalCount = f + } + + } + } + return nil +} + // NewShardsTotalStats returns a ShardsTotalStats. func NewShardsTotalStats() *ShardsTotalStats { r := &ShardsTotalStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shared.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shared.go index 91e4adc83..e1125031d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shared.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shared.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Shared type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/searchable_snapshots/cache_stats/Response.ts#L34-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/searchable_snapshots/cache_stats/Response.ts#L34-L43 type Shared struct { BytesReadInBytes ByteSize `json:"bytes_read_in_bytes"` BytesWrittenInBytes ByteSize `json:"bytes_written_in_bytes"` @@ -34,6 +42,107 @@ type Shared struct { Writes int64 `json:"writes"` } +func (s *Shared) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bytes_read_in_bytes": + if err := dec.Decode(&s.BytesReadInBytes); err != nil { + return err + } + + case "bytes_written_in_bytes": + if err := dec.Decode(&s.BytesWrittenInBytes); err != nil { + return err + } + + case "evictions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Evictions = value + case float64: + f := int64(v) + s.Evictions = f + } + + case "num_regions": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumRegions = value + case float64: + f := int(v) + s.NumRegions = f + } + + case "reads": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + 
return err + } + s.Reads = value + case float64: + f := int64(v) + s.Reads = f + } + + case "region_size_in_bytes": + if err := dec.Decode(&s.RegionSizeInBytes); err != nil { + return err + } + + case "size_in_bytes": + if err := dec.Decode(&s.SizeInBytes); err != nil { + return err + } + + case "writes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Writes = value + case float64: + f := int64(v) + s.Writes = f + } + + } + } + return nil +} + // NewShared returns a Shared. func NewShared() *Shared { r := &Shared{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shingletokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shingletokenfilter.go index 1485f3114..2154c181c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shingletokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shingletokenfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShingleTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L86-L94 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L87-L95 type ShingleTokenFilter struct { FillerToken *string `json:"filler_token,omitempty"` MaxShingleSize string `json:"max_shingle_size,omitempty"` @@ -34,11 +42,134 @@ type ShingleTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *ShingleTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "filler_token": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FillerToken = &o + + case "max_shingle_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxShingleSize = o + + case "min_shingle_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MinShingleSize = o + + case "output_unigrams": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.OutputUnigrams = &value + case bool: + s.OutputUnigrams = &v + } + + case "output_unigrams_if_no_shingles": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.OutputUnigramsIfNoShingles = &value + case 
bool: + s.OutputUnigramsIfNoShingles = &v + } + + case "token_separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TokenSeparator = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s ShingleTokenFilter) MarshalJSON() ([]byte, error) { + type innerShingleTokenFilter ShingleTokenFilter + tmp := innerShingleTokenFilter{ + FillerToken: s.FillerToken, + MaxShingleSize: s.MaxShingleSize, + MinShingleSize: s.MinShingleSize, + OutputUnigrams: s.OutputUnigrams, + OutputUnigramsIfNoShingles: s.OutputUnigramsIfNoShingles, + TokenSeparator: s.TokenSeparator, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "shingle" + + return json.Marshal(tmp) +} + // NewShingleTokenFilter returns a ShingleTokenFilter. func NewShingleTokenFilter() *ShingleTokenFilter { r := &ShingleTokenFilter{} - r.Type = "shingle" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shortnumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shortnumberproperty.go index 474bbf9ab..94ee20f62 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shortnumberproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shortnumberproperty.go @@ -16,25 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // ShortNumberProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L156-L159 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L159-L162 type ShortNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -63,6 +63,7 @@ type ShortNumberProperty struct { } func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -77,23 +78,63 @@ func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, 
err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -102,6 +143,9 @@ func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -110,7 +154,9 @@ func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -389,28 +435,62 @@ func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + 
s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } @@ -426,6 +506,9 @@ func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -434,7 +517,9 @@ func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -713,9 +798,11 @@ func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -725,18 +812,43 @@ func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := 
dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "time_series_dimension": - if err := dec.Decode(&s.TimeSeriesDimension); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v } case "time_series_metric": @@ -754,6 +866,36 @@ func (s *ShortNumberProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s ShortNumberProperty) MarshalJSON() ([]byte, error) { + type innerShortNumberProperty ShortNumberProperty + tmp := innerShortNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Similarity: s.Similarity, + Store: s.Store, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "short" + + return json.Marshal(tmp) +} + // NewShortNumberProperty returns a ShortNumberProperty. 
func NewShortNumberProperty() *ShortNumberProperty { r := &ShortNumberProperty{ @@ -762,7 +904,5 @@ func NewShortNumberProperty() *ShortNumberProperty { Properties: make(map[string]Property, 0), } - r.Type = "short" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shrinkconfiguration.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shrinkconfiguration.go index eb9a75160..1a473e7ea 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shrinkconfiguration.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/shrinkconfiguration.go @@ -16,17 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ShrinkConfiguration type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/_types/Phase.ts#L57-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/_types/Phase.ts#L60-L62 type ShrinkConfiguration struct { NumberOfShards int `json:"number_of_shards"` } +func (s *ShrinkConfiguration) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "number_of_shards": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumberOfShards = value + case float64: + f := int(v) + s.NumberOfShards = f + } + + } + } + return nil +} + // NewShrinkConfiguration returns a ShrinkConfiguration. func NewShrinkConfiguration() *ShrinkConfiguration { r := &ShrinkConfiguration{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significantlongtermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significantlongtermsaggregate.go index 7ce6361f8..07121540b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significantlongtermsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significantlongtermsaggregate.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // SignificantLongTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L587-L589 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L588-L590 type SignificantLongTermsAggregate struct { BgCount *int64 `json:"bg_count,omitempty"` Buckets BucketsSignificantLongTermsBucket `json:"buckets"` DocCount *int64 `json:"doc_count,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *SignificantLongTermsAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -53,8 +54,18 @@ func (s *SignificantLongTermsAggregate) UnmarshalJSON(data []byte) error { switch t { case "bg_count": - if err := dec.Decode(&s.BgCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BgCount = &value + case float64: + f := int64(v) + s.BgCount = &f } case "buckets": @@ -64,21 +75,33 @@ func (s *SignificantLongTermsAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]SignificantLongTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []SignificantLongTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCount = &value + case float64: + f := int64(v) + s.DocCount = &f } case 
"meta": diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significantlongtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significantlongtermsbucket.go index 9a63f868a..fec2898f2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significantlongtermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significantlongtermsbucket.go @@ -16,25 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // SignificantLongTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L596-L599 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L597-L600 type SignificantLongTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` BgCount int64 `json:"bg_count"` @@ -45,6 +43,7 @@ type SignificantLongTermsBucket struct { } func (s *SignificantLongTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -58,471 +57,590 @@ func (s *SignificantLongTermsBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "bg_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if 
strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() 
- if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return 
err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err 
- } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "bg_count": - if err := dec.Decode(&s.BgCount); err != nil { - return err + s.BgCount = value + case float64: + f := int64(v) + s.BgCount = f } case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "key": - if err := dec.Decode(&s.Key); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Key = value + case float64: + f := int64(v) + s.Key = f } case "key_as_string": - if err := dec.Decode(&s.KeyAsString); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeyAsString = &o case "score": - if err := dec.Decode(&s.Score); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Score = f + case float64: + f := Float64(v) + s.Score = f + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := 
NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := 
dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := 
dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": 
+ o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } } } @@ -549,6 +667,7 @@ func (s SignificantLongTermsBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significantstringtermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significantstringtermsaggregate.go index 545945bd8..8b09eb061 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significantstringtermsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significantstringtermsaggregate.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // SignificantStringTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L601-L603 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L602-L604 type SignificantStringTermsAggregate struct { BgCount *int64 `json:"bg_count,omitempty"` Buckets BucketsSignificantStringTermsBucket `json:"buckets"` DocCount *int64 `json:"doc_count,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *SignificantStringTermsAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -53,8 +54,18 @@ func (s *SignificantStringTermsAggregate) UnmarshalJSON(data []byte) error { switch t { case "bg_count": - if err := dec.Decode(&s.BgCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BgCount = &value + case float64: + f := int64(v) + s.BgCount = &f } case "buckets": @@ -64,21 +75,33 @@ func (s *SignificantStringTermsAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]SignificantStringTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []SignificantStringTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCount = &value + case float64: + f := int64(v) + 
s.DocCount = &f } case "meta": diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significantstringtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significantstringtermsbucket.go index 21df80ef1..9e4cc678b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significantstringtermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significantstringtermsbucket.go @@ -16,25 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // SignificantStringTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L605-L607 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L606-L608 type SignificantStringTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` BgCount int64 `json:"bg_count"` @@ -44,6 +42,7 @@ type SignificantStringTermsBucket struct { } func (s *SignificantStringTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,466 +56,575 @@ func (s *SignificantStringTermsBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "bg_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := 
tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } 
- s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := 
NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err 
!= nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "bg_count": - if err := dec.Decode(&s.BgCount); err != nil { - return err + s.BgCount = value + case float64: + f := int64(v) + s.BgCount = f } case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "key": - if err := dec.Decode(&s.Key); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = o case "score": - if err := dec.Decode(&s.Score); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Score = f + case float64: + f := Float64(v) + s.Score = f + } + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err 
+ } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := 
NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil 
{ + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err 
!= nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := 
NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := 
dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } } } @@ -543,6 +651,7 @@ func (s SignificantStringTermsBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttermsaggregatebasesignificantlongtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttermsaggregatebasesignificantlongtermsbucket.go index 36562ed5a..4c99d04c1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttermsaggregatebasesignificantlongtermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttermsaggregatebasesignificantlongtermsbucket.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // SignificantTermsAggregateBaseSignificantLongTermsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L580-L585 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L581-L586 type SignificantTermsAggregateBaseSignificantLongTermsBucket struct { BgCount *int64 `json:"bg_count,omitempty"` Buckets BucketsSignificantLongTermsBucket `json:"buckets"` DocCount *int64 `json:"doc_count,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *SignificantTermsAggregateBaseSignificantLongTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -53,8 +54,18 @@ func (s *SignificantTermsAggregateBaseSignificantLongTermsBucket) UnmarshalJSON( switch t { case "bg_count": - if err := dec.Decode(&s.BgCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BgCount = &value + case float64: + f := int64(v) + s.BgCount = &f } case "buckets": @@ -64,21 +75,33 @@ func (s *SignificantTermsAggregateBaseSignificantLongTermsBucket) UnmarshalJSON( source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]SignificantLongTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []SignificantLongTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCount = 
&value + case float64: + f := int64(v) + s.DocCount = &f } case "meta": diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttermsaggregatebasesignificantstringtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttermsaggregatebasesignificantstringtermsbucket.go index 763ee7682..52caa4565 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttermsaggregatebasesignificantstringtermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttermsaggregatebasesignificantstringtermsbucket.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // SignificantTermsAggregateBaseSignificantStringTermsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L580-L585 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L581-L586 type SignificantTermsAggregateBaseSignificantStringTermsBucket struct { BgCount *int64 `json:"bg_count,omitempty"` Buckets BucketsSignificantStringTermsBucket `json:"buckets"` DocCount *int64 `json:"doc_count,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *SignificantTermsAggregateBaseSignificantStringTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -53,8 +54,18 @@ func (s *SignificantTermsAggregateBaseSignificantStringTermsBucket) UnmarshalJSO switch t { case "bg_count": - if err := dec.Decode(&s.BgCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BgCount = &value + case float64: + f := int64(v) + s.BgCount = &f } case "buckets": @@ -64,21 +75,33 @@ func (s *SignificantTermsAggregateBaseSignificantStringTermsBucket) UnmarshalJSO source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]SignificantStringTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []SignificantStringTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + 
s.DocCount = &value + case float64: + f := int64(v) + s.DocCount = &f } case "meta": diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttermsaggregatebasevoid.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttermsaggregatebasevoid.go index 72e012de1..81e9f659a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttermsaggregatebasevoid.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttermsaggregatebasevoid.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // SignificantTermsAggregateBaseVoid type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L580-L585 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L581-L586 type SignificantTermsAggregateBaseVoid struct { - BgCount *int64 `json:"bg_count,omitempty"` - Buckets BucketsVoid `json:"buckets"` - DocCount *int64 `json:"doc_count,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + BgCount *int64 `json:"bg_count,omitempty"` + Buckets BucketsVoid `json:"buckets"` + DocCount *int64 `json:"doc_count,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *SignificantTermsAggregateBaseVoid) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -53,8 +54,18 @@ func (s *SignificantTermsAggregateBaseVoid) UnmarshalJSON(data []byte) error { switch t { case "bg_count": - if 
err := dec.Decode(&s.BgCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BgCount = &value + case float64: + f := int64(v) + s.BgCount = &f } case "buckets": @@ -64,21 +75,33 @@ func (s *SignificantTermsAggregateBaseVoid) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': - o := make(map[string]struct{}, 0) - localDec.Decode(&o) + o := make(map[string]interface{}, 0) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': - o := []struct{}{} - localDec.Decode(&o) + o := []interface{}{} + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCount = &value + case float64: + f := int64(v) + s.DocCount = &f } case "meta": diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttermsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttermsaggregation.go index 95fe8eb30..9f37b331d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttermsaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttermsaggregation.go @@ -16,37 +16,232 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationexecutionhint" ) // SignificantTermsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L342-L358 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L770-L834 type SignificantTermsAggregation struct { - BackgroundFilter *Query `json:"background_filter,omitempty"` - ChiSquare *ChiSquareHeuristic `json:"chi_square,omitempty"` - Exclude []string `json:"exclude,omitempty"` - ExecutionHint *termsaggregationexecutionhint.TermsAggregationExecutionHint `json:"execution_hint,omitempty"` - Field *string `json:"field,omitempty"` - Gnd *GoogleNormalizedDistanceHeuristic `json:"gnd,omitempty"` - Include TermsInclude `json:"include,omitempty"` - Jlh *EmptyObject `json:"jlh,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - MinDocCount *int64 `json:"min_doc_count,omitempty"` - MutualInformation *MutualInformationHeuristic `json:"mutual_information,omitempty"` - Name *string `json:"name,omitempty"` - Percentage *PercentageScoreHeuristic `json:"percentage,omitempty"` - ScriptHeuristic *ScriptedHeuristic `json:"script_heuristic,omitempty"` - ShardMinDocCount *int64 `json:"shard_min_doc_count,omitempty"` - ShardSize *int `json:"shard_size,omitempty"` - Size *int `json:"size,omitempty"` + // BackgroundFilter A background filter that can be used to focus in on significant terms within + // a narrower context, instead of the entire index. 
+ BackgroundFilter *Query `json:"background_filter,omitempty"` + // ChiSquare Use Chi square, as described in "Information Retrieval", Manning et al., + // Chapter 13.5.2, as the significance score. + ChiSquare *ChiSquareHeuristic `json:"chi_square,omitempty"` + // Exclude Terms to exclude. + Exclude []string `json:"exclude,omitempty"` + // ExecutionHint Mechanism by which the aggregation should be executed: using field values + // directly or using global ordinals. + ExecutionHint *termsaggregationexecutionhint.TermsAggregationExecutionHint `json:"execution_hint,omitempty"` + // Field The field from which to return significant terms. + Field *string `json:"field,omitempty"` + // Gnd Use Google normalized distance as described in "The Google Similarity + // Distance", Cilibrasi and Vitanyi, 2007, as the significance score. + Gnd *GoogleNormalizedDistanceHeuristic `json:"gnd,omitempty"` + // Include Terms to include. + Include TermsInclude `json:"include,omitempty"` + // Jlh Use JLH score as the significance score. + Jlh *EmptyObject `json:"jlh,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // MinDocCount Only return terms that are found in more than `min_doc_count` hits. + MinDocCount *int64 `json:"min_doc_count,omitempty"` + // MutualInformation Use mutual information as described in "Information Retrieval", Manning et + // al., Chapter 13.5.1, as the significance score. + MutualInformation *MutualInformationHeuristic `json:"mutual_information,omitempty"` + Name *string `json:"name,omitempty"` + // Percentage A simple calculation of the number of documents in the foreground sample with + // a term divided by the number of documents in the background with the term. + Percentage *PercentageScoreHeuristic `json:"percentage,omitempty"` + // ScriptHeuristic Customized score, implemented via a script. 
+ ScriptHeuristic *ScriptedHeuristic `json:"script_heuristic,omitempty"` + // ShardMinDocCount Regulates the certainty a shard has if the term should actually be added to + // the candidate list or not with respect to the `min_doc_count`. + // Terms will only be considered if their local shard frequency within the set + // is higher than the `shard_min_doc_count`. + ShardMinDocCount *int64 `json:"shard_min_doc_count,omitempty"` + // ShardSize Can be used to control the volumes of candidate terms produced by each shard. + // By default, `shard_size` will be automatically estimated based on the number + // of shards and the `size` parameter. + ShardSize *int `json:"shard_size,omitempty"` + // Size The number of buckets returned out of the overall terms list. + Size *int `json:"size,omitempty"` +} + +func (s *SignificantTermsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "background_filter": + if err := dec.Decode(&s.BackgroundFilter); err != nil { + return err + } + + case "chi_square": + if err := dec.Decode(&s.ChiSquare); err != nil { + return err + } + + case "exclude": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Exclude = append(s.Exclude, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Exclude); err != nil { + return err + } + } + + case "execution_hint": + if err := dec.Decode(&s.ExecutionHint); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "gnd": + if err := dec.Decode(&s.Gnd); err != nil { + return err + } + + case "include": + if err := dec.Decode(&s.Include); err != nil { + return err + } + + case 
"jlh": + if err := dec.Decode(&s.Jlh); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "min_doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MinDocCount = &value + case float64: + f := int64(v) + s.MinDocCount = &f + } + + case "mutual_information": + if err := dec.Decode(&s.MutualInformation); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "percentage": + if err := dec.Decode(&s.Percentage); err != nil { + return err + } + + case "script_heuristic": + if err := dec.Decode(&s.ScriptHeuristic); err != nil { + return err + } + + case "shard_min_doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ShardMinDocCount = &value + case float64: + f := int64(v) + s.ShardMinDocCount = &f + } + + case "shard_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil } // NewSignificantTermsAggregation returns a SignificantTermsAggregation. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttextaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttextaggregation.go index a07e9d1e2..45fe35066 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttextaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/significanttextaggregation.go @@ -16,39 +16,266 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationexecutionhint" ) // SignificantTextAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L360-L378 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L836-L908 type SignificantTextAggregation struct { - BackgroundFilter *Query `json:"background_filter,omitempty"` - ChiSquare *ChiSquareHeuristic `json:"chi_square,omitempty"` - Exclude []string `json:"exclude,omitempty"` - ExecutionHint *termsaggregationexecutionhint.TermsAggregationExecutionHint `json:"execution_hint,omitempty"` - Field *string `json:"field,omitempty"` - FilterDuplicateText *bool `json:"filter_duplicate_text,omitempty"` - Gnd *GoogleNormalizedDistanceHeuristic `json:"gnd,omitempty"` - Include []string `json:"include,omitempty"` - Jlh *EmptyObject `json:"jlh,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - MinDocCount *int64 `json:"min_doc_count,omitempty"` - MutualInformation 
*MutualInformationHeuristic `json:"mutual_information,omitempty"` - Name *string `json:"name,omitempty"` - Percentage *PercentageScoreHeuristic `json:"percentage,omitempty"` - ScriptHeuristic *ScriptedHeuristic `json:"script_heuristic,omitempty"` - ShardMinDocCount *int64 `json:"shard_min_doc_count,omitempty"` - ShardSize *int `json:"shard_size,omitempty"` - Size *int `json:"size,omitempty"` - SourceFields []string `json:"source_fields,omitempty"` + // BackgroundFilter A background filter that can be used to focus in on significant terms within + // a narrower context, instead of the entire index. + BackgroundFilter *Query `json:"background_filter,omitempty"` + // ChiSquare Use Chi square, as described in "Information Retrieval", Manning et al., + // Chapter 13.5.2, as the significance score. + ChiSquare *ChiSquareHeuristic `json:"chi_square,omitempty"` + // Exclude Values to exclude. + Exclude []string `json:"exclude,omitempty"` + // ExecutionHint Determines whether the aggregation will use field values directly or global + // ordinals. + ExecutionHint *termsaggregationexecutionhint.TermsAggregationExecutionHint `json:"execution_hint,omitempty"` + // Field The field from which to return significant text. + Field *string `json:"field,omitempty"` + // FilterDuplicateText Whether to out duplicate text to deal with noisy data. + FilterDuplicateText *bool `json:"filter_duplicate_text,omitempty"` + // Gnd Use Google normalized distance as described in "The Google Similarity + // Distance", Cilibrasi and Vitanyi, 2007, as the significance score. + Gnd *GoogleNormalizedDistanceHeuristic `json:"gnd,omitempty"` + // Include Values to include. + Include TermsInclude `json:"include,omitempty"` + // Jlh Use JLH score as the significance score. + Jlh *EmptyObject `json:"jlh,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // MinDocCount Only return values that are found in more than `min_doc_count` hits. 
+ MinDocCount *int64 `json:"min_doc_count,omitempty"` + // MutualInformation Use mutual information as described in "Information Retrieval", Manning et + // al., Chapter 13.5.1, as the significance score. + MutualInformation *MutualInformationHeuristic `json:"mutual_information,omitempty"` + Name *string `json:"name,omitempty"` + // Percentage A simple calculation of the number of documents in the foreground sample with + // a term divided by the number of documents in the background with the term. + Percentage *PercentageScoreHeuristic `json:"percentage,omitempty"` + // ScriptHeuristic Customized score, implemented via a script. + ScriptHeuristic *ScriptedHeuristic `json:"script_heuristic,omitempty"` + // ShardMinDocCount Regulates the certainty a shard has if the values should actually be added to + // the candidate list or not with respect to the min_doc_count. + // Values will only be considered if their local shard frequency within the set + // is higher than the `shard_min_doc_count`. + ShardMinDocCount *int64 `json:"shard_min_doc_count,omitempty"` + // ShardSize The number of candidate terms produced by each shard. + // By default, `shard_size` will be automatically estimated based on the number + // of shards and the `size` parameter. + ShardSize *int `json:"shard_size,omitempty"` + // Size The number of buckets returned out of the overall terms list. + Size *int `json:"size,omitempty"` + // SourceFields Overrides the JSON `_source` fields from which text will be analyzed. 
+ SourceFields []string `json:"source_fields,omitempty"` +} + +func (s *SignificantTextAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "background_filter": + if err := dec.Decode(&s.BackgroundFilter); err != nil { + return err + } + + case "chi_square": + if err := dec.Decode(&s.ChiSquare); err != nil { + return err + } + + case "exclude": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Exclude = append(s.Exclude, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Exclude); err != nil { + return err + } + } + + case "execution_hint": + if err := dec.Decode(&s.ExecutionHint); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "filter_duplicate_text": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.FilterDuplicateText = &value + case bool: + s.FilterDuplicateText = &v + } + + case "gnd": + if err := dec.Decode(&s.Gnd); err != nil { + return err + } + + case "include": + if err := dec.Decode(&s.Include); err != nil { + return err + } + + case "jlh": + if err := dec.Decode(&s.Jlh); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "min_doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MinDocCount = &value + case float64: + f := int64(v) + s.MinDocCount = &f + } + + case "mutual_information": + if err := 
dec.Decode(&s.MutualInformation); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "percentage": + if err := dec.Decode(&s.Percentage); err != nil { + return err + } + + case "script_heuristic": + if err := dec.Decode(&s.ScriptHeuristic); err != nil { + return err + } + + case "shard_min_doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ShardMinDocCount = &value + case float64: + f := int64(v) + s.ShardMinDocCount = &f + } + + case "shard_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "source_fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.SourceFields = append(s.SourceFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.SourceFields); err != nil { + return err + } + } + + } + } + return nil } // NewSignificantTextAggregation returns a SignificantTextAggregation. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simpleanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simpleanalyzer.go index cdca435f6..59c16cec5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simpleanalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simpleanalyzer.go @@ -16,23 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // SimpleAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/analyzers.ts#L83-L86 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/analyzers.ts#L83-L86 type SimpleAnalyzer struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *SimpleAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SimpleAnalyzer) MarshalJSON() ([]byte, error) { + type innerSimpleAnalyzer SimpleAnalyzer + tmp := innerSimpleAnalyzer{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "simple" + + return json.Marshal(tmp) +} + // NewSimpleAnalyzer returns a SimpleAnalyzer. 
func NewSimpleAnalyzer() *SimpleAnalyzer { r := &SimpleAnalyzer{} - r.Type = "simple" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simplemovingaverageaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simplemovingaverageaggregation.go index 8431e42b1..c59f99095 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simplemovingaverageaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simplemovingaverageaggregation.go @@ -16,38 +16,43 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // SimpleMovingAverageAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L207-L210 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L247-L250 type SimpleMovingAverageAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. 
- BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Minimize *bool `json:"minimize,omitempty"` - Model string `json:"model,omitempty"` - Name *string `json:"name,omitempty"` - Predict *int `json:"predict,omitempty"` - Settings EmptyObject `json:"settings"` - Window *int `json:"window,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Minimize *bool `json:"minimize,omitempty"` + Model string `json:"model,omitempty"` + Name *string `json:"name,omitempty"` + Predict *int `json:"predict,omitempty"` + Settings EmptyObject `json:"settings"` + Window *int `json:"window,omitempty"` } func (s *SimpleMovingAverageAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -67,9 +72,16 @@ func (s *SimpleMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -82,8 +94,17 @@ func (s *SimpleMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "minimize": - if err := dec.Decode(&s.Minimize); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + 
if err != nil { + return err + } + s.Minimize = &value + case bool: + s.Minimize = &v } case "model": @@ -92,13 +113,31 @@ func (s *SimpleMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o case "predict": - if err := dec.Decode(&s.Predict); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Predict = &value + case float64: + f := int(v) + s.Predict = &f } case "settings": @@ -107,8 +146,19 @@ func (s *SimpleMovingAverageAggregation) UnmarshalJSON(data []byte) error { } case "window": - if err := dec.Decode(&s.Window); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Window = &value + case float64: + f := int(v) + s.Window = &f } } @@ -116,11 +166,30 @@ func (s *SimpleMovingAverageAggregation) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s SimpleMovingAverageAggregation) MarshalJSON() ([]byte, error) { + type innerSimpleMovingAverageAggregation SimpleMovingAverageAggregation + tmp := innerSimpleMovingAverageAggregation{ + BucketsPath: s.BucketsPath, + Format: s.Format, + GapPolicy: s.GapPolicy, + Meta: s.Meta, + Minimize: s.Minimize, + Model: s.Model, + Name: s.Name, + Predict: s.Predict, + Settings: s.Settings, + Window: s.Window, + } + + tmp.Model = "simple" + + return json.Marshal(tmp) +} + // NewSimpleMovingAverageAggregation returns a SimpleMovingAverageAggregation. 
func NewSimpleMovingAverageAggregation() *SimpleMovingAverageAggregation { r := &SimpleMovingAverageAggregation{} - r.Model = "simple" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simplequerystringflags.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simplequerystringflags.go index 60f71b3c7..63421a607 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simplequerystringflags.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simplequerystringflags.go @@ -16,14 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types -// SimpleQueryStringFlags holds the union for the following types: +// SimpleQueryStringFlags type alias. // -// simplequerystringflag.SimpleQueryStringFlag -// string -// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L271-L276 -type SimpleQueryStringFlags interface{} +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L702-L706 +type SimpleQueryStringFlags PipeSeparatedFlagsSimpleQueryStringFlag diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simplequerystringquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simplequerystringquery.go index 20a383468..f784b8aa3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simplequerystringquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simplequerystringquery.go @@ -16,43 +16,71 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/simplequerystringflag" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator" ) // SimpleQueryStringQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/fulltext.ts#L294-L312 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/fulltext.ts#L765-L830 type SimpleQueryStringQuery struct { - AnalyzeWildcard *bool `json:"analyze_wildcard,omitempty"` - Analyzer *string `json:"analyzer,omitempty"` - AutoGenerateSynonymsPhraseQuery *bool `json:"auto_generate_synonyms_phrase_query,omitempty"` - Boost *float32 `json:"boost,omitempty"` - DefaultOperator *operator.Operator `json:"default_operator,omitempty"` - Fields []string `json:"fields,omitempty"` - Flags SimpleQueryStringFlags `json:"flags,omitempty"` - FuzzyMaxExpansions *int `json:"fuzzy_max_expansions,omitempty"` - FuzzyPrefixLength *int `json:"fuzzy_prefix_length,omitempty"` - FuzzyTranspositions *bool `json:"fuzzy_transpositions,omitempty"` - Lenient *bool `json:"lenient,omitempty"` - MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` - Query string `json:"query"` - QueryName_ *string `json:"_name,omitempty"` - QuoteFieldSuffix *string `json:"quote_field_suffix,omitempty"` + // AnalyzeWildcard If `true`, the query attempts to analyze wildcard terms in the query string. 
+ AnalyzeWildcard *bool `json:"analyze_wildcard,omitempty"` + // Analyzer Analyzer used to convert text in the query string into tokens. + Analyzer *string `json:"analyzer,omitempty"` + // AutoGenerateSynonymsPhraseQuery If `true`, the parser creates a match_phrase query for each multi-position + // token. + AutoGenerateSynonymsPhraseQuery *bool `json:"auto_generate_synonyms_phrase_query,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // DefaultOperator Default boolean logic used to interpret text in the query string if no + // operators are specified. + DefaultOperator *operator.Operator `json:"default_operator,omitempty"` + // Fields Array of fields you wish to search. + // Accepts wildcard expressions. + // You also can boost relevance scores for matches to particular fields using a + // caret (`^`) notation. + // Defaults to the `index.query.default_field index` setting, which has a + // default value of `*`. + Fields []string `json:"fields,omitempty"` + // Flags List of enabled operators for the simple query string syntax. + Flags PipeSeparatedFlagsSimpleQueryStringFlag `json:"flags,omitempty"` + // FuzzyMaxExpansions Maximum number of terms to which the query expands for fuzzy matching. + FuzzyMaxExpansions *int `json:"fuzzy_max_expansions,omitempty"` + // FuzzyPrefixLength Number of beginning characters left unchanged for fuzzy matching. + FuzzyPrefixLength *int `json:"fuzzy_prefix_length,omitempty"` + // FuzzyTranspositions If `true`, edits for fuzzy matching include transpositions of two adjacent + // characters (for example, `ab` to `ba`). 
+ FuzzyTranspositions *bool `json:"fuzzy_transpositions,omitempty"` + // Lenient If `true`, format-based errors, such as providing a text value for a numeric + // field, are ignored. + Lenient *bool `json:"lenient,omitempty"` + // MinimumShouldMatch Minimum number of clauses that must match for a document to be returned. + MinimumShouldMatch MinimumShouldMatch `json:"minimum_should_match,omitempty"` + // Query Query string in the simple query string syntax you wish to parse and use for + // search. + Query string `json:"query"` + QueryName_ *string `json:"_name,omitempty"` + // QuoteFieldSuffix Suffix appended to quoted text in the query string. + QuoteFieldSuffix *string `json:"quote_field_suffix,omitempty"` } func (s *SimpleQueryStringQuery) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -67,23 +95,59 @@ func (s *SimpleQueryStringQuery) UnmarshalJSON(data []byte) error { switch t { case "analyze_wildcard": - if err := dec.Decode(&s.AnalyzeWildcard); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AnalyzeWildcard = &value + case bool: + s.AnalyzeWildcard = &v } case "analyzer": - if err := dec.Decode(&s.Analyzer); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o case "auto_generate_synonyms_phrase_query": - if err := dec.Decode(&s.AutoGenerateSynonymsPhraseQuery); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AutoGenerateSynonymsPhraseQuery = &value + case bool: + s.AutoGenerateSynonymsPhraseQuery = &v } case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp 
interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f } case "default_operator": @@ -97,42 +161,68 @@ func (s *SimpleQueryStringQuery) UnmarshalJSON(data []byte) error { } case "flags": - rawMsg := json.RawMessage{} - dec.Decode(&rawMsg) - source := bytes.NewReader(rawMsg) - localDec := json.NewDecoder(source) - switch rawMsg[0] { - case '{': - o := &simplequerystringflag.SimpleQueryStringFlag{} - if err := localDec.Decode(&o); err != nil { - return err - } - s.Flags = *o - - default: - if err := localDec.Decode(&s.Flags); err != nil { - return err - } + if err := dec.Decode(&s.Flags); err != nil { + return err } case "fuzzy_max_expansions": - if err := dec.Decode(&s.FuzzyMaxExpansions); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FuzzyMaxExpansions = &value + case float64: + f := int(v) + s.FuzzyMaxExpansions = &f } case "fuzzy_prefix_length": - if err := dec.Decode(&s.FuzzyPrefixLength); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FuzzyPrefixLength = &value + case float64: + f := int(v) + s.FuzzyPrefixLength = &f } case "fuzzy_transpositions": - if err := dec.Decode(&s.FuzzyTranspositions); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.FuzzyTranspositions = &value + case bool: + s.FuzzyTranspositions = &v } case "lenient": - if err := dec.Decode(&s.Lenient); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err 
:= strconv.ParseBool(v) + if err != nil { + return err + } + s.Lenient = &value + case bool: + s.Lenient = &v } case "minimum_should_match": @@ -141,19 +231,40 @@ func (s *SimpleQueryStringQuery) UnmarshalJSON(data []byte) error { } case "query": - if err := dec.Decode(&s.Query); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o case "_name": - if err := dec.Decode(&s.QueryName_); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o case "quote_field_suffix": - if err := dec.Decode(&s.QuoteFieldSuffix); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QuoteFieldSuffix = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simplevalueaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simplevalueaggregate.go index d735514bb..d832f0a1a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simplevalueaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simplevalueaggregate.go @@ -16,19 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // SimpleValueAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L223-L224 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L224-L225 type SimpleValueAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to // aggregate, // unless specified otherwise. @@ -36,6 +40,48 @@ type SimpleValueAggregate struct { ValueAsString *string `json:"value_as_string,omitempty"` } +func (s *SimpleValueAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + // NewSimpleValueAggregate returns a SimpleValueAggregate. func NewSimpleValueAggregate() *SimpleValueAggregate { r := &SimpleValueAggregate{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simulatedactions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simulatedactions.go index 2671bf53d..7297a8430 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simulatedactions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simulatedactions.go @@ -16,19 +16,71 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SimulatedActions type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Action.ts#L93-L97 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Action.ts#L96-L100 type SimulatedActions struct { Actions []string `json:"actions"` All *SimulatedActions `json:"all,omitempty"` UseAll bool `json:"use_all"` } +func (s *SimulatedActions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return err + } + + case "all": + if err := dec.Decode(&s.All); err != nil { + return err + } + + case "use_all": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.UseAll = value + case bool: + s.UseAll = v + } + + } + } + return nil +} + // NewSimulatedActions returns a SimulatedActions. 
func NewSimulatedActions() *SimulatedActions { r := &SimulatedActions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simulateingest.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simulateingest.go index 4643ec324..fdd6791a9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simulateingest.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/simulateingest.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // SimulateIngest type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/simulate/types.ts#L28-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/simulate/types.ts#L28-L31 type SimulateIngest struct { Pipeline *string `json:"pipeline,omitempty"` Timestamp DateTime `json:"timestamp"` } +func (s *SimulateIngest) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "pipeline": + if err := dec.Decode(&s.Pipeline); err != nil { + return err + } + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + } + } + return nil +} + // NewSimulateIngest returns a SimulateIngest. 
func NewSimulateIngest() *SimulateIngest { r := &SimulateIngest{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sizefield.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sizefield.go index 30ab3e644..88d312fcd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sizefield.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sizefield.go @@ -16,17 +16,59 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SizeField type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/meta-fields.ts#L54-L56 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/meta-fields.ts#L54-L56 type SizeField struct { Enabled bool `json:"enabled"` } +func (s *SizeField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + // NewSizeField returns a SizeField. 
func NewSizeField() *SizeField { r := &SizeField{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackaction.go index c00581705..bdeb43d36 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackaction.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SlackAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L91-L94 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L91-L94 type SlackAction struct { Account *string `json:"account,omitempty"` Message SlackMessage `json:"message"` } +func (s *SlackAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "account": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Account = &o + + case "message": + if err := dec.Decode(&s.Message); err != nil { + return err + } + + } + } + return nil +} + // NewSlackAction returns a SlackAction. 
func NewSlackAction() *SlackAction { r := &SlackAction{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackattachment.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackattachment.go index 7abc1aeb1..dd832983f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackattachment.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackattachment.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SlackAttachment type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L101-L117 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L101-L117 type SlackAttachment struct { AuthorIcon *string `json:"author_icon,omitempty"` AuthorLink *string `json:"author_link,omitempty"` @@ -41,6 +49,192 @@ type SlackAttachment struct { Ts *int64 `json:"ts,omitempty"` } +func (s *SlackAttachment) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "author_icon": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AuthorIcon = &o + + case "author_link": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, 
err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AuthorLink = &o + + case "author_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AuthorName = o + + case "color": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Color = &o + + case "fallback": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Fallback = &o + + case "fields": + if err := dec.Decode(&s.Fields); err != nil { + return err + } + + case "footer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Footer = &o + + case "footer_icon": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FooterIcon = &o + + case "image_url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ImageUrl = &o + + case "pretext": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pretext = &o + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = &o + + case "thumb_url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); 
err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ThumbUrl = &o + + case "title": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Title = o + + case "title_link": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TitleLink = &o + + case "ts": + if err := dec.Decode(&s.Ts); err != nil { + return err + } + + } + } + return nil +} + // NewSlackAttachment returns a SlackAttachment. func NewSlackAttachment() *SlackAttachment { r := &SlackAttachment{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackattachmentfield.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackattachmentfield.go index d70224079..b51ec8c4e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackattachmentfield.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackattachmentfield.go @@ -16,19 +16,85 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SlackAttachmentField type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L119-L123 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L119-L123 type SlackAttachmentField struct { Int bool `json:"short"` Title string `json:"title"` Value string `json:"value"` } +func (s *SlackAttachmentField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "short": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Int = value + case bool: + s.Int = v + } + + case "title": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Title = o + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Value = o + + } + } + return nil +} + // NewSlackAttachmentField returns a SlackAttachmentField. func NewSlackAttachmentField() *SlackAttachmentField { r := &SlackAttachmentField{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackdynamicattachment.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackdynamicattachment.go index a38b169e9..426b4ce29 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackdynamicattachment.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackdynamicattachment.go @@ -16,18 +16,63 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SlackDynamicAttachment type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L125-L128 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L125-L128 type SlackDynamicAttachment struct { AttachmentTemplate SlackAttachment `json:"attachment_template"` ListPath string `json:"list_path"` } +func (s *SlackDynamicAttachment) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attachment_template": + if err := dec.Decode(&s.AttachmentTemplate); err != nil { + return err + } + + case "list_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ListPath = o + + } + } + return nil +} + // NewSlackDynamicAttachment returns a SlackDynamicAttachment. 
func NewSlackDynamicAttachment() *SlackDynamicAttachment { r := &SlackDynamicAttachment{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackmessage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackmessage.go index 80fcf8a71..e747145ce 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackmessage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackmessage.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SlackMessage type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L130-L137 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L130-L137 type SlackMessage struct { Attachments []SlackAttachment `json:"attachments"` DynamicAttachments *SlackDynamicAttachment `json:"dynamic_attachments,omitempty"` @@ -32,6 +40,77 @@ type SlackMessage struct { To []string `json:"to"` } +func (s *SlackMessage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attachments": + if err := dec.Decode(&s.Attachments); err != nil { + return err + } + + case "dynamic_attachments": + if err := dec.Decode(&s.DynamicAttachments); err != nil { + return err + } + + case "from": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + 
o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.From = o + + case "icon": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Icon = &o + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + case "to": + if err := dec.Decode(&s.To); err != nil { + return err + } + + } + } + return nil +} + // NewSlackMessage returns a SlackMessage. func NewSlackMessage() *SlackMessage { r := &SlackMessage{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackresult.go index 89e7b02cd..39c2f6394 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slackresult.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SlackResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L96-L99 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L96-L99 type SlackResult struct { Account *string `json:"account,omitempty"` Message SlackMessage `json:"message"` } +func (s *SlackResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "account": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Account = &o + + case "message": + if err := dec.Decode(&s.Message); err != nil { + return err + } + + } + } + return nil +} + // NewSlackResult returns a SlackResult. func NewSlackResult() *SlackResult { r := &SlackResult{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slicedscroll.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slicedscroll.go index ce62c0200..44275c1e4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slicedscroll.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slicedscroll.go @@ -16,19 +16,73 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SlicedScroll type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/SlicedScroll.ts#L23-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/SlicedScroll.ts#L23-L27 type SlicedScroll struct { Field *string `json:"field,omitempty"` Id string `json:"id"` Max int `json:"max"` } +func (s *SlicedScroll) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "max": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Max = value + case float64: + f := int(v) + s.Max = f + } + + } + } + return nil +} + // NewSlicedScroll returns a SlicedScroll. func NewSlicedScroll() *SlicedScroll { r := &SlicedScroll{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slices.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slices.go index 7bc3de808..99c5d0b7e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slices.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slices.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // int // slicescalculation.SlicesCalculation // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L326-L331 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L361-L366 type Slices interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slm.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slm.go index d5bbe8ab8..f9a604941 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slm.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slm.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Slm type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L440-L443 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L449-L452 type Slm struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -30,6 +38,75 @@ type Slm struct { PolicyStats *Statistics `json:"policy_stats,omitempty"` } +func (s *Slm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "policy_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PolicyCount = &value + case float64: + f := int(v) + s.PolicyCount = &f + } + + case "policy_stats": + if err := dec.Decode(&s.PolicyStats); err != nil { + return err + } + + } + } + return nil +} + // NewSlm returns a Slm. func NewSlm() *Slm { r := &Slm{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slmindicator.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slmindicator.go new file mode 100644 index 000000000..9995e969d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slmindicator.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indicatorhealthstatus" +) + +// SlmIndicator type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L155-L159 +type SlmIndicator struct { + Details *SlmIndicatorDetails `json:"details,omitempty"` + Diagnosis []Diagnosis `json:"diagnosis,omitempty"` + Impacts []Impact `json:"impacts,omitempty"` + Status indicatorhealthstatus.IndicatorHealthStatus `json:"status"` + Symptom string `json:"symptom"` +} + +func (s *SlmIndicator) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "details": + if err := dec.Decode(&s.Details); err != nil { + return err + } + + case "diagnosis": + if err := dec.Decode(&s.Diagnosis); err != nil { + return err + } + + case "impacts": + if err := dec.Decode(&s.Impacts); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "symptom": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Symptom = o + + } + } + return nil +} + +// NewSlmIndicator returns a SlmIndicator. +func NewSlmIndicator() *SlmIndicator { + r := &SlmIndicator{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slmindicatordetails.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slmindicatordetails.go new file mode 100644 index 000000000..01087eb2d --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slmindicatordetails.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/lifecycleoperationmode" +) + +// SlmIndicatorDetails type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L160-L164 +type SlmIndicatorDetails struct { + Policies int64 `json:"policies"` + SlmStatus lifecycleoperationmode.LifecycleOperationMode `json:"slm_status"` + UnhealthyPolicies SlmIndicatorUnhealthyPolicies `json:"unhealthy_policies"` +} + +func (s *SlmIndicatorDetails) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "policies": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Policies = value + case float64: + f := int64(v) + s.Policies = f + } + + case "slm_status": + if err := dec.Decode(&s.SlmStatus); err != nil { + return err + } + + case "unhealthy_policies": + if err := 
dec.Decode(&s.UnhealthyPolicies); err != nil { + return err + } + + } + } + return nil +} + +// NewSlmIndicatorDetails returns a SlmIndicatorDetails. +func NewSlmIndicatorDetails() *SlmIndicatorDetails { + r := &SlmIndicatorDetails{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slmindicatorunhealthypolicies.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slmindicatorunhealthypolicies.go new file mode 100644 index 000000000..dea3cb42c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slmindicatorunhealthypolicies.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// SlmIndicatorUnhealthyPolicies type. 
+// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/health_report/types.ts#L166-L169 +type SlmIndicatorUnhealthyPolicies struct { + Count int64 `json:"count"` + InvocationsSinceLastSuccess map[string]int64 `json:"invocations_since_last_success,omitempty"` +} + +func (s *SlmIndicatorUnhealthyPolicies) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "invocations_since_last_success": + if s.InvocationsSinceLastSuccess == nil { + s.InvocationsSinceLastSuccess = make(map[string]int64, 0) + } + if err := dec.Decode(&s.InvocationsSinceLastSuccess); err != nil { + return err + } + + } + } + return nil +} + +// NewSlmIndicatorUnhealthyPolicies returns a SlmIndicatorUnhealthyPolicies. +func NewSlmIndicatorUnhealthyPolicies() *SlmIndicatorUnhealthyPolicies { + r := &SlmIndicatorUnhealthyPolicies{ + InvocationsSinceLastSuccess: make(map[string]int64, 0), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slmpolicy.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slmpolicy.go index e62da9c98..4d1945077 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slmpolicy.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slmpolicy.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SLMPolicy type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/_types/SnapshotLifecycle.ts#L76-L82 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/_types/SnapshotLifecycle.ts#L76-L82 type SLMPolicy struct { Config *Configuration `json:"config,omitempty"` Name string `json:"name"` @@ -31,6 +39,58 @@ type SLMPolicy struct { Schedule string `json:"schedule"` } +func (s *SLMPolicy) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "config": + if err := dec.Decode(&s.Config); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "repository": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Repository = o + + case "retention": + if err := dec.Decode(&s.Retention); err != nil { + return err + } + + case "schedule": + if err := dec.Decode(&s.Schedule); err != nil { + return err + } + + } + } + return nil +} + // NewSLMPolicy returns a SLMPolicy. 
func NewSLMPolicy() *SLMPolicy { r := &SLMPolicy{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slowlogsettings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slowlogsettings.go index c0081a564..7822f70ec 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slowlogsettings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slowlogsettings.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SlowlogSettings type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L472-L477 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L472-L477 type SlowlogSettings struct { Level *string `json:"level,omitempty"` Reformat *bool `json:"reformat,omitempty"` @@ -30,6 +38,73 @@ type SlowlogSettings struct { Threshold *SlowlogTresholds `json:"threshold,omitempty"` } +func (s *SlowlogSettings) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "level": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Level = &o + + case "reformat": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseBool(v) + if err != nil { + return err + } + s.Reformat = &value + case bool: + s.Reformat = &v + } + + case "source": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Source = &value + case float64: + f := int(v) + s.Source = &f + } + + case "threshold": + if err := dec.Decode(&s.Threshold); err != nil { + return err + } + + } + } + return nil +} + // NewSlowlogSettings returns a SlowlogSettings. func NewSlowlogSettings() *SlowlogSettings { r := &SlowlogSettings{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slowlogtresholdlevels.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slowlogtresholdlevels.go index 37969edf0..ed49c4c9f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slowlogtresholdlevels.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slowlogtresholdlevels.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // SlowlogTresholdLevels type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L490-L495 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L484-L489 type SlowlogTresholdLevels struct { Debug Duration `json:"debug,omitempty"` Info Duration `json:"info,omitempty"` @@ -30,6 +37,46 @@ type SlowlogTresholdLevels struct { Warn Duration `json:"warn,omitempty"` } +func (s *SlowlogTresholdLevels) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "debug": + if err := dec.Decode(&s.Debug); err != nil { + return err + } + + case "info": + if err := dec.Decode(&s.Info); err != nil { + return err + } + + case "trace": + if err := dec.Decode(&s.Trace); err != nil { + return err + } + + case "warn": + if err := dec.Decode(&s.Warn); err != nil { + return err + } + + } + } + return nil +} + // NewSlowlogTresholdLevels returns a SlowlogTresholdLevels. func NewSlowlogTresholdLevels() *SlowlogTresholdLevels { r := &SlowlogTresholdLevels{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slowlogtresholds.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slowlogtresholds.go index 8d325d0a3..d186fd2b2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slowlogtresholds.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/slowlogtresholds.go @@ -16,19 +16,15 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // SlowlogTresholds type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L479-L488 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L479-L482 type SlowlogTresholds struct { Fetch *SlowlogTresholdLevels `json:"fetch,omitempty"` - // Index The indexing slow log, similar in functionality to the search slow log. The - // log file name ends with `_index_indexing_slowlog.json`. - // Log and the thresholds are configured in the same way as the search slowlog. - Index *SlowlogTresholdLevels `json:"index,omitempty"` Query *SlowlogTresholdLevels `json:"query,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/smoothingmodelcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/smoothingmodelcontainer.go index 8563c3ba1..aabb27be3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/smoothingmodelcontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/smoothingmodelcontainer.go @@ -16,17 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // SmoothingModelContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L224-L231 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L442-L458 type SmoothingModelContainer struct { - Laplace *LaplaceSmoothingModel `json:"laplace,omitempty"` + // Laplace A smoothing model that uses an additive smoothing where a constant (typically + // `1.0` or smaller) is added to all counts to balance weights. + Laplace *LaplaceSmoothingModel `json:"laplace,omitempty"` + // LinearInterpolation A smoothing model that takes the weighted mean of the unigrams, bigrams, and + // trigrams based on user supplied weights (lambdas). LinearInterpolation *LinearInterpolationSmoothingModel `json:"linear_interpolation,omitempty"` - StupidBackoff *StupidBackoffSmoothingModel `json:"stupid_backoff,omitempty"` + // StupidBackoff A simple backoff model that backs off to lower order n-gram models if the + // higher order count is `0` and discounts the lower order n-gram model by a + // constant factor. + StupidBackoff *StupidBackoffSmoothingModel `json:"stupid_backoff,omitempty"` } // NewSmoothingModelContainer returns a SmoothingModelContainer. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotindexstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotindexstats.go index c662c18fc..f2e048e10 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotindexstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotindexstats.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // SnapshotIndexStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/_types/SnapshotIndexStats.ts#L25-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/_types/SnapshotIndexStats.ts#L25-L29 type SnapshotIndexStats struct { Shards map[string]SnapshotShardsStatus `json:"shards"` ShardsStats SnapshotShardsStats `json:"shards_stats"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotinfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotinfo.go index c8de77e47..ab2f34219 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotinfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotinfo.go @@ -16,39 +16,194 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // SnapshotInfo type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/_types/SnapshotInfo.ts#L41-L65 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/_types/SnapshotInfo.ts#L41-L71 type SnapshotInfo struct { - DataStreams []string `json:"data_streams"` - Duration Duration `json:"duration,omitempty"` - DurationInMillis *int64 `json:"duration_in_millis,omitempty"` - EndTime DateTime `json:"end_time,omitempty"` - EndTimeInMillis *int64 `json:"end_time_in_millis,omitempty"` - Failures []SnapshotShardFailure `json:"failures,omitempty"` - FeatureStates []InfoFeatureState `json:"feature_states,omitempty"` - IncludeGlobalState *bool `json:"include_global_state,omitempty"` - IndexDetails map[string]IndexDetails `json:"index_details,omitempty"` - Indices []string `json:"indices,omitempty"` - Metadata map[string]json.RawMessage `json:"metadata,omitempty"` - Reason *string `json:"reason,omitempty"` - Repository *string `json:"repository,omitempty"` - Shards *ShardStatistics `json:"shards,omitempty"` - Snapshot string `json:"snapshot"` - StartTime DateTime `json:"start_time,omitempty"` - StartTimeInMillis *int64 `json:"start_time_in_millis,omitempty"` - State *string `json:"state,omitempty"` - Uuid string `json:"uuid"` - Version *string `json:"version,omitempty"` - VersionId *int64 `json:"version_id,omitempty"` + DataStreams []string `json:"data_streams"` + Duration Duration `json:"duration,omitempty"` + DurationInMillis *int64 `json:"duration_in_millis,omitempty"` + EndTime DateTime `json:"end_time,omitempty"` + EndTimeInMillis *int64 `json:"end_time_in_millis,omitempty"` + Failures []SnapshotShardFailure `json:"failures,omitempty"` + FeatureStates []InfoFeatureState `json:"feature_states,omitempty"` + IncludeGlobalState *bool `json:"include_global_state,omitempty"` + IndexDetails map[string]IndexDetails `json:"index_details,omitempty"` + 
Indices []string `json:"indices,omitempty"` + Metadata Metadata `json:"metadata,omitempty"` + Reason *string `json:"reason,omitempty"` + Repository *string `json:"repository,omitempty"` + Shards *ShardStatistics `json:"shards,omitempty"` + Snapshot string `json:"snapshot"` + StartTime DateTime `json:"start_time,omitempty"` + StartTimeInMillis *int64 `json:"start_time_in_millis,omitempty"` + State *string `json:"state,omitempty"` + Uuid string `json:"uuid"` + Version *string `json:"version,omitempty"` + VersionId *int64 `json:"version_id,omitempty"` +} + +func (s *SnapshotInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data_streams": + if err := dec.Decode(&s.DataStreams); err != nil { + return err + } + + case "duration": + if err := dec.Decode(&s.Duration); err != nil { + return err + } + + case "duration_in_millis": + if err := dec.Decode(&s.DurationInMillis); err != nil { + return err + } + + case "end_time": + if err := dec.Decode(&s.EndTime); err != nil { + return err + } + + case "end_time_in_millis": + if err := dec.Decode(&s.EndTimeInMillis); err != nil { + return err + } + + case "failures": + if err := dec.Decode(&s.Failures); err != nil { + return err + } + + case "feature_states": + if err := dec.Decode(&s.FeatureStates); err != nil { + return err + } + + case "include_global_state": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IncludeGlobalState = &value + case bool: + s.IncludeGlobalState = &v + } + + case "index_details": + if s.IndexDetails == nil { + s.IndexDetails = make(map[string]IndexDetails, 0) + } + if err := dec.Decode(&s.IndexDetails); err != nil { + return err + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return err + } + + 
case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return err + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + case "repository": + if err := dec.Decode(&s.Repository); err != nil { + return err + } + + case "shards": + if err := dec.Decode(&s.Shards); err != nil { + return err + } + + case "snapshot": + if err := dec.Decode(&s.Snapshot); err != nil { + return err + } + + case "start_time": + if err := dec.Decode(&s.StartTime); err != nil { + return err + } + + case "start_time_in_millis": + if err := dec.Decode(&s.StartTimeInMillis); err != nil { + return err + } + + case "state": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.State = &o + + case "uuid": + if err := dec.Decode(&s.Uuid); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + case "version_id": + if err := dec.Decode(&s.VersionId); err != nil { + return err + } + + } + } + return nil } // NewSnapshotInfo returns a SnapshotInfo. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotlifecycle.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotlifecycle.go index 673ae9b54..40453544f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotlifecycle.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotlifecycle.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // SnapshotLifecycle type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/_types/SnapshotLifecycle.ts#L38-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/_types/SnapshotLifecycle.ts#L38-L49 type SnapshotLifecycle struct { InProgress *InProgress `json:"in_progress,omitempty"` LastFailure *Invocation `json:"last_failure,omitempty"` @@ -36,6 +43,76 @@ type SnapshotLifecycle struct { Version int64 `json:"version"` } +func (s *SnapshotLifecycle) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "in_progress": + if err := dec.Decode(&s.InProgress); err != nil { + return err + } + + case "last_failure": + if err := dec.Decode(&s.LastFailure); err != nil { + return err + } + + case "last_success": + if err := dec.Decode(&s.LastSuccess); err != nil { + return err + } + + case "modified_date": + if err := dec.Decode(&s.ModifiedDate); err != nil { + return err + } + + case "modified_date_millis": + if err := dec.Decode(&s.ModifiedDateMillis); err != nil { + return err + } + + case "next_execution": + if err := dec.Decode(&s.NextExecution); err != nil { + return err + } + + case "next_execution_millis": + if err := dec.Decode(&s.NextExecutionMillis); err != nil { + return err + } + + case "policy": + if err := dec.Decode(&s.Policy); err != nil { + return err + } + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + return err + } + + case "version": + if err := 
dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewSnapshotLifecycle returns a SnapshotLifecycle. func NewSnapshotLifecycle() *SnapshotLifecycle { r := &SnapshotLifecycle{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotresponseitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotresponseitem.go index 550f6a45f..9665e4015 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotresponseitem.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotresponseitem.go @@ -16,19 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // SnapshotResponseItem type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/get/SnapshotGetResponse.ts#L42-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/get/SnapshotGetResponse.ts#L44-L48 type SnapshotResponseItem struct { Error *ErrorCause `json:"error,omitempty"` Repository string `json:"repository"` Snapshots []SnapshotInfo `json:"snapshots,omitempty"` } +func (s *SnapshotResponseItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + if err := dec.Decode(&s.Error); err != nil { + return err + } + + case "repository": + if err := dec.Decode(&s.Repository); err != nil { + return err + } + + case "snapshots": + if err := dec.Decode(&s.Snapshots); err != nil { + return err + } + + } + } + return nil +} + // NewSnapshotResponseItem returns a SnapshotResponseItem. func NewSnapshotResponseItem() *SnapshotResponseItem { r := &SnapshotResponseItem{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotrestore.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotrestore.go index 000965e84..b4833ebcf 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotrestore.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotrestore.go @@ -16,19 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SnapshotRestore type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/restore/SnapshotRestoreResponse.ts#L27-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/restore/SnapshotRestoreResponse.ts#L27-L31 type SnapshotRestore struct { Indices []string `json:"indices"` Shards ShardStatistics `json:"shards"` Snapshot string `json:"snapshot"` } +func (s *SnapshotRestore) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return err + } + + case "shards": + if err := dec.Decode(&s.Shards); err != nil { + return err + } + + case "snapshot": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Snapshot = o + + } + } + return nil +} + // NewSnapshotRestore returns a SnapshotRestore. func NewSnapshotRestore() *SnapshotRestore { r := &SnapshotRestore{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotshardfailure.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotshardfailure.go index cbf20dc22..595e61ac6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotshardfailure.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotshardfailure.go @@ -16,19 +16,86 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SnapshotShardFailure type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/_types/SnapshotShardFailure.ts#L22-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/_types/SnapshotShardFailure.ts#L22-L28 type SnapshotShardFailure struct { - Index string `json:"index"` - NodeId string `json:"node_id"` - Reason string `json:"reason"` - ShardId string `json:"shard_id"` - Status string `json:"status"` + Index string `json:"index"` + NodeId *string `json:"node_id,omitempty"` + Reason string `json:"reason"` + ShardId string `json:"shard_id"` + Status string `json:"status"` +} + +func (s *SnapshotShardFailure) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return err + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = o + + case "shard_id": + if err := dec.Decode(&s.ShardId); err != nil { + return err + } + + case "status": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Status = o + + } + } + return nil } // 
NewSnapshotShardFailure returns a SnapshotShardFailure. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotshardsstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotshardsstats.go index 34bcdb38d..089fcd600 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotshardsstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotshardsstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SnapshotShardsStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/_types/SnapshotShardsStats.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/_types/SnapshotShardsStats.ts#L22-L29 type SnapshotShardsStats struct { Done int64 `json:"done"` Failed int64 `json:"failed"` @@ -32,6 +40,116 @@ type SnapshotShardsStats struct { Total int64 `json:"total"` } +func (s *SnapshotShardsStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "done": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Done = value + case float64: + f := int64(v) + s.Done = f + } + + case "failed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Failed = value + case float64: + f := int64(v) + s.Failed = f + } + + case "finalizing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Finalizing = value + case float64: + f := int64(v) + s.Finalizing = f + } + + case "initializing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Initializing = value + case float64: + f := int64(v) + s.Initializing = f + } + + case "started": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Started = value + case float64: + f := int64(v) + s.Started = f + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + // NewSnapshotShardsStats returns a SnapshotShardsStats. func NewSnapshotShardsStats() *SnapshotShardsStats { r := &SnapshotShardsStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotshardsstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotshardsstatus.go index 01dbbf5c8..f30b276ed 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotshardsstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotshardsstatus.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // SnapshotShardsStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/_types/SnapshotShardsStatus.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/_types/SnapshotShardsStatus.ts#L24-L27 type SnapshotShardsStatus struct { Stage shardsstatsstage.ShardsStatsStage `json:"stage"` Stats ShardsStatsSummary `json:"stats"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotsrecord.go index 83dd1304a..b6648b8d9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotsrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotsrecord.go @@ -16,51 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // SnapshotsRecord type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/snapshots/types.ts#L24-L90 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/snapshots/types.ts#L24-L96 type SnapshotsRecord struct { - // Duration duration + // Duration The time it took the snapshot process to complete, in time units. Duration Duration `json:"duration,omitempty"` - // EndEpoch end time in seconds since 1970-01-01 00:00:00 + // EndEpoch The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot + // process ended. EndEpoch StringifiedEpochTimeUnitSeconds `json:"end_epoch,omitempty"` - // EndTime end time in HH:MM:SS + // EndTime The time (HH:MM:SS) at which the snapshot process ended. EndTime *string `json:"end_time,omitempty"` - // FailedShards number of failed shards + // FailedShards The number of failed shards in the snapshot. FailedShards *string `json:"failed_shards,omitempty"` - // Id unique snapshot + // Id The unique identifier for the snapshot. Id *string `json:"id,omitempty"` - // Indices number of indices + // Indices The number of indices in the snapshot. Indices *string `json:"indices,omitempty"` - // Reason reason for failures + // Reason The reason for any snapshot failures. Reason *string `json:"reason,omitempty"` - // Repository repository name + // Repository The repository name. Repository *string `json:"repository,omitempty"` - // StartEpoch start time in seconds since 1970-01-01 00:00:00 + // StartEpoch The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot + // process started. StartEpoch StringifiedEpochTimeUnitSeconds `json:"start_epoch,omitempty"` - // StartTime start time in HH:MM:SS + // StartTime The time (HH:MM:SS) at which the snapshot process started. StartTime ScheduleTimeOfDay `json:"start_time,omitempty"` - // Status snapshot name + // Status The state of the snapshot process. 
+ // Returned values include: + // `FAILED`: The snapshot process failed. + // `INCOMPATIBLE`: The snapshot process is incompatible with the current cluster + // version. + // `IN_PROGRESS`: The snapshot process started but has not completed. + // `PARTIAL`: The snapshot process completed with a partial success. + // `SUCCESS`: The snapshot process completed with a full success. Status *string `json:"status,omitempty"` - // SuccessfulShards number of successful shards + // SuccessfulShards The number of successful shards in the snapshot. SuccessfulShards *string `json:"successful_shards,omitempty"` - // TotalShards number of total shards + // TotalShards The total number of shards in the snapshot. TotalShards *string `json:"total_shards,omitempty"` } func (s *SnapshotsRecord) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -90,29 +100,64 @@ func (s *SnapshotsRecord) UnmarshalJSON(data []byte) error { } case "failed_shards", "fs": - if err := dec.Decode(&s.FailedShards); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FailedShards = &o case "id", "snapshot": - if err := dec.Decode(&s.Id); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Id = &o case "indices", "i": - if err := dec.Decode(&s.Indices); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Indices = &o case "reason", "r": - if err := dec.Decode(&s.Reason); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = 
string(tmp[:]) + } + s.Reason = &o case "repository", "re", "repo": - if err := dec.Decode(&s.Repository); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Repository = &o case "start_epoch", "ste", "startEpoch": if err := dec.Decode(&s.StartEpoch); err != nil { @@ -120,6 +165,7 @@ func (s *SnapshotsRecord) UnmarshalJSON(data []byte) error { } case "start_time", "sti", "startTime": + rawMsg := json.RawMessage{} dec.Decode(&rawMsg) source := bytes.NewReader(rawMsg) @@ -139,19 +185,40 @@ func (s *SnapshotsRecord) UnmarshalJSON(data []byte) error { } case "status", "s": - if err := dec.Decode(&s.Status); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Status = &o case "successful_shards", "ss": - if err := dec.Decode(&s.SuccessfulShards); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SuccessfulShards = &o case "total_shards", "ts": - if err := dec.Decode(&s.TotalShards); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TotalShards = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotstats.go index 499e1efe3..61839d9e7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snapshotstats.go @@ -16,13 +16,20 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // SnapshotStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/_types/SnapshotStats.ts#L23-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/_types/SnapshotStats.ts#L23-L29 type SnapshotStats struct { Incremental FileCountSnapshotStats `json:"incremental"` StartTimeInMillis int64 `json:"start_time_in_millis"` @@ -31,6 +38,51 @@ type SnapshotStats struct { Total FileCountSnapshotStats `json:"total"` } +func (s *SnapshotStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "incremental": + if err := dec.Decode(&s.Incremental); err != nil { + return err + } + + case "start_time_in_millis": + if err := dec.Decode(&s.StartTimeInMillis); err != nil { + return err + } + + case "time": + if err := dec.Decode(&s.Time); err != nil { + return err + } + + case "time_in_millis": + if err := dec.Decode(&s.TimeInMillis); err != nil { + return err + } + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return err + } + + } + } + return nil +} + // NewSnapshotStats returns a SnapshotStats. 
func NewSnapshotStats() *SnapshotStats { r := &SnapshotStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snowballanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snowballanalyzer.go index e1fae2c05..17151bf38 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snowballanalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snowballanalyzer.go @@ -16,17 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snowballlanguage" ) // SnowballAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/analyzers.ts#L88-L93 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/analyzers.ts#L88-L93 type SnowballAnalyzer struct { Language snowballlanguage.SnowballLanguage `json:"language"` Stopwords []string `json:"stopwords,omitempty"` @@ -34,11 +39,75 @@ type SnowballAnalyzer struct { Version *string `json:"version,omitempty"` } +func (s *SnowballAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "language": + if err := dec.Decode(&s.Language); err != nil { + return err + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return err + } + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SnowballAnalyzer) MarshalJSON() ([]byte, error) { + type innerSnowballAnalyzer SnowballAnalyzer + tmp := innerSnowballAnalyzer{ + Language: s.Language, + Stopwords: s.Stopwords, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "snowball" + + return json.Marshal(tmp) +} + // NewSnowballAnalyzer returns a SnowballAnalyzer. func NewSnowballAnalyzer() *SnowballAnalyzer { r := &SnowballAnalyzer{} - r.Type = "snowball" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snowballtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snowballtokenfilter.go index c4c944389..fa9ec2d82 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snowballtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/snowballtokenfilter.go @@ -16,28 +16,80 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snowballlanguage" ) // SnowballTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L308-L311 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L309-L312 type SnowballTokenFilter struct { Language snowballlanguage.SnowballLanguage `json:"language"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *SnowballTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "language": + if err := dec.Decode(&s.Language); err != nil { + return err + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SnowballTokenFilter) MarshalJSON() ([]byte, error) { + type innerSnowballTokenFilter SnowballTokenFilter + tmp := innerSnowballTokenFilter{ + Language: s.Language, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "snowball" + + return json.Marshal(tmp) +} + // NewSnowballTokenFilter returns a SnowballTokenFilter. func NewSnowballTokenFilter() *SnowballTokenFilter { r := &SnowballTokenFilter{} - r.Type = "snowball" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/softdeletes.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/softdeletes.go index e3ccb9cce..40e73545f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/softdeletes.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/softdeletes.go @@ -16,13 +16,21 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SoftDeletes type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L50-L63 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L50-L63 type SoftDeletes struct { // Enabled Indicates whether soft deletes are enabled on the index. Enabled *bool `json:"enabled,omitempty"` @@ -36,6 +44,45 @@ type SoftDeletes struct { RetentionLease *RetentionLease `json:"retention_lease,omitempty"` } +func (s *SoftDeletes) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "retention_lease": + if err := dec.Decode(&s.RetentionLease); err != nil { + return err + } + + } + } + return nil +} + // NewSoftDeletes returns a SoftDeletes. 
func NewSoftDeletes() *SoftDeletes { r := &SoftDeletes{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sort.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sort.go index 26f644c1c..8a8f41f7c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sort.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sort.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Sort type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/sort.ts#L99-L99 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/sort.ts#L99-L99 type Sort []SortCombinations diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sortcombinations.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sortcombinations.go index 5ce3059bf..3d143d470 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sortcombinations.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sortcombinations.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // string // SortOptions // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/sort.ts#L93-L97 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/sort.ts#L93-L97 type SortCombinations interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sortoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sortoptions.go index 7f7bad625..6c83761e1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sortoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sortoptions.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -27,13 +27,13 @@ import ( // SortOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/sort.ts#L82-L91 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/sort.ts#L82-L91 type SortOptions struct { Doc_ *ScoreSort `json:"_doc,omitempty"` GeoDistance_ *GeoDistanceSort `json:"_geo_distance,omitempty"` Score_ *ScoreSort `json:"_score,omitempty"` Script_ *ScriptSort `json:"_script,omitempty"` - SortOptions map[string]FieldSort `json:"-"` + SortOptions map[string]FieldSort `json:"SortOptions,omitempty"` } // MarhsalJSON overrides marshalling for types with additional properties @@ -55,6 +55,7 @@ func (s SortOptions) MarshalJSON() ([]byte, error) { for key, value := range s.SortOptions { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "SortOptions") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sortprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sortprocessor.go index b6b8556d3..f5f058795 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sortprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sortprocessor.go @@ -16,26 +16,134 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" ) // SortProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L348-L352 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L1034-L1050 type SortProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Order *sortorder.SortOrder `json:"order,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to be sorted. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Order The sort order to use. + // Accepts `"asc"` or `"desc"`. + Order *sortorder.SortOrder `json:"order,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the sorted value to. + // By default, the field is updated in-place. 
+ TargetField *string `json:"target_field,omitempty"` +} + +func (s *SortProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "order": + if err := dec.Decode(&s.Order); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewSortProcessor returns a SortProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sortresults.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sortresults.go deleted file mode 100644 index 3cbc48de6..000000000 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sortresults.go +++ /dev/null @@ -1,26 +0,0 @@ -// Licensed to Elasticsearch B.V. 
under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 - -package types - -// SortResults type alias. -// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/sort.ts#L101-L101 -type SortResults []FieldValue diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sourceconfig.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sourceconfig.go index b9d525407..1fc2260a3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sourceconfig.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sourceconfig.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // bool // SourceFilter // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/SourceFilter.ts#L33-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/SourceFilter.ts#L33-L37 type SourceConfig interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sourceconfigparam.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sourceconfigparam.go index edef97998..49a4074cb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sourceconfigparam.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sourceconfigparam.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // bool // []string // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/SourceFilter.ts#L39-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/SourceFilter.ts#L39-L45 type SourceConfigParam interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sourcefield.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sourcefield.go index 039faa476..a14ddc5d9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sourcefield.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sourcefield.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sourcefieldmode" ) // SourceField type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/meta-fields.ts#L58-L65 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/meta-fields.ts#L58-L65 type SourceField struct { Compress *bool `json:"compress,omitempty"` CompressThreshold *string `json:"compress_threshold,omitempty"` @@ -36,6 +42,81 @@ type SourceField struct { Mode *sourcefieldmode.SourceFieldMode `json:"mode,omitempty"` } +func (s *SourceField) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compress": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Compress = &value + case bool: + s.Compress = &v + } + + case "compress_threshold": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CompressThreshold = &o + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "excludes": + if err := dec.Decode(&s.Excludes); err != nil { + return err + } + + case "includes": + if err := dec.Decode(&s.Includes); err != nil { + return err + } + + case "mode": + if err := dec.Decode(&s.Mode); err != nil { + return err + } + + } + } + return nil +} + // NewSourceField returns a SourceField. 
func NewSourceField() *SourceField { r := &SourceField{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sourcefilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sourcefilter.go index 16d2883b5..5f3122b40 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sourcefilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sourcefilter.go @@ -16,18 +16,82 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // SourceFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/SourceFilter.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/SourceFilter.ts#L23-L31 type SourceFilter struct { Excludes []string `json:"excludes,omitempty"` Includes []string `json:"includes,omitempty"` } +func (s *SourceFilter) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Includes) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "excludes", "exclude": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Excludes = append(s.Excludes, *o) + } else { + if err := 
json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Excludes); err != nil { + return err + } + } + + case "includes", "include": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Includes = append(s.Includes, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Includes); err != nil { + return err + } + } + + } + } + return nil +} + // NewSourceFilter returns a SourceFilter. func NewSourceFilter() *SourceFilter { r := &SourceFilter{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spancontainingquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spancontainingquery.go index 0d8b0eb97..8beafc38b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spancontainingquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spancontainingquery.go @@ -16,20 +16,95 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SpanContainingQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/span.ts#L25-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/span.ts#L25-L36 type SpanContainingQuery struct { - Big *SpanQuery `json:"big,omitempty"` - Boost *float32 `json:"boost,omitempty"` + // Big Can be any span query. + // Matching spans from `big` that contain matches from `little` are returned. 
+ Big *SpanQuery `json:"big,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Little Can be any span query. + // Matching spans from `big` that contain matches from `little` are returned. Little *SpanQuery `json:"little,omitempty"` QueryName_ *string `json:"_name,omitempty"` } +func (s *SpanContainingQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "big": + if err := dec.Decode(&s.Big); err != nil { + return err + } + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "little": + if err := dec.Decode(&s.Little); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + // NewSpanContainingQuery returns a SpanContainingQuery. 
func NewSpanContainingQuery() *SpanContainingQuery { r := &SpanContainingQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanfieldmaskingquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanfieldmaskingquery.go index b9cb279b7..87efe4886 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanfieldmaskingquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanfieldmaskingquery.go @@ -16,20 +16,91 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SpanFieldMaskingQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/span.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/span.ts#L38-L41 type SpanFieldMaskingQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
Boost *float32 `json:"boost,omitempty"` Field string `json:"field"` Query *SpanQuery `json:"query,omitempty"` QueryName_ *string `json:"_name,omitempty"` } +func (s *SpanFieldMaskingQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + // NewSpanFieldMaskingQuery returns a SpanFieldMaskingQuery. func NewSpanFieldMaskingQuery() *SpanFieldMaskingQuery { r := &SpanFieldMaskingQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanfirstquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanfirstquery.go index 1daaf37c1..62c2a4b7a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanfirstquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanfirstquery.go @@ -16,20 +16,104 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SpanFirstQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/span.ts#L35-L38 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/span.ts#L43-L52 type SpanFirstQuery struct { - Boost *float32 `json:"boost,omitempty"` - End int `json:"end"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // End Controls the maximum end position permitted in a match. + End int `json:"end"` + // Match Can be any other span type query. 
Match *SpanQuery `json:"match,omitempty"` QueryName_ *string `json:"_name,omitempty"` } +func (s *SpanFirstQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "end": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.End = value + case float64: + f := int(v) + s.End = f + } + + case "match": + if err := dec.Decode(&s.Match); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + // NewSpanFirstQuery returns a SpanFirstQuery. func NewSpanFirstQuery() *SpanFirstQuery { r := &SpanFirstQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spangapquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spangapquery.go index 67230a0c8..bf6992dc7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spangapquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spangapquery.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // SpanGapQuery type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/span.ts#L40-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/span.ts#L54-L56 type SpanGapQuery map[string]int diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanmultitermquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanmultitermquery.go index 31642eac7..cbe4fcb26 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanmultitermquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanmultitermquery.go @@ -16,21 +16,87 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SpanMultiTermQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/span.ts#L44-L47 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/span.ts#L58-L63 type SpanMultiTermQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
Boost *float32 `json:"boost,omitempty"` - // Match Should be a multi term query (one of wildcard, fuzzy, prefix, range or regexp - // query) + // Match Should be a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, + // or `regexp` query). Match *Query `json:"match,omitempty"` QueryName_ *string `json:"_name,omitempty"` } +func (s *SpanMultiTermQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "match": + if err := dec.Decode(&s.Match); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + // NewSpanMultiTermQuery returns a SpanMultiTermQuery. func NewSpanMultiTermQuery() *SpanMultiTermQuery { r := &SpanMultiTermQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spannearquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spannearquery.go index fb426526c..07c195175 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spannearquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spannearquery.go @@ -16,19 +16,118 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SpanNearQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/span.ts#L49-L53 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/span.ts#L65-L78 type SpanNearQuery struct { - Boost *float32 `json:"boost,omitempty"` - Clauses []SpanQuery `json:"clauses"` - InOrder *bool `json:"in_order,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Slop *int `json:"slop,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Clauses Array of one or more other span type queries. + Clauses []SpanQuery `json:"clauses"` + // InOrder Controls whether matches are required to be in-order. + InOrder *bool `json:"in_order,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Slop Controls the maximum number of intervening unmatched positions permitted. 
+ Slop *int `json:"slop,omitempty"` +} + +func (s *SpanNearQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "clauses": + if err := dec.Decode(&s.Clauses); err != nil { + return err + } + + case "in_order": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.InOrder = &value + case bool: + s.InOrder = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "slop": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Slop = &value + case float64: + f := int(v) + s.Slop = &f + } + + } + } + return nil } // NewSpanNearQuery returns a SpanNearQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spannotquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spannotquery.go index 4861ab508..49dd4e2f3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spannotquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spannotquery.go @@ -16,21 +16,149 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SpanNotQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/span.ts#L55-L63 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/span.ts#L80-L104 type SpanNotQuery struct { - Boost *float32 `json:"boost,omitempty"` - Dist *int `json:"dist,omitempty"` - Exclude *SpanQuery `json:"exclude,omitempty"` - Include *SpanQuery `json:"include,omitempty"` - Post *int `json:"post,omitempty"` - Pre *int `json:"pre,omitempty"` - QueryName_ *string `json:"_name,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Dist The number of tokens from within the include span that can’t have overlap + // with the exclude span. + // Equivalent to setting both `pre` and `post`. + Dist *int `json:"dist,omitempty"` + // Exclude Span query whose matches must not overlap those returned. + Exclude *SpanQuery `json:"exclude,omitempty"` + // Include Span query whose matches are filtered. + Include *SpanQuery `json:"include,omitempty"` + // Post The number of tokens after the include span that can’t have overlap with the + // exclude span. + Post *int `json:"post,omitempty"` + // Pre The number of tokens before the include span that can’t have overlap with the + // exclude span. 
+ Pre *int `json:"pre,omitempty"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *SpanNotQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "dist": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Dist = &value + case float64: + f := int(v) + s.Dist = &f + } + + case "exclude": + if err := dec.Decode(&s.Exclude); err != nil { + return err + } + + case "include": + if err := dec.Decode(&s.Include); err != nil { + return err + } + + case "post": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Post = &value + case float64: + f := int(v) + s.Post = &f + } + + case "pre": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Pre = &value + case float64: + f := int(v) + s.Pre = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil } // NewSpanNotQuery returns a SpanNotQuery. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanorquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanorquery.go index 1a548d6b0..2ae5591e7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanorquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanorquery.go @@ -16,19 +16,86 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SpanOrQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/span.ts#L65-L67 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/span.ts#L106-L111 type SpanOrQuery struct { - Boost *float32 `json:"boost,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Clauses Array of one or more other span type queries. 
Clauses []SpanQuery `json:"clauses"` QueryName_ *string `json:"_name,omitempty"` } +func (s *SpanOrQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "clauses": + if err := dec.Decode(&s.Clauses); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + // NewSpanOrQuery returns a SpanOrQuery. func NewSpanOrQuery() *SpanOrQuery { r := &SpanOrQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanquery.go index 7341c9da7..340c3bd22 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanquery.go @@ -16,24 +16,118 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // SpanQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/span.ts#L79-L91 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/span.ts#L131-L170 type SpanQuery struct { - FieldMaskingSpan *SpanFieldMaskingQuery `json:"field_masking_span,omitempty"` - SpanContaining *SpanContainingQuery `json:"span_containing,omitempty"` - SpanFirst *SpanFirstQuery `json:"span_first,omitempty"` - SpanGap map[string]int `json:"span_gap,omitempty"` - SpanMulti *SpanMultiTermQuery `json:"span_multi,omitempty"` - SpanNear *SpanNearQuery `json:"span_near,omitempty"` - SpanNot *SpanNotQuery `json:"span_not,omitempty"` - SpanOr *SpanOrQuery `json:"span_or,omitempty"` - SpanTerm map[string]SpanTermQuery `json:"span_term,omitempty"` - SpanWithin *SpanWithinQuery `json:"span_within,omitempty"` + // FieldMaskingSpan Allows queries like `span_near` or `span_or` across different fields. + FieldMaskingSpan *SpanFieldMaskingQuery `json:"field_masking_span,omitempty"` + // SpanContaining Accepts a list of span queries, but only returns those spans which also match + // a second span query. + SpanContaining *SpanContainingQuery `json:"span_containing,omitempty"` + // SpanFirst Accepts another span query whose matches must appear within the first N + // positions of the field. + SpanFirst *SpanFirstQuery `json:"span_first,omitempty"` + SpanGap SpanGapQuery `json:"span_gap,omitempty"` + // SpanMulti Wraps a `term`, `range`, `prefix`, `wildcard`, `regexp`, or `fuzzy` query. + SpanMulti *SpanMultiTermQuery `json:"span_multi,omitempty"` + // SpanNear Accepts multiple span queries whose matches must be within the specified + // distance of each other, and possibly in the same order. + SpanNear *SpanNearQuery `json:"span_near,omitempty"` + // SpanNot Wraps another span query, and excludes any documents which match that query. 
+ SpanNot *SpanNotQuery `json:"span_not,omitempty"` + // SpanOr Combines multiple span queries and returns documents which match any of the + // specified queries. + SpanOr *SpanOrQuery `json:"span_or,omitempty"` + // SpanTerm The equivalent of the `term` query but for use with other span queries. + SpanTerm map[string]SpanTermQuery `json:"span_term,omitempty"` + // SpanWithin The result from a single span query is returned as long is its span falls + // within the spans returned by a list of other span queries. + SpanWithin *SpanWithinQuery `json:"span_within,omitempty"` +} + +func (s *SpanQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field_masking_span": + if err := dec.Decode(&s.FieldMaskingSpan); err != nil { + return err + } + + case "span_containing": + if err := dec.Decode(&s.SpanContaining); err != nil { + return err + } + + case "span_first": + if err := dec.Decode(&s.SpanFirst); err != nil { + return err + } + + case "span_gap": + if err := dec.Decode(&s.SpanGap); err != nil { + return err + } + + case "span_multi": + if err := dec.Decode(&s.SpanMulti); err != nil { + return err + } + + case "span_near": + if err := dec.Decode(&s.SpanNear); err != nil { + return err + } + + case "span_not": + if err := dec.Decode(&s.SpanNot); err != nil { + return err + } + + case "span_or": + if err := dec.Decode(&s.SpanOr); err != nil { + return err + } + + case "span_term": + if s.SpanTerm == nil { + s.SpanTerm = make(map[string]SpanTermQuery, 0) + } + if err := dec.Decode(&s.SpanTerm); err != nil { + return err + } + + case "span_within": + if err := dec.Decode(&s.SpanWithin); err != nil { + return err + } + + } + } + return nil } // NewSpanQuery returns a SpanQuery. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spantermquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spantermquery.go index 741dfc880..61e9c6f14 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spantermquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spantermquery.go @@ -16,19 +16,97 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SpanTermQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/span.ts#L69-L72 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/span.ts#L113-L116 type SpanTermQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
Boost *float32 `json:"boost,omitempty"` QueryName_ *string `json:"_name,omitempty"` Value string `json:"value"` } +func (s *SpanTermQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Value) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Value = o + + } + } + return nil +} + // NewSpanTermQuery returns a SpanTermQuery. func NewSpanTermQuery() *SpanTermQuery { r := &SpanTermQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanwithinquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanwithinquery.go index a07780ae4..33b93d108 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanwithinquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/spanwithinquery.go @@ -16,20 +16,95 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SpanWithinQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/span.ts#L74-L77 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/span.ts#L118-L129 type SpanWithinQuery struct { - Big *SpanQuery `json:"big,omitempty"` - Boost *float32 `json:"boost,omitempty"` + // Big Can be any span query. + // Matching spans from `little` that are enclosed within `big` are returned. + Big *SpanQuery `json:"big,omitempty"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // Little Can be any span query. + // Matching spans from `little` that are enclosed within `big` are returned. 
Little *SpanQuery `json:"little,omitempty"` QueryName_ *string `json:"_name,omitempty"` } +func (s *SpanWithinQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "big": + if err := dec.Decode(&s.Big); err != nil { + return err + } + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "little": + if err := dec.Decode(&s.Little); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + // NewSpanWithinQuery returns a SpanWithinQuery. func NewSpanWithinQuery() *SpanWithinQuery { r := &SpanWithinQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/splitprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/splitprocessor.go index d56dced0a..39e9a9063 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/splitprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/splitprocessor.go @@ -16,24 +16,171 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SplitProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L354-L360 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L1052-L1077 type SplitProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - PreserveTrailing *bool `json:"preserve_trailing,omitempty"` - Separator string `json:"separator"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to split. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // PreserveTrailing Preserves empty trailing fields, if any. + PreserveTrailing *bool `json:"preserve_trailing,omitempty"` + // Separator A regex which matches the separator, for example, `,` or `\s+`. + Separator string `json:"separator"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the split value to. 
+ // By default, the field is updated in-place. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *SplitProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "preserve_trailing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.PreserveTrailing = &value + case bool: + s.PreserveTrailing = &v + } + + case "separator": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Separator = o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); 
err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewSplitProcessor returns a SplitProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sql.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sql.go index 4a9b5295b..9776a7c7d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sql.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sql.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Sql type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L377-L380 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L386-L389 type Sql struct { Available bool `json:"available"` Enabled bool `json:"enabled"` @@ -30,6 +38,70 @@ type Sql struct { Queries map[string]XpackQuery `json:"queries"` } +func (s *Sql) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "features": + if s.Features == nil { + s.Features = make(map[string]int, 0) + } + if err := dec.Decode(&s.Features); err != nil { + return err + } + + case "queries": + if s.Queries == nil { + s.Queries = make(map[string]XpackQuery, 0) + } + if err := dec.Decode(&s.Queries); err != nil { + return err + } + + } + } + return nil +} + // NewSql returns a Sql. func NewSql() *Sql { r := &Sql{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ssl.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ssl.go index 6178f661b..f59c5400d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ssl.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ssl.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Ssl type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L382-L385 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L391-L394 type Ssl struct { Http FeatureToggle `json:"http"` Transport FeatureToggle `json:"transport"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/standardanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/standardanalyzer.go index dcb8cd3ea..d68325918 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/standardanalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/standardanalyzer.go @@ -16,24 +16,101 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // StandardAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/analyzers.ts#L95-L99 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/analyzers.ts#L95-L99 type StandardAnalyzer struct { MaxTokenLength *int `json:"max_token_length,omitempty"` Stopwords []string `json:"stopwords,omitempty"` Type string `json:"type,omitempty"` } +func (s *StandardAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_token_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxTokenLength = &value + case float64: + f := int(v) + s.MaxTokenLength = &f + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return err + } + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s StandardAnalyzer) MarshalJSON() ([]byte, error) { + type innerStandardAnalyzer StandardAnalyzer + tmp := innerStandardAnalyzer{ + MaxTokenLength: s.MaxTokenLength, + Stopwords: s.Stopwords, + Type: s.Type, + } + + tmp.Type = "standard" + + return json.Marshal(tmp) +} + // NewStandardAnalyzer returns a StandardAnalyzer. 
func NewStandardAnalyzer() *StandardAnalyzer { r := &StandardAnalyzer{} - r.Type = "standard" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/standarddeviationbounds.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/standarddeviationbounds.go index 0c5442327..a02bc9d6c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/standarddeviationbounds.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/standarddeviationbounds.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // StandardDeviationBounds type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L259-L266 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L260-L267 type StandardDeviationBounds struct { Lower Float64 `json:"lower,omitempty"` LowerPopulation Float64 `json:"lower_population,omitempty"` @@ -32,6 +39,56 @@ type StandardDeviationBounds struct { UpperSampling Float64 `json:"upper_sampling,omitempty"` } +func (s *StandardDeviationBounds) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lower": + if err := dec.Decode(&s.Lower); err != nil { + return err + } + + case "lower_population": + if err := dec.Decode(&s.LowerPopulation); err != nil { + return err + } + + case "lower_sampling": + if err := 
dec.Decode(&s.LowerSampling); err != nil { + return err + } + + case "upper": + if err := dec.Decode(&s.Upper); err != nil { + return err + } + + case "upper_population": + if err := dec.Decode(&s.UpperPopulation); err != nil { + return err + } + + case "upper_sampling": + if err := dec.Decode(&s.UpperSampling); err != nil { + return err + } + + } + } + return nil +} + // NewStandardDeviationBounds returns a StandardDeviationBounds. func NewStandardDeviationBounds() *StandardDeviationBounds { r := &StandardDeviationBounds{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/standarddeviationboundsasstring.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/standarddeviationboundsasstring.go index d3d7bc1bf..bffce4f9d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/standarddeviationboundsasstring.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/standarddeviationboundsasstring.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // StandardDeviationBoundsAsString type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L268-L275 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L269-L276 type StandardDeviationBoundsAsString struct { Lower string `json:"lower"` LowerPopulation string `json:"lower_population"` @@ -32,6 +40,98 @@ type StandardDeviationBoundsAsString struct { UpperSampling string `json:"upper_sampling"` } +func (s *StandardDeviationBoundsAsString) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lower": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Lower = o + + case "lower_population": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LowerPopulation = o + + case "lower_sampling": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LowerSampling = o + + case "upper": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Upper = o + + case "upper_population": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UpperPopulation = o + + case "upper_sampling": + var tmp json.RawMessage + if 
err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UpperSampling = o + + } + } + return nil +} + // NewStandardDeviationBoundsAsString returns a StandardDeviationBoundsAsString. func NewStandardDeviationBoundsAsString() *StandardDeviationBoundsAsString { r := &StandardDeviationBoundsAsString{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/standardtokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/standardtokenizer.go index 8dc729d65..2a384f271 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/standardtokenizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/standardtokenizer.go @@ -16,24 +16,90 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // StandardTokenizer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/tokenizers.ts#L104-L107 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/tokenizers.ts#L105-L108 type StandardTokenizer struct { MaxTokenLength *int `json:"max_token_length,omitempty"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *StandardTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_token_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxTokenLength = &value + case float64: + f := int(v) + s.MaxTokenLength = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s StandardTokenizer) MarshalJSON() ([]byte, error) { + type innerStandardTokenizer StandardTokenizer + tmp := innerStandardTokenizer{ + MaxTokenLength: s.MaxTokenLength, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "standard" + + return json.Marshal(tmp) +} + // NewStandardTokenizer returns a StandardTokenizer. 
func NewStandardTokenizer() *StandardTokenizer { r := &StandardTokenizer{} - r.Type = "standard" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statistics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statistics.go index d30088c06..46a4698eb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statistics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statistics.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Statistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/slm/_types/SnapshotLifecycle.ts#L51-L74 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/slm/_types/SnapshotLifecycle.ts#L51-L74 type Statistics struct { Policy *string `json:"policy,omitempty"` RetentionDeletionTime Duration `json:"retention_deletion_time,omitempty"` @@ -36,6 +44,146 @@ type Statistics struct { TotalSnapshotsTaken *int64 `json:"total_snapshots_taken,omitempty"` } +func (s *Statistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "policy": + if err := dec.Decode(&s.Policy); err != nil { + return err + } + + case "retention_deletion_time": + if err := dec.Decode(&s.RetentionDeletionTime); err != nil { + return err + } + + case "retention_deletion_time_millis": + if err := 
dec.Decode(&s.RetentionDeletionTimeMillis); err != nil { + return err + } + + case "retention_failed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.RetentionFailed = &value + case float64: + f := int64(v) + s.RetentionFailed = &f + } + + case "retention_runs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.RetentionRuns = &value + case float64: + f := int64(v) + s.RetentionRuns = &f + } + + case "retention_timed_out": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.RetentionTimedOut = &value + case float64: + f := int64(v) + s.RetentionTimedOut = &f + } + + case "total_snapshot_deletion_failures", "snapshot_deletion_failures": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalSnapshotDeletionFailures = &value + case float64: + f := int64(v) + s.TotalSnapshotDeletionFailures = &f + } + + case "total_snapshots_deleted", "snapshots_deleted": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalSnapshotsDeleted = &value + case float64: + f := int64(v) + s.TotalSnapshotsDeleted = &f + } + + case "total_snapshots_failed", "snapshots_failed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalSnapshotsFailed = &value + case float64: + f := int64(v) + s.TotalSnapshotsFailed = &f + } + + case "total_snapshots_taken", "snapshots_taken": + var tmp interface{} + 
dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalSnapshotsTaken = &value + case float64: + f := int64(v) + s.TotalSnapshotsTaken = &f + } + + } + } + return nil +} + // NewStatistics returns a Statistics. func NewStatistics() *Statistics { r := &Statistics{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stats.go index 483164f8d..7b3cbe16d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stats.go @@ -16,40 +16,254 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole" ) // Stats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L30-L53 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L30-L114 type Stats struct { + // AdaptiveSelection Statistics about adaptive replica selection. 
AdaptiveSelection map[string]AdaptiveSelection `json:"adaptive_selection,omitempty"` - Attributes map[string]string `json:"attributes,omitempty"` - Breakers map[string]Breaker `json:"breakers,omitempty"` - Discovery *Discovery `json:"discovery,omitempty"` - Fs *FileSystem `json:"fs,omitempty"` - Host *string `json:"host,omitempty"` - Http *Http `json:"http,omitempty"` - IndexingPressure *NodesIndexingPressure `json:"indexing_pressure,omitempty"` - Indices *IndicesShardStats `json:"indices,omitempty"` - Ingest *NodesIngest `json:"ingest,omitempty"` - Ip []string `json:"ip,omitempty"` - Jvm *Jvm `json:"jvm,omitempty"` - Name *string `json:"name,omitempty"` - Os *OperatingSystem `json:"os,omitempty"` - Process *Process `json:"process,omitempty"` - Roles []noderole.NodeRole `json:"roles,omitempty"` - Script *Scripting `json:"script,omitempty"` - ScriptCache map[string][]ScriptCache `json:"script_cache,omitempty"` - ThreadPool map[string]ThreadCount `json:"thread_pool,omitempty"` - Timestamp *int64 `json:"timestamp,omitempty"` - Transport *Transport `json:"transport,omitempty"` - TransportAddress *string `json:"transport_address,omitempty"` + // Attributes Contains a list of attributes for the node. + Attributes map[string]string `json:"attributes,omitempty"` + // Breakers Statistics about the field data circuit breaker. + Breakers map[string]Breaker `json:"breakers,omitempty"` + // Discovery Contains node discovery statistics for the node. + Discovery *Discovery `json:"discovery,omitempty"` + // Fs File system information, data path, free disk space, read/write stats. + Fs *FileSystem `json:"fs,omitempty"` + // Host Network host for the node, based on the network host setting. + Host *string `json:"host,omitempty"` + // Http HTTP connection information. + Http *Http `json:"http,omitempty"` + // IndexingPressure Contains indexing pressure statistics for the node. 
+ IndexingPressure *NodesIndexingPressure `json:"indexing_pressure,omitempty"` + // Indices Indices stats about size, document count, indexing and deletion times, search + // times, field cache size, merges and flushes. + Indices *IndicesShardStats `json:"indices,omitempty"` + // Ingest Statistics about ingest preprocessing. + Ingest *NodesIngest `json:"ingest,omitempty"` + // Ip IP address and port for the node. + Ip []string `json:"ip,omitempty"` + // Jvm JVM stats, memory pool information, garbage collection, buffer pools, number + // of loaded/unloaded classes. + Jvm *Jvm `json:"jvm,omitempty"` + // Name Human-readable identifier for the node. + // Based on the node name setting. + Name *string `json:"name,omitempty"` + // Os Operating system stats, load average, mem, swap. + Os *OperatingSystem `json:"os,omitempty"` + // Process Process statistics, memory consumption, cpu usage, open file descriptors. + Process *Process `json:"process,omitempty"` + // Roles Roles assigned to the node. + Roles []noderole.NodeRole `json:"roles,omitempty"` + // Script Contains script statistics for the node. + Script *Scripting `json:"script,omitempty"` + ScriptCache map[string][]ScriptCache `json:"script_cache,omitempty"` + // ThreadPool Statistics about each thread pool, including current size, queue and rejected + // tasks. + ThreadPool map[string]ThreadCount `json:"thread_pool,omitempty"` + Timestamp *int64 `json:"timestamp,omitempty"` + // Transport Transport statistics about sent and received bytes in cluster communication. + Transport *Transport `json:"transport,omitempty"` + // TransportAddress Host and port for the transport layer, used for internal communication + // between nodes in a cluster. 
+ TransportAddress *string `json:"transport_address,omitempty"` +} + +func (s *Stats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "adaptive_selection": + if s.AdaptiveSelection == nil { + s.AdaptiveSelection = make(map[string]AdaptiveSelection, 0) + } + if err := dec.Decode(&s.AdaptiveSelection); err != nil { + return err + } + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return err + } + + case "breakers": + if s.Breakers == nil { + s.Breakers = make(map[string]Breaker, 0) + } + if err := dec.Decode(&s.Breakers); err != nil { + return err + } + + case "discovery": + if err := dec.Decode(&s.Discovery); err != nil { + return err + } + + case "fs": + if err := dec.Decode(&s.Fs); err != nil { + return err + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return err + } + + case "http": + if err := dec.Decode(&s.Http); err != nil { + return err + } + + case "indexing_pressure": + if err := dec.Decode(&s.IndexingPressure); err != nil { + return err + } + + case "indices": + if err := dec.Decode(&s.Indices); err != nil { + return err + } + + case "ingest": + if err := dec.Decode(&s.Ingest); err != nil { + return err + } + + case "ip": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Ip = append(s.Ip, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Ip); err != nil { + return err + } + } + + case "jvm": + if err := dec.Decode(&s.Jvm); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "os": + if err := 
dec.Decode(&s.Os); err != nil { + return err + } + + case "process": + if err := dec.Decode(&s.Process); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "script_cache": + if s.ScriptCache == nil { + s.ScriptCache = make(map[string][]ScriptCache, 0) + } + rawMsg := make(map[string]json.RawMessage, 0) + dec.Decode(&rawMsg) + for key, value := range rawMsg { + switch { + case bytes.HasPrefix(value, []byte("\"")), bytes.HasPrefix(value, []byte("{")): + o := NewScriptCache() + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return err + } + s.ScriptCache[key] = append(s.ScriptCache[key], *o) + default: + o := []ScriptCache{} + err := json.NewDecoder(bytes.NewReader(value)).Decode(&o) + if err != nil { + return err + } + s.ScriptCache[key] = o + } + } + + case "thread_pool": + if s.ThreadPool == nil { + s.ThreadPool = make(map[string]ThreadCount, 0) + } + if err := dec.Decode(&s.ThreadPool); err != nil { + return err + } + + case "timestamp": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Timestamp = &value + case float64: + f := int64(v) + s.Timestamp = &f + } + + case "transport": + if err := dec.Decode(&s.Transport); err != nil { + return err + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return err + } + + } + } + return nil } // NewStats returns a Stats. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statsaggregate.go index 40c1a065c..c115fa53d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statsaggregate.go @@ -16,28 +16,151 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // StatsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L239-L254 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L240-L255 type StatsAggregate struct { - Avg Float64 `json:"avg,omitempty"` - AvgAsString *string `json:"avg_as_string,omitempty"` - Count int64 `json:"count"` - Max Float64 `json:"max,omitempty"` - MaxAsString *string `json:"max_as_string,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Min Float64 `json:"min,omitempty"` - MinAsString *string `json:"min_as_string,omitempty"` - Sum Float64 `json:"sum"` - SumAsString *string `json:"sum_as_string,omitempty"` + Avg Float64 `json:"avg,omitempty"` + AvgAsString *string `json:"avg_as_string,omitempty"` + Count int64 `json:"count"` + Max Float64 `json:"max,omitempty"` + MaxAsString *string `json:"max_as_string,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Min Float64 `json:"min,omitempty"` + MinAsString *string `json:"min_as_string,omitempty"` + Sum Float64 `json:"sum"` + SumAsString *string 
`json:"sum_as_string,omitempty"` +} + +func (s *StatsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg": + if err := dec.Decode(&s.Avg); err != nil { + return err + } + + case "avg_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AvgAsString = &o + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "max": + if err := dec.Decode(&s.Max); err != nil { + return err + } + + case "max_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxAsString = &o + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "min": + if err := dec.Decode(&s.Min); err != nil { + return err + } + + case "min_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MinAsString = &o + + case "sum": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Sum = f + case float64: + f := Float64(v) + s.Sum = f + } + + case "sum_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + 
s.SumAsString = &o + + } + } + return nil } // NewStatsAggregate returns a StatsAggregate. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statsaggregation.go index 2ab91d1bd..9d653609e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statsaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statsaggregation.go @@ -16,20 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // StatsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L145-L145 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L282-L282 type StatsAggregation struct { - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. 
Missing Missing `json:"missing,omitempty"` Script Script `json:"script,omitempty"` } +func (s *StatsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil +} + // NewStatsAggregation returns a StatsAggregation. func NewStatsAggregation() *StatsAggregation { r := &StatsAggregation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statsbucketaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statsbucketaggregate.go index e0a85e030..f1ed73db4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statsbucketaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statsbucketaggregate.go @@ -16,28 +16,151 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // StatsBucketAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L256-L257 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L257-L258 type StatsBucketAggregate struct { - Avg Float64 `json:"avg,omitempty"` - AvgAsString *string `json:"avg_as_string,omitempty"` - Count int64 `json:"count"` - Max Float64 `json:"max,omitempty"` - MaxAsString *string `json:"max_as_string,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Min Float64 `json:"min,omitempty"` - MinAsString *string `json:"min_as_string,omitempty"` - Sum Float64 `json:"sum"` - SumAsString *string `json:"sum_as_string,omitempty"` + Avg Float64 `json:"avg,omitempty"` + AvgAsString *string `json:"avg_as_string,omitempty"` + Count int64 `json:"count"` + Max Float64 `json:"max,omitempty"` + MaxAsString *string `json:"max_as_string,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Min Float64 `json:"min,omitempty"` + MinAsString *string `json:"min_as_string,omitempty"` + Sum Float64 `json:"sum"` + SumAsString *string `json:"sum_as_string,omitempty"` +} + +func (s *StatsBucketAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "avg": + if err := dec.Decode(&s.Avg); err != nil { + return err + } + + case "avg_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AvgAsString = &o + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case 
float64: + f := int64(v) + s.Count = f + } + + case "max": + if err := dec.Decode(&s.Max); err != nil { + return err + } + + case "max_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxAsString = &o + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "min": + if err := dec.Decode(&s.Min); err != nil { + return err + } + + case "min_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MinAsString = &o + + case "sum": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Sum = f + case float64: + f := Float64(v) + s.Sum = f + } + + case "sum_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SumAsString = &o + + } + } + return nil } // NewStatsBucketAggregate returns a StatsBucketAggregate. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statsbucketaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statsbucketaggregation.go index 752120203..b429cacb0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statsbucketaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/statsbucketaggregation.go @@ -16,33 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // StatsBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L284-L284 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L371-L371 type StatsBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. + Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. 
+ GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` } func (s *StatsBucketAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -62,9 +67,16 @@ func (s *StatsBucketAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -77,9 +89,16 @@ func (s *StatsBucketAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/status.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/status.go index 4a83c8788..c48909b7b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/status.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/status.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Status type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/snapshot/_types/SnapshotStatus.ts#L26-L35 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/snapshot/_types/SnapshotStatus.ts#L26-L35 type Status struct { IncludeGlobalState bool `json:"include_global_state"` Indices map[string]SnapshotIndexStats `json:"indices"` @@ -34,6 +42,99 @@ type Status struct { Uuid string `json:"uuid"` } +func (s *Status) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "include_global_state": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IncludeGlobalState = value + case bool: + s.IncludeGlobalState = v + } + + case "indices": + if s.Indices == nil { + s.Indices = make(map[string]SnapshotIndexStats, 0) + } + if err := dec.Decode(&s.Indices); err != nil { + return err + } + + case "repository": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Repository = o + + case "shards_stats": + if err := dec.Decode(&s.ShardsStats); err != nil { + return err + } + + case "snapshot": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Snapshot = o + + case "state": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.State = o + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + 
return err + } + + case "uuid": + if err := dec.Decode(&s.Uuid); err != nil { + return err + } + + } + } + return nil +} + // NewStatus returns a Status. func NewStatus() *Status { r := &Status{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stemmeroverridetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stemmeroverridetokenfilter.go index 7ba713940..28b4bc408 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stemmeroverridetokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stemmeroverridetokenfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // StemmerOverrideTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L313-L317 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L314-L318 type StemmerOverrideTokenFilter struct { Rules []string `json:"rules,omitempty"` RulesPath *string `json:"rules_path,omitempty"` @@ -30,11 +38,71 @@ type StemmerOverrideTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *StemmerOverrideTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "rules": + if err := dec.Decode(&s.Rules); err != nil { + return err + } + + case "rules_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RulesPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s StemmerOverrideTokenFilter) MarshalJSON() ([]byte, error) { + type innerStemmerOverrideTokenFilter StemmerOverrideTokenFilter + tmp := innerStemmerOverrideTokenFilter{ + Rules: s.Rules, + RulesPath: s.RulesPath, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "stemmer_override" + + return json.Marshal(tmp) +} + // NewStemmerOverrideTokenFilter returns a StemmerOverrideTokenFilter. 
func NewStemmerOverrideTokenFilter() *StemmerOverrideTokenFilter { r := &StemmerOverrideTokenFilter{} - r.Type = "stemmer_override" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stemmertokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stemmertokenfilter.go index db02a4798..d5bae4017 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stemmertokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stemmertokenfilter.go @@ -16,24 +16,86 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // StemmerTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L319-L322 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L320-L324 type StemmerTokenFilter struct { - Language string `json:"language"` + Language *string `json:"language,omitempty"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *StemmerTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "language", "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Language = &o + + case "type": + if err := 
dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s StemmerTokenFilter) MarshalJSON() ([]byte, error) { + type innerStemmerTokenFilter StemmerTokenFilter + tmp := innerStemmerTokenFilter{ + Language: s.Language, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "stemmer" + + return json.Marshal(tmp) +} + // NewStemmerTokenFilter returns a StemmerTokenFilter. func NewStemmerTokenFilter() *StemmerTokenFilter { r := &StemmerTokenFilter{} - r.Type = "stemmer" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stepkey.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stepkey.go index d0c5dde73..fb40ee3d4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stepkey.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stepkey.go @@ -16,19 +16,83 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // StepKey type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ilm/move_to_step/types.ts#L20-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ilm/move_to_step/types.ts#L20-L24 type StepKey struct { Action string `json:"action"` Name string `json:"name"` Phase string `json:"phase"` } +func (s *StepKey) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "action": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Action = o + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "phase": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Phase = o + + } + } + return nil +} + // NewStepKey returns a StepKey. func NewStepKey() *StepKey { r := &StepKey{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stopanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stopanalyzer.go index 3e3776bf9..bcbaf4711 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stopanalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stopanalyzer.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // StopAnalyzer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/analyzers.ts#L101-L106 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/analyzers.ts#L101-L106 type StopAnalyzer struct { Stopwords []string `json:"stopwords,omitempty"` StopwordsPath *string `json:"stopwords_path,omitempty"` @@ -30,11 +38,82 @@ type StopAnalyzer struct { Version *string `json:"version,omitempty"` } +func (s *StopAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return err + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s 
StopAnalyzer) MarshalJSON() ([]byte, error) { + type innerStopAnalyzer StopAnalyzer + tmp := innerStopAnalyzer{ + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "stop" + + return json.Marshal(tmp) +} + // NewStopAnalyzer returns a StopAnalyzer. func NewStopAnalyzer() *StopAnalyzer { r := &StopAnalyzer{} - r.Type = "stop" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stoptokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stoptokenfilter.go index ceb8f1563..d9bec2f59 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stoptokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stoptokenfilter.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // StopTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L96-L102 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L97-L103 type StopTokenFilter struct { IgnoreCase *bool `json:"ignore_case,omitempty"` RemoveTrailing *bool `json:"remove_trailing,omitempty"` @@ -32,11 +40,112 @@ type StopTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *StopTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "ignore_case": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreCase = &value + case bool: + s.IgnoreCase = &v + } + + case "remove_trailing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.RemoveTrailing = &value + case bool: + s.RemoveTrailing = &v + } + + case "stopwords": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Stopwords = append(s.Stopwords, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Stopwords); err != nil { + return err + } + } + + case "stopwords_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StopwordsPath = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case 
"version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s StopTokenFilter) MarshalJSON() ([]byte, error) { + type innerStopTokenFilter StopTokenFilter + tmp := innerStopTokenFilter{ + IgnoreCase: s.IgnoreCase, + RemoveTrailing: s.RemoveTrailing, + Stopwords: s.Stopwords, + StopwordsPath: s.StopwordsPath, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "stop" + + return json.Marshal(tmp) +} + // NewStopTokenFilter returns a StopTokenFilter. func NewStopTokenFilter() *StopTokenFilter { r := &StopTokenFilter{} - r.Type = "stop" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stopwords.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stopwords.go index f63568572..b61bc00b1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stopwords.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stopwords.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // StopWords type alias. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/StopWords.ts#L20-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/StopWords.ts#L20-L26 type StopWords []string diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/storage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/storage.go index 34dcf23dd..5aa9123d5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/storage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/storage.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/storagetype" ) // Storage type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L497-L506 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L491-L500 type Storage struct { // AllowMmap You can restrict the use of the mmapfs and the related hybridfs store type // via the setting node.store.allow_mmap. 
@@ -39,6 +45,45 @@ type Storage struct { Type storagetype.StorageType `json:"type"` } +func (s *Storage) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_mmap": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowMmap = &value + case bool: + s.AllowMmap = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil +} + // NewStorage returns a Storage. func NewStorage() *Storage { r := &Storage{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/storedscript.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/storedscript.go index eca21ed33..ad4d1c912 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/storedscript.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/storedscript.go @@ -16,21 +16,74 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/scriptlanguage" ) // StoredScript type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Scripting.ts#L35-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Scripting.ts#L47-L57 type StoredScript struct { + // Lang Specifies the language the script is written in. 
Lang scriptlanguage.ScriptLanguage `json:"lang"` Options map[string]string `json:"options,omitempty"` - Source string `json:"source"` + // Source The script source. + Source string `json:"source"` +} + +func (s *StoredScript) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "lang": + if err := dec.Decode(&s.Lang); err != nil { + return err + } + + case "options": + if s.Options == nil { + s.Options = make(map[string]string, 0) + } + if err := dec.Decode(&s.Options); err != nil { + return err + } + + case "source": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Source = o + + } + } + return nil } // NewStoredScript returns a StoredScript. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/storedscriptid.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/storedscriptid.go index 77938d74e..431e8afd0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/storedscriptid.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/storedscriptid.go @@ -16,22 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // StoredScriptId type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Scripting.ts#L52-L54 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Scripting.ts#L81-L86 type StoredScriptId struct { - Id string `json:"id"` + // Id The `id` for a stored script. + Id string `json:"id"` + // Params Specifies any named parameters that are passed into the script as variables. + // Use parameters instead of hard-coded values to decrease compile time. Params map[string]json.RawMessage `json:"params,omitempty"` } +func (s *StoredScriptId) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return err + } + + } + } + return nil +} + // NewStoredScriptId returns a StoredScriptId. func NewStoredScriptId() *StoredScriptId { r := &StoredScriptId{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/storestats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/storestats.go index 153171d31..a1610839d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/storestats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/storestats.go @@ -16,20 +16,121 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // StoreStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L233-L240 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L368-L395 type StoreStats struct { - Reserved ByteSize `json:"reserved,omitempty"` - ReservedInBytes int `json:"reserved_in_bytes"` - Size ByteSize `json:"size,omitempty"` - SizeInBytes int `json:"size_in_bytes"` - TotalDataSetSize ByteSize `json:"total_data_set_size,omitempty"` - TotalDataSetSizeInBytes *int `json:"total_data_set_size_in_bytes,omitempty"` + // Reserved A prediction of how much larger the shard stores will eventually grow due to + // ongoing peer recoveries, restoring snapshots, and similar activities. + Reserved ByteSize `json:"reserved,omitempty"` + // ReservedInBytes A prediction, in bytes, of how much larger the shard stores will eventually + // grow due to ongoing peer recoveries, restoring snapshots, and similar + // activities. + ReservedInBytes int64 `json:"reserved_in_bytes"` + // Size Total size of all shards assigned to selected nodes. + Size ByteSize `json:"size,omitempty"` + // SizeInBytes Total size, in bytes, of all shards assigned to selected nodes. + SizeInBytes int64 `json:"size_in_bytes"` + // TotalDataSetSize Total data set size of all shards assigned to selected nodes. + // This includes the size of shards not stored fully on the nodes, such as the + // cache for partially mounted indices. 
+ TotalDataSetSize ByteSize `json:"total_data_set_size,omitempty"` + // TotalDataSetSizeInBytes Total data set size, in bytes, of all shards assigned to selected nodes. + // This includes the size of shards not stored fully on the nodes, such as the + // cache for partially mounted indices. + TotalDataSetSizeInBytes *int64 `json:"total_data_set_size_in_bytes,omitempty"` +} + +func (s *StoreStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "reserved": + if err := dec.Decode(&s.Reserved); err != nil { + return err + } + + case "reserved_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ReservedInBytes = value + case float64: + f := int64(v) + s.ReservedInBytes = f + } + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return err + } + + case "size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + case "total_data_set_size": + if err := dec.Decode(&s.TotalDataSetSize); err != nil { + return err + } + + case "total_data_set_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalDataSetSizeInBytes = &value + case float64: + f := int64(v) + s.TotalDataSetSizeInBytes = &f + } + + } + } + return nil } // NewStoreStats returns a StoreStats. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoshape.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedboolean.go similarity index 70% rename from vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoshape.go rename to vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedboolean.go index 9bc7d0e90..0763401e8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/geoshape.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedboolean.go @@ -16,13 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types -import "encoding/json" - -// GeoShape type alias. +// Stringifiedboolean holds the union for the following types: +// +// bool +// string // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Geo.ts#L56-L57 -type GeoShape json.RawMessage +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_spec_utils/Stringified.ts#L20-L27 +type Stringifiedboolean interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedepochtimeunitmillis.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedepochtimeunitmillis.go index cefb7ab85..222c74952 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedepochtimeunitmillis.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedepochtimeunitmillis.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // int64 // string // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_spec_utils/Stringified.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_spec_utils/Stringified.ts#L20-L27 type StringifiedEpochTimeUnitMillis interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedepochtimeunitseconds.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedepochtimeunitseconds.go index 5453c9078..faadd43a9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedepochtimeunitseconds.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedepochtimeunitseconds.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // int64 // string // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_spec_utils/Stringified.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_spec_utils/Stringified.ts#L20-L27 type StringifiedEpochTimeUnitSeconds interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexpatterns.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedinteger.go similarity index 70% rename from vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexpatterns.go rename to vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedinteger.go index 1461e0f06..be92ce6f1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/indexpatterns.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedinteger.go @@ -16,11 +16,14 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types -// IndexPatterns type alias. 
+// Stringifiedinteger holds the union for the following types: // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L64-L64 -type IndexPatterns []string +// int +// string +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_spec_utils/Stringified.ts#L20-L27 +type Stringifiedinteger interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedversionnumber.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedversionnumber.go index b7674c4b7..c2e732ca4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedversionnumber.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringifiedversionnumber.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // int64 // string // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_spec_utils/Stringified.ts#L20-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_spec_utils/Stringified.ts#L20-L27 type StringifiedVersionNumber interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringraretermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringraretermsaggregate.go index 62ee21a46..a8bdf3725 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringraretermsaggregate.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringraretermsaggregate.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // StringRareTermsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L442-L446 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L443-L447 type StringRareTermsAggregate struct { Buckets BucketsStringRareTermsBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *StringRareTermsAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *StringRareTermsAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]StringRareTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []StringRareTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringraretermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringraretermsbucket.go index 8c765133f..aeb6ae9d9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringraretermsbucket.go +++ 
b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringraretermsbucket.go @@ -16,25 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // StringRareTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L448-L450 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L449-L451 type StringRareTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -42,6 +40,7 @@ type StringRareTermsBucket struct { } func (s *StringRareTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -55,456 +54,544 @@ func (s *StringRareTermsBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := 
NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if 
err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := 
NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } - case "doc_count": - if err := 
dec.Decode(&s.DocCount); err != nil { + case "key": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Key = o - case "key": - if err := dec.Decode(&s.Key); err != nil { - return err + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := 
dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case 
"date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err 
!= nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + 
return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := 
NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } } } @@ -531,6 +618,7 @@ func (s StringRareTermsBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringstatsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringstatsaggregate.go index 4dbd48ce2..104e80a20 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringstatsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringstatsaggregate.go @@ -16,28 +16,133 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // StringStatsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L684-L695 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L693-L704 type StringStatsAggregate struct { - AvgLength Float64 `json:"avg_length,omitempty"` - AvgLengthAsString *string `json:"avg_length_as_string,omitempty"` - Count int64 `json:"count"` - Distribution map[string]Float64 `json:"distribution,omitempty"` - Entropy Float64 `json:"entropy,omitempty"` - MaxLength int `json:"max_length,omitempty"` - MaxLengthAsString *string `json:"max_length_as_string,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - MinLength int `json:"min_length,omitempty"` - MinLengthAsString *string `json:"min_length_as_string,omitempty"` + AvgLength Float64 `json:"avg_length,omitempty"` + AvgLengthAsString *string `json:"avg_length_as_string,omitempty"` + Count int64 `json:"count"` + Distribution map[string]Float64 `json:"distribution,omitempty"` + Entropy Float64 `json:"entropy,omitempty"` + MaxLength int `json:"max_length,omitempty"` + MaxLengthAsString *string `json:"max_length_as_string,omitempty"` + Meta Metadata `json:"meta,omitempty"` + MinLength int `json:"min_length,omitempty"` + MinLengthAsString *string `json:"min_length_as_string,omitempty"` +} + +func (s *StringStatsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t 
{ + + case "avg_length": + if err := dec.Decode(&s.AvgLength); err != nil { + return err + } + + case "avg_length_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AvgLengthAsString = &o + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "distribution": + if err := dec.Decode(&s.Distribution); err != nil { + return err + } + + case "entropy": + if err := dec.Decode(&s.Entropy); err != nil { + return err + } + + case "max_length": + if err := dec.Decode(&s.MaxLength); err != nil { + return err + } + + case "max_length_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxLengthAsString = &o + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "min_length": + if err := dec.Decode(&s.MinLength); err != nil { + return err + } + + case "min_length_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MinLengthAsString = &o + + } + } + return nil } // NewStringStatsAggregate returns a StringStatsAggregate. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringstatsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringstatsaggregation.go index 31bc93914..4bfba16fa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringstatsaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringstatsaggregation.go @@ -16,18 +16,79 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // StringStatsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L147-L149 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L284-L290 type StringStatsAggregation struct { - Field *string `json:"field,omitempty"` - Missing Missing `json:"missing,omitempty"` - Script Script `json:"script,omitempty"` - ShowDistribution *bool `json:"show_distribution,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script Script `json:"script,omitempty"` + // ShowDistribution Shows the probability distribution for all characters. 
+ ShowDistribution *bool `json:"show_distribution,omitempty"` +} + +func (s *StringStatsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "show_distribution": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.ShowDistribution = &value + case bool: + s.ShowDistribution = &v + } + + } + } + return nil } // NewStringStatsAggregation returns a StringStatsAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringtermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringtermsaggregate.go index 2ec27b0c4..13be8ab9f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringtermsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringtermsaggregate.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // StringTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L383-L388 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L384-L389 type StringTermsAggregate struct { - Buckets BucketsStringTermsBucket `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` + Buckets BucketsStringTermsBucket `json:"buckets"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Meta Metadata `json:"meta,omitempty"` + SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` } func (s *StringTermsAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -59,21 +60,33 @@ func (s *StringTermsAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]StringTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []StringTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } case "doc_count_error_upper_bound": - if err := dec.Decode(&s.DocCountErrorUpperBound); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f } case "meta": @@ -82,8 +95,18 @@ func (s *StringTermsAggregate) UnmarshalJSON(data []byte) error { } case "sum_other_doc_count": - if err := dec.Decode(&s.SumOtherDocCount); 
err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SumOtherDocCount = &value + case float64: + f := int64(v) + s.SumOtherDocCount = &f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringtermsbucket.go index 359bf79bc..34e59dd59 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringtermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stringtermsbucket.go @@ -16,25 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // StringTermsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L394-L396 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L395-L397 type StringTermsBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -43,6 +41,7 @@ type StringTermsBucket struct { } func (s *StringTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -56,456 +55,34 @@ func (s *StringTermsBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := 
NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != 
nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); 
err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "doc_count_error": - if err := dec.Decode(&s.DocCountError); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCountError = &value + case float64: + f := int64(v) + s.DocCountError = &f } case "key": @@ -513,6 +90,519 @@ func (s *StringTermsBucket) UnmarshalJSON(data []byte) error { return err } + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") 
+ if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); 
err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { 
+ return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := 
NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } + } } return nil @@ -537,6 +627,7 @@ func (s StringTermsBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stupidbackoffsmoothingmodel.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stupidbackoffsmoothingmodel.go index 8fa403591..9c7f5dad9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stupidbackoffsmoothingmodel.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/stupidbackoffsmoothingmodel.go @@ -16,17 +16,62 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // StupidBackoffSmoothingModel type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L233-L235 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L460-L465 type StupidBackoffSmoothingModel struct { + // Discount A constant factor that the lower order n-gram model is discounted by. Discount Float64 `json:"discount"` } +func (s *StupidBackoffSmoothingModel) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "discount": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Discount = f + case float64: + f := Float64(v) + s.Discount = f + } + + } + } + return nil +} + // NewStupidBackoffSmoothingModel returns a StupidBackoffSmoothingModel. func NewStupidBackoffSmoothingModel() *StupidBackoffSmoothingModel { r := &StupidBackoffSmoothingModel{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/suggest.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/suggest.go index bcef897be..b32a9942b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/suggest.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/suggest.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,5 +26,5 @@ package types // PhraseSuggest // TermSuggest // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L34-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L34-L40 type Suggest interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/suggestcontext.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/suggestcontext.go index 1d5e0f0cc..d89f5325b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/suggestcontext.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/suggestcontext.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SuggestContext type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/specialized.ts#L37-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/specialized.ts#L37-L42 type SuggestContext struct { Name string `json:"name"` Path *string `json:"path,omitempty"` @@ -30,6 +38,60 @@ type SuggestContext struct { Type string `json:"type"` } +func (s *SuggestContext) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "path": + if err := dec.Decode(&s.Path); err != nil { + return err + } + + case "precision": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Precision = o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewSuggestContext returns a SuggestContext. func NewSuggestContext() *SuggestContext { r := &SuggestContext{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/suggester.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/suggester.go index 708c64f04..59a73de34 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/suggester.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/suggester.go @@ -16,18 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" ) // Suggester type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L101-L104 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L101-L104 type Suggester struct { Suggesters map[string]FieldSuggester `json:"-"` // Text Global suggest text, to avoid repetition when the same text is used in @@ -35,6 +39,51 @@ type Suggester struct { Text *string `json:"text,omitempty"` } +func (s *Suggester) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = &o + + default: + + if key, ok := t.(string); ok { + if s.Suggesters == nil { + s.Suggesters = make(map[string]FieldSuggester, 0) + } + raw := NewFieldSuggester() + if err := dec.Decode(&raw); err != nil { + return err + } + s.Suggesters[key] = *raw + } + + } + } + return nil +} + // MarhsalJSON overrides marshalling for types with additional properties func (s Suggester) MarshalJSON() ([]byte, error) { type opt Suggester @@ -54,6 +103,7 @@ func (s Suggester) MarshalJSON() ([]byte, error) { for key, value := range s.Suggesters { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Suggesters") data, err = json.Marshal(tmp) if err != nil { diff --git 
a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/suggestfuzziness.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/suggestfuzziness.go index ba579b02d..cde15c792 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/suggestfuzziness.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/suggestfuzziness.go @@ -16,19 +16,119 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SuggestFuzziness type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L138-L144 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L193-L221 type SuggestFuzziness struct { - Fuzziness Fuzziness `json:"fuzziness,omitempty"` - MinLength *int `json:"min_length,omitempty"` - PrefixLength *int `json:"prefix_length,omitempty"` - Transpositions *bool `json:"transpositions,omitempty"` - UnicodeAware *bool `json:"unicode_aware,omitempty"` + // Fuzziness The fuzziness factor. + Fuzziness Fuzziness `json:"fuzziness,omitempty"` + // MinLength Minimum length of the input before fuzzy suggestions are returned. + MinLength *int `json:"min_length,omitempty"` + // PrefixLength Minimum length of the input, which is not checked for fuzzy alternatives. + PrefixLength *int `json:"prefix_length,omitempty"` + // Transpositions If set to `true`, transpositions are counted as one change instead of two. 
+ Transpositions *bool `json:"transpositions,omitempty"` + // UnicodeAware If `true`, all measurements (like fuzzy edit distance, transpositions, and + // lengths) are measured in Unicode code points instead of in bytes. + // This is slightly slower than raw bytes. + UnicodeAware *bool `json:"unicode_aware,omitempty"` +} + +func (s *SuggestFuzziness) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fuzziness": + if err := dec.Decode(&s.Fuzziness); err != nil { + return err + } + + case "min_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinLength = &value + case float64: + f := int(v) + s.MinLength = &f + } + + case "prefix_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PrefixLength = &value + case float64: + f := int(v) + s.PrefixLength = &f + } + + case "transpositions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Transpositions = &value + case bool: + s.Transpositions = &v + } + + case "unicode_aware": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.UnicodeAware = &value + case bool: + s.UnicodeAware = &v + } + + } + } + return nil } // NewSuggestFuzziness returns a SuggestFuzziness. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sumaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sumaggregate.go index 5cb8121d6..9362cdbd3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sumaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sumaggregate.go @@ -16,19 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // SumAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L202-L206 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L203-L207 type SumAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to // aggregate, // unless specified otherwise. 
@@ -36,6 +40,48 @@ type SumAggregate struct { ValueAsString *string `json:"value_as_string,omitempty"` } +func (s *SumAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + // NewSumAggregate returns a SumAggregate. func NewSumAggregate() *SumAggregate { r := &SumAggregate{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sumaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sumaggregation.go index 9724f56ef..3bf003b5f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sumaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sumaggregation.go @@ -16,20 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // SumAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L151-L151 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L292-L292 type SumAggregation struct { - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. Missing Missing `json:"missing,omitempty"` Script Script `json:"script,omitempty"` } +func (s *SumAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil +} + // NewSumAggregation returns a SumAggregation. 
func NewSumAggregation() *SumAggregation { r := &SumAggregation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sumbucketaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sumbucketaggregation.go index 9770cd98c..156c434e4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sumbucketaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/sumbucketaggregation.go @@ -16,33 +16,38 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/gappolicy" ) // SumBucketAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/pipeline.ts#L286-L286 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/pipeline.ts#L373-L373 type SumBucketAggregation struct { // BucketsPath Path to the buckets that contain one set of values to correlate. - BucketsPath BucketsPath `json:"buckets_path,omitempty"` - Format *string `json:"format,omitempty"` - GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` + BucketsPath BucketsPath `json:"buckets_path,omitempty"` + // Format `DecimalFormat` pattern for the output value. + // If specified, the formatted value is returned in the aggregation’s + // `value_as_string` property. 
+ Format *string `json:"format,omitempty"` + // GapPolicy Policy to apply when gaps are found in the data. + GapPolicy *gappolicy.GapPolicy `json:"gap_policy,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` } func (s *SumBucketAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -62,9 +67,16 @@ func (s *SumBucketAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "gap_policy": if err := dec.Decode(&s.GapPolicy); err != nil { @@ -77,9 +89,16 @@ func (s *SumBucketAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/summary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/summary.go index 1b8f4331e..65837ff76 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/summary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/summary.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // Summary type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/enrich/_types/Policy.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/enrich/_types/Policy.ts#L23-L25 type Summary struct { Config map[policytype.PolicyType]EnrichPolicy `json:"config"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synccontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synccontainer.go index f2281cf95..f3fb3c949 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synccontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synccontainer.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // SyncContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/_types/Transform.ts#L167-L173 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/_types/Transform.ts#L169-L175 type SyncContainer struct { // Time Specifies that the transform uses a time field to synchronize the source and // destination indices. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymgraphtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymgraphtokenfilter.go index 215d59330..c3bf0b686 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymgraphtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymgraphtokenfilter.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/synonymformat" ) // SynonymGraphTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L109-L118 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L110-L119 type SynonymGraphTokenFilter struct { Expand *bool `json:"expand,omitempty"` Format *synonymformat.SynonymFormat `json:"format,omitempty"` @@ -39,11 +45,135 @@ type SynonymGraphTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *SynonymGraphTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expand": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Expand = &value + case bool: + s.Expand = &v + } + + case "format": + if err 
:= dec.Decode(&s.Format); err != nil { + return err + } + + case "lenient": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Lenient = &value + case bool: + s.Lenient = &v + } + + case "synonyms": + if err := dec.Decode(&s.Synonyms); err != nil { + return err + } + + case "synonyms_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SynonymsPath = &o + + case "tokenizer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tokenizer = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "updateable": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Updateable = &value + case bool: + s.Updateable = &v + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SynonymGraphTokenFilter) MarshalJSON() ([]byte, error) { + type innerSynonymGraphTokenFilter SynonymGraphTokenFilter + tmp := innerSynonymGraphTokenFilter{ + Expand: s.Expand, + Format: s.Format, + Lenient: s.Lenient, + Synonyms: s.Synonyms, + SynonymsPath: s.SynonymsPath, + Tokenizer: s.Tokenizer, + Type: s.Type, + Updateable: s.Updateable, + Version: s.Version, + } + + tmp.Type = "synonym_graph" + + return json.Marshal(tmp) +} + // NewSynonymGraphTokenFilter returns a SynonymGraphTokenFilter. 
func NewSynonymGraphTokenFilter() *SynonymGraphTokenFilter { r := &SynonymGraphTokenFilter{} - r.Type = "synonym_graph" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymrule.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymrule.go new file mode 100644 index 000000000..347e2eb22 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymrule.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + +// SynonymRule type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/synonyms/_types/SynonymRule.ts#L26-L35 +type SynonymRule struct { + // Id Synonym Rule identifier + Id *string `json:"id,omitempty"` + // Synonyms Synonyms, in Solr format, that conform the synonym rule. 
See + // https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-synonym-graph-tokenfilter.html#_solr_synonyms_2 + Synonyms string `json:"synonyms"` +} + +func (s *SynonymRule) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "synonyms": + if err := dec.Decode(&s.Synonyms); err != nil { + return err + } + + } + } + return nil +} + +// NewSynonymRule returns a SynonymRule. +func NewSynonymRule() *SynonymRule { + r := &SynonymRule{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymruleread.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymruleread.go new file mode 100644 index 000000000..cd5aa6053 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymruleread.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + +// SynonymRuleRead type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/synonyms/_types/SynonymRule.ts#L38-L47 +type SynonymRuleRead struct { + // Id Synonym Rule identifier + Id string `json:"id"` + // Synonyms Synonyms, in Solr format, that conform the synonym rule. See + // https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-synonym-graph-tokenfilter.html#_solr_synonyms_2 + Synonyms string `json:"synonyms"` +} + +func (s *SynonymRuleRead) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "synonyms": + if err := dec.Decode(&s.Synonyms); err != nil { + return err + } + + } + } + return nil +} + +// NewSynonymRuleRead returns a SynonymRuleRead. +func NewSynonymRuleRead() *SynonymRuleRead { + r := &SynonymRuleRead{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymssetitem.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymssetitem.go new file mode 100644 index 000000000..6e3724a5c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymssetitem.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// SynonymsSetItem type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/synonyms/get_synonyms_sets/SynonymsSetsGetResponse.ts#L30-L39 +type SynonymsSetItem struct { + // Count Number of synonym rules that the synonym set contains + Count int `json:"count"` + // SynonymsSet Synonyms set identifier + SynonymsSet string `json:"synonyms_set"` +} + +func (s *SynonymsSetItem) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Count = value + case float64: + f := int(v) + s.Count = f + } + + case "synonyms_set": + if err := dec.Decode(&s.SynonymsSet); err != nil { + return err + } + + } + } + return nil +} + +// NewSynonymsSetItem returns a SynonymsSetItem. 
+func NewSynonymsSetItem() *SynonymsSetItem { + r := &SynonymsSetItem{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymsupdateresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymsupdateresult.go new file mode 100644 index 000000000..de15dc82e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymsupdateresult.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/result" +) + +// SynonymsUpdateResult type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/synonyms/_types/SynonymsUpdateResult.ts#L23-L34 +type SynonymsUpdateResult struct { + // ReloadAnalyzersDetails Updating synonyms in a synonym set reloads the associated analyzers. 
+ // This is the analyzers reloading result + ReloadAnalyzersDetails ReloadResult `json:"reload_analyzers_details"` + // Result Update operation result + Result result.Result `json:"result"` +} + +// NewSynonymsUpdateResult returns a SynonymsUpdateResult. +func NewSynonymsUpdateResult() *SynonymsUpdateResult { + r := &SynonymsUpdateResult{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymtokenfilter.go index beff3e0e0..3fd6647c5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/synonymtokenfilter.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/synonymformat" ) // SynonymTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L120-L129 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L121-L130 type SynonymTokenFilter struct { Expand *bool `json:"expand,omitempty"` Format *synonymformat.SynonymFormat `json:"format,omitempty"` @@ -39,11 +45,135 @@ type SynonymTokenFilter struct { Version *string `json:"version,omitempty"` } +func (s *SynonymTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "expand": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Expand = &value + case bool: + s.Expand = &v + } + + case "format": + if err := dec.Decode(&s.Format); err != nil { + return err + } + + case "lenient": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Lenient = &value + case bool: + s.Lenient = &v + } + + case "synonyms": + if err := dec.Decode(&s.Synonyms); err != nil { + return err + } + + case "synonyms_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SynonymsPath = &o + + case "tokenizer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tokenizer = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "updateable": + var tmp 
interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Updateable = &value + case bool: + s.Updateable = &v + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s SynonymTokenFilter) MarshalJSON() ([]byte, error) { + type innerSynonymTokenFilter SynonymTokenFilter + tmp := innerSynonymTokenFilter{ + Expand: s.Expand, + Format: s.Format, + Lenient: s.Lenient, + Synonyms: s.Synonyms, + SynonymsPath: s.SynonymsPath, + Tokenizer: s.Tokenizer, + Type: s.Type, + Updateable: s.Updateable, + Version: s.Version, + } + + tmp.Type = "synonym" + + return json.Marshal(tmp) +} + // NewSynonymTokenFilter returns a SynonymTokenFilter. func NewSynonymTokenFilter() *SynonymTokenFilter { r := &SynonymTokenFilter{} - r.Type = "synonym" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/targetmeanencodingpreprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/targetmeanencodingpreprocessor.go index 13fb4786a..9a082a8f4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/targetmeanencodingpreprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/targetmeanencodingpreprocessor.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TargetMeanEncodingPreprocessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model/types.ts#L49-L54 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model/types.ts#L49-L54 type TargetMeanEncodingPreprocessor struct { DefaultValue Float64 `json:"default_value"` FeatureName string `json:"feature_name"` @@ -30,6 +38,74 @@ type TargetMeanEncodingPreprocessor struct { TargetMap map[string]Float64 `json:"target_map"` } +func (s *TargetMeanEncodingPreprocessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "default_value": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.DefaultValue = f + case float64: + f := Float64(v) + s.DefaultValue = f + } + + case "feature_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeatureName = o + + case "field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Field = o + + case "target_map": + if s.TargetMap == nil { + s.TargetMap = make(map[string]Float64, 0) + } + if err := dec.Decode(&s.TargetMap); err != nil { + return err + } + + } + } + return nil +} + // NewTargetMeanEncodingPreprocessor returns a TargetMeanEncodingPreprocessor. 
func NewTargetMeanEncodingPreprocessor() *TargetMeanEncodingPreprocessor { r := &TargetMeanEncodingPreprocessor{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskfailure.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskfailure.go index 229f43c7d..3cad2543b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskfailure.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskfailure.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TaskFailure type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Errors.ts#L66-L71 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Errors.ts#L66-L71 type TaskFailure struct { NodeId string `json:"node_id"` Reason ErrorCause `json:"reason"` @@ -30,6 +38,63 @@ type TaskFailure struct { TaskId int64 `json:"task_id"` } +func (s *TaskFailure) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return err + } + + case "reason": + if err := dec.Decode(&s.Reason); err != nil { + return err + } + + case "status": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Status = o + + case 
"task_id": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TaskId = value + case float64: + f := int64(v) + s.TaskId = f + } + + } + } + return nil +} + // NewTaskFailure returns a TaskFailure. func NewTaskFailure() *TaskFailure { r := &TaskFailure{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskid.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskid.go index 860ab09bc..cdb981b28 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskid.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskid.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // string // int // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L113-L113 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L126-L126 type TaskId interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskinfo.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskinfo.go index 1d81c91c8..8e4f9e64e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskinfo.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskinfo.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TaskInfo type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/tasks/_types/TaskInfo.ts#L32-L46 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/tasks/_types/TaskInfo.ts#L32-L47 type TaskInfo struct { Action string `json:"action"` Cancellable bool `json:"cancellable"` @@ -35,8 +43,146 @@ type TaskInfo struct { RunningTime Duration `json:"running_time,omitempty"` RunningTimeInNanos int64 `json:"running_time_in_nanos"` StartTimeInMillis int64 `json:"start_time_in_millis"` - Status *TaskStatus `json:"status,omitempty"` - Type string `json:"type"` + // Status Task status information can vary wildly from task to task. 
+ Status json.RawMessage `json:"status,omitempty"` + Type string `json:"type"` +} + +func (s *TaskInfo) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "action": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Action = o + + case "cancellable": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Cancellable = value + case bool: + s.Cancellable = v + } + + case "cancelled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Cancelled = &value + case bool: + s.Cancelled = &v + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "headers": + if s.Headers == nil { + s.Headers = make(map[string]string, 0) + } + if err := dec.Decode(&s.Headers); err != nil { + return err + } + + case "id": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Id = value + case float64: + f := int64(v) + s.Id = f + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return err + } + + case "parent_task_id": + if err := dec.Decode(&s.ParentTaskId); err != nil { + return err + } + + case "running_time": + if err := dec.Decode(&s.RunningTime); err != nil { + return err + } + + case "running_time_in_nanos": + if err := dec.Decode(&s.RunningTimeInNanos); err != nil { + 
return err + } + + case "start_time_in_millis": + if err := dec.Decode(&s.StartTimeInMillis); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil } // NewTaskInfo returns a TaskInfo. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskinfos.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskinfos.go index c4d472c59..7ff6122b2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskinfos.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskinfos.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // []TaskInfo // map[string]ParentTaskInfo // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/tasks/_types/TaskListResponseBase.ts#L40-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/tasks/_types/TaskListResponseBase.ts#L40-L43 type TaskInfos interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tasksrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tasksrecord.go index f334ad4e3..acf1fec7e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tasksrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tasksrecord.go @@ -16,48 +16,240 @@ // under the 
License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TasksRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/tasks/types.ts#L22-L101 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/tasks/types.ts#L22-L101 type TasksRecord struct { - // Action task action + // Action The task action. Action *string `json:"action,omitempty"` - // Description task action + // Description The task action description. Description *string `json:"description,omitempty"` - // Id id of the task with the node + // Id The identifier of the task with the node. Id *string `json:"id,omitempty"` - // Ip ip address + // Ip The IP address for the node. Ip *string `json:"ip,omitempty"` - // Node node name + // Node The node name. Node *string `json:"node,omitempty"` - // NodeId unique node id + // NodeId The unique node identifier. NodeId *string `json:"node_id,omitempty"` - // ParentTaskId parent task id + // ParentTaskId The parent task identifier. ParentTaskId *string `json:"parent_task_id,omitempty"` - // Port bound transport port + // Port The bound transport port for the node. Port *string `json:"port,omitempty"` - // RunningTime running time + // RunningTime The running time. RunningTime *string `json:"running_time,omitempty"` - // RunningTimeNs running time ns + // RunningTimeNs The running time in nanoseconds. RunningTimeNs *string `json:"running_time_ns,omitempty"` - // StartTime start time in ms + // StartTime The start time in milliseconds. 
StartTime *string `json:"start_time,omitempty"` - // TaskId unique task id + // TaskId The unique task identifier. TaskId *string `json:"task_id,omitempty"` - // Timestamp start time in HH:MM:SS + // Timestamp The start time in `HH:MM:SS` format. Timestamp *string `json:"timestamp,omitempty"` - // Type task type + // Type The task type. Type *string `json:"type,omitempty"` - // Version es version + // Version The Elasticsearch version. Version *string `json:"version,omitempty"` - // XOpaqueId X-Opaque-ID header + // XOpaqueId The X-Opaque-ID header. XOpaqueId *string `json:"x_opaque_id,omitempty"` } +func (s *TasksRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "action", "ac": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Action = &o + + case "description", "desc": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "ip", "i": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Ip = &o + + case "node", "n": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = &o + + case "node_id", "ni": + if err := dec.Decode(&s.NodeId); err != nil { + return err + } + + case "parent_task_id", "pti": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + 
return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ParentTaskId = &o + + case "port", "po": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Port = &o + + case "running_time", "time": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RunningTime = &o + + case "running_time_ns": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RunningTimeNs = &o + + case "start_time", "start": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.StartTime = &o + + case "task_id", "ti": + if err := dec.Decode(&s.TaskId); err != nil { + return err + } + + case "timestamp", "ts", "hms", "hhmmss": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Timestamp = &o + + case "type", "ty": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + case "version", "v": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + case "x_opaque_id", "x": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.XOpaqueId = &o + + } + } + return nil +} + // NewTasksRecord returns a TasksRecord. 
func NewTasksRecord() *TasksRecord { r := &TasksRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskstatus.go deleted file mode 100644 index e5243ee87..000000000 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/taskstatus.go +++ /dev/null @@ -1,51 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 - -package types - -// TaskStatus type. 
-// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/tasks/_types/TaskStatus.ts#L24-L42 -type TaskStatus struct { - Batches int64 `json:"batches"` - Canceled *string `json:"canceled,omitempty"` - Created int64 `json:"created"` - Deleted int64 `json:"deleted"` - Failures []string `json:"failures,omitempty"` - Noops int64 `json:"noops"` - RequestsPerSecond float32 `json:"requests_per_second"` - Retries Retries `json:"retries"` - Throttled Duration `json:"throttled,omitempty"` - ThrottledMillis int64 `json:"throttled_millis"` - ThrottledUntil Duration `json:"throttled_until,omitempty"` - ThrottledUntilMillis int64 `json:"throttled_until_millis"` - TimedOut *bool `json:"timed_out,omitempty"` - Took *int64 `json:"took,omitempty"` - Total int64 `json:"total"` - Updated int64 `json:"updated"` - VersionConflicts int64 `json:"version_conflicts"` -} - -// NewTaskStatus returns a TaskStatus. -func NewTaskStatus() *TaskStatus { - r := &TaskStatus{} - - return r -} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tdigest.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tdigest.go index 57e9ab12e..b62c2ea62 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tdigest.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tdigest.go @@ -16,17 +16,64 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TDigest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L123-L125 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L223-L228 type TDigest struct { + // Compression Limits the maximum number of nodes used by the underlying TDigest algorithm + // to `20 * compression`, enabling control of memory usage and approximation + // error. Compression *int `json:"compression,omitempty"` } +func (s *TDigest) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compression": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Compression = &value + case float64: + f := int(v) + s.Compression = &f + } + + } + } + return nil +} + // NewTDigest returns a TDigest. func NewTDigest() *TDigest { r := &TDigest{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tdigestpercentileranksaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tdigestpercentileranksaggregate.go index ecd587446..0f04545cc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tdigestpercentileranksaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tdigestpercentileranksaggregate.go @@ -16,20 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // TDigestPercentileRanksAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L174-L175 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L175-L176 type TDigestPercentileRanksAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Values Percentiles `json:"values"` + Meta Metadata `json:"meta,omitempty"` + Values Percentiles `json:"values"` +} + +func (s *TDigestPercentileRanksAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "values": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(KeyedPercentiles, 0) + if err := localDec.Decode(&o); err != nil { + return err + } + s.Values = o + case '[': + o := []ArrayPercentilesItem{} + if err := localDec.Decode(&o); err != nil { + return err + } + s.Values = o + } + + } + } + return nil } // NewTDigestPercentileRanksAggregate returns a TDigestPercentileRanksAggregate. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tdigestpercentilesaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tdigestpercentilesaggregate.go index f12680dd0..669073095 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tdigestpercentilesaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tdigestpercentilesaggregate.go @@ -16,20 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // TDigestPercentilesAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L171-L172 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L172-L173 type TDigestPercentilesAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Values Percentiles `json:"values"` + Meta Metadata `json:"meta,omitempty"` + Values Percentiles `json:"values"` +} + +func (s *TDigestPercentilesAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "values": + + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + source := bytes.NewReader(rawMsg) + localDec := json.NewDecoder(source) + switch rawMsg[0] { + case '{': + o := make(KeyedPercentiles, 0) + if err := localDec.Decode(&o); err != nil { + return 
err + } + s.Values = o + case '[': + o := []ArrayPercentilesItem{} + if err := localDec.Decode(&o); err != nil { + return err + } + s.Values = o + } + + } + } + return nil } // NewTDigestPercentilesAggregate returns a TDigestPercentilesAggregate. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/template.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/template.go index bd3b90bdf..c0ae751fd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/template.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/template.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // Template type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L33-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/simulate_template/IndicesSimulateTemplateResponse.ts#L33-L37 type Template struct { Aliases map[string]Alias `json:"aliases"` Mappings TypeMapping `json:"mappings"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/templateconfig.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/templateconfig.go new file mode 100644 index 000000000..cea04ab3b --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/templateconfig.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// TemplateConfig type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/msearch_template/types.ts#L28-L54 +type TemplateConfig struct { + // Explain If `true`, returns detailed information about score calculation as part of + // each hit. + Explain *bool `json:"explain,omitempty"` + // Id ID of the search template to use. If no source is specified, + // this parameter is required. + Id *string `json:"id,omitempty"` + // Params Key-value pairs used to replace Mustache variables in the template. + // The key is the variable name. + // The value is the variable value. + Params map[string]json.RawMessage `json:"params,omitempty"` + // Profile If `true`, the query execution is profiled. + Profile *bool `json:"profile,omitempty"` + // Source An inline search template. Supports the same parameters as the search API's + // request body. Also supports Mustache variables. If no id is specified, this + // parameter is required. 
+ Source *string `json:"source,omitempty"` +} + +func (s *TemplateConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "explain": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Explain = &value + case bool: + s.Explain = &v + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return err + } + + case "profile": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Profile = &value + case bool: + s.Profile = &v + } + + case "source": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Source = &o + + } + } + return nil +} + +// NewTemplateConfig returns a TemplateConfig. +func NewTemplateConfig() *TemplateConfig { + r := &TemplateConfig{ + Params: make(map[string]json.RawMessage, 0), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/templatemapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/templatemapping.go index 5665596ad..f33ffc416 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/templatemapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/templatemapping.go @@ -16,17 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // TemplateMapping type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/TemplateMapping.ts#L27-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/TemplateMapping.ts#L27-L34 type TemplateMapping struct { Aliases map[string]Alias `json:"aliases"` IndexPatterns []string `json:"index_patterns"` @@ -36,6 +40,73 @@ type TemplateMapping struct { Version *int64 `json:"version,omitempty"` } +func (s *TemplateMapping) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "aliases": + if s.Aliases == nil { + s.Aliases = make(map[string]Alias, 0) + } + if err := dec.Decode(&s.Aliases); err != nil { + return err + } + + case "index_patterns": + if err := dec.Decode(&s.IndexPatterns); err != nil { + return err + } + + case "mappings": + if err := dec.Decode(&s.Mappings); err != nil { + return err + } + + case "order": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Order = value + case float64: + f := int(v) + s.Order = f + } + + case "settings": + if s.Settings == nil { + s.Settings = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Settings); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewTemplateMapping returns a TemplateMapping. 
func NewTemplateMapping() *TemplateMapping { r := &TemplateMapping{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/templatesrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/templatesrecord.go index 8fd7006b2..ecd9ad4ec 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/templatesrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/templatesrecord.go @@ -16,26 +16,100 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TemplatesRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/templates/types.ts#L22-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/templates/types.ts#L22-L48 type TemplatesRecord struct { - // ComposedOf component templates comprising index template + // ComposedOf The component templates that comprise the index template. ComposedOf *string `json:"composed_of,omitempty"` - // IndexPatterns template index patterns + // IndexPatterns The template index patterns. IndexPatterns *string `json:"index_patterns,omitempty"` - // Name template name + // Name The template name. Name *string `json:"name,omitempty"` - // Order template application order/priority number + // Order The template application order or priority number. Order *string `json:"order,omitempty"` - // Version version + // Version The template version. 
Version string `json:"version,omitempty"` } +func (s *TemplatesRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "composed_of", "c": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ComposedOf = &o + + case "index_patterns", "t": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexPatterns = &o + + case "name", "n": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "order", "o", "p": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Order = &o + + case "version", "v": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewTemplatesRecord returns a TemplatesRecord. func NewTemplatesRecord() *TemplatesRecord { r := &TemplatesRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/term.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/term.go index 3e96502cd..9d485d269 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/term.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/term.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Term type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/termvectors/types.ts#L34-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/termvectors/types.ts#L34-L40 type Term struct { DocFreq *int `json:"doc_freq,omitempty"` Score *Float64 `json:"score,omitempty"` @@ -31,6 +39,95 @@ type Term struct { Ttf *int `json:"ttf,omitempty"` } +func (s *Term) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "doc_freq": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DocFreq = &value + case float64: + f := int(v) + s.DocFreq = &f + } + + case "score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Score = &f + case float64: + f := Float64(v) + s.Score = &f + } + + case "term_freq": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TermFreq = value + case float64: + f := int(v) + s.TermFreq = f + } + + case "tokens": + if err := dec.Decode(&s.Tokens); err != nil { + return err + } + + case "ttf": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.Atoi(v) + if err != nil { + return err + } + s.Ttf = &value + case float64: + f := int(v) + s.Ttf = &f + } + + } + } + return nil +} + // NewTerm returns a Term. func NewTerm() *Term { r := &Term{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termquery.go index cb9324fad..cf70acabc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termquery.go @@ -16,18 +16,108 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TermQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/term.ts#L116-L121 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/term.ts#L217-L231 type TermQuery struct { - Boost *float32 `json:"boost,omitempty"` - CaseInsensitive *bool `json:"case_insensitive,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Value FieldValue `json:"value"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // CaseInsensitive Allows ASCII case insensitive matching of the value with the indexed field + // values when set to `true`. 
+ // When `false`, the case sensitivity of matching depends on the underlying + // field’s mapping. + CaseInsensitive *bool `json:"case_insensitive,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Value Term you wish to find in the provided field. + Value FieldValue `json:"value"` +} + +func (s *TermQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Value) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "case_insensitive": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.CaseInsensitive = &value + case bool: + s.CaseInsensitive = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + } + } + return nil } // NewTermQuery returns a TermQuery. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebasedoubletermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebasedoubletermsbucket.go index e7b90e237..13c6bdb30 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebasedoubletermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebasedoubletermsbucket.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // TermsAggregateBaseDoubleTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L376-L381 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L377-L382 type TermsAggregateBaseDoubleTermsBucket struct { - Buckets BucketsDoubleTermsBucket `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` + Buckets BucketsDoubleTermsBucket `json:"buckets"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Meta Metadata `json:"meta,omitempty"` + SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` } func (s *TermsAggregateBaseDoubleTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -59,21 +60,33 @@ func (s *TermsAggregateBaseDoubleTermsBucket) 
UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]DoubleTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []DoubleTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } case "doc_count_error_upper_bound": - if err := dec.Decode(&s.DocCountErrorUpperBound); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f } case "meta": @@ -82,8 +95,18 @@ func (s *TermsAggregateBaseDoubleTermsBucket) UnmarshalJSON(data []byte) error { } case "sum_other_doc_count": - if err := dec.Decode(&s.SumOtherDocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SumOtherDocCount = &value + case float64: + f := int64(v) + s.SumOtherDocCount = &f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebaselongtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebaselongtermsbucket.go index 0272b986c..effef9895 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebaselongtermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebaselongtermsbucket.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // TermsAggregateBaseLongTermsBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L376-L381 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L377-L382 type TermsAggregateBaseLongTermsBucket struct { - Buckets BucketsLongTermsBucket `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` + Buckets BucketsLongTermsBucket `json:"buckets"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Meta Metadata `json:"meta,omitempty"` + SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` } func (s *TermsAggregateBaseLongTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -59,21 +60,33 @@ func (s *TermsAggregateBaseLongTermsBucket) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]LongTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []LongTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } case "doc_count_error_upper_bound": - if err := dec.Decode(&s.DocCountErrorUpperBound); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := 
tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f } case "meta": @@ -82,8 +95,18 @@ func (s *TermsAggregateBaseLongTermsBucket) UnmarshalJSON(data []byte) error { } case "sum_other_doc_count": - if err := dec.Decode(&s.SumOtherDocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SumOtherDocCount = &value + case float64: + f := int64(v) + s.SumOtherDocCount = &f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebasemultitermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebasemultitermsbucket.go index b971060e1..7fc5d2ecd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebasemultitermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebasemultitermsbucket.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // TermsAggregateBaseMultiTermsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L376-L381 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L377-L382 type TermsAggregateBaseMultiTermsBucket struct { - Buckets BucketsMultiTermsBucket `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` + Buckets BucketsMultiTermsBucket `json:"buckets"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Meta Metadata `json:"meta,omitempty"` + SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` } func (s *TermsAggregateBaseMultiTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -59,21 +60,33 @@ func (s *TermsAggregateBaseMultiTermsBucket) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]MultiTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []MultiTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } case "doc_count_error_upper_bound": - if err := dec.Decode(&s.DocCountErrorUpperBound); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f } case "meta": @@ -82,8 +95,18 @@ func (s *TermsAggregateBaseMultiTermsBucket) UnmarshalJSON(data []byte) error { } case 
"sum_other_doc_count": - if err := dec.Decode(&s.SumOtherDocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SumOtherDocCount = &value + case float64: + f := int64(v) + s.SumOtherDocCount = &f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebasestringtermsbucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebasestringtermsbucket.go index ced8adaf7..8781deb62 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebasestringtermsbucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebasestringtermsbucket.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // TermsAggregateBaseStringTermsBucket type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L376-L381 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L377-L382 type TermsAggregateBaseStringTermsBucket struct { - Buckets BucketsStringTermsBucket `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` + Buckets BucketsStringTermsBucket `json:"buckets"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Meta Metadata `json:"meta,omitempty"` + SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` } func (s *TermsAggregateBaseStringTermsBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -59,21 +60,33 @@ func (s *TermsAggregateBaseStringTermsBucket) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]StringTermsBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': o := []StringTermsBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } case "doc_count_error_upper_bound": - if err := dec.Decode(&s.DocCountErrorUpperBound); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f } case "meta": @@ -82,8 +95,18 @@ func (s *TermsAggregateBaseStringTermsBucket) UnmarshalJSON(data []byte) error { } case 
"sum_other_doc_count": - if err := dec.Decode(&s.SumOtherDocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SumOtherDocCount = &value + case float64: + f := int64(v) + s.SumOtherDocCount = &f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebasevoid.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebasevoid.go index 6b6371ee4..e14c156ad 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebasevoid.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregatebasevoid.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // TermsAggregateBaseVoid type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L376-L381 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L377-L382 type TermsAggregateBaseVoid struct { - Buckets BucketsVoid `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` + Buckets BucketsVoid `json:"buckets"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Meta Metadata `json:"meta,omitempty"` + SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` } func (s *TermsAggregateBaseVoid) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -59,21 +60,33 @@ func (s *TermsAggregateBaseVoid) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': - o := make(map[string]struct{}, 0) - localDec.Decode(&o) + o := make(map[string]interface{}, 0) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': - o := []struct{}{} - localDec.Decode(&o) + o := []interface{}{} + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } case "doc_count_error_upper_bound": - if err := dec.Decode(&s.DocCountErrorUpperBound); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f } case "meta": @@ -82,8 +95,18 @@ func (s *TermsAggregateBaseVoid) UnmarshalJSON(data []byte) error { } case "sum_other_doc_count": - if err := 
dec.Decode(&s.SumOtherDocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SumOtherDocCount = &value + case float64: + f := int64(v) + s.SumOtherDocCount = &f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregation.go index ba4099c90..9287b1116 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsaggregation.go @@ -16,48 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/missingorder" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationcollectmode" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationexecutionhint" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/missingorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/sortorder" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationcollectmode" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termsaggregationexecutionhint" ) // TermsAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L380-L397 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L910-L970 type TermsAggregation struct { - CollectMode *termsaggregationcollectmode.TermsAggregationCollectMode `json:"collect_mode,omitempty"` - Exclude []string `json:"exclude,omitempty"` - ExecutionHint *termsaggregationexecutionhint.TermsAggregationExecutionHint `json:"execution_hint,omitempty"` - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` - Include TermsInclude `json:"include,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - MinDocCount *int `json:"min_doc_count,omitempty"` - Missing Missing `json:"missing,omitempty"` - MissingBucket *bool `json:"missing_bucket,omitempty"` - MissingOrder *missingorder.MissingOrder `json:"missing_order,omitempty"` - Name *string `json:"name,omitempty"` - Order AggregateOrder `json:"order,omitempty"` - Script Script `json:"script,omitempty"` - ShardSize *int `json:"shard_size,omitempty"` - ShowTermDocCountError *bool `json:"show_term_doc_count_error,omitempty"` - Size *int `json:"size,omitempty"` - ValueType *string `json:"value_type,omitempty"` + // CollectMode Determines how child aggregations should be calculated: breadth-first or + // depth-first. + CollectMode *termsaggregationcollectmode.TermsAggregationCollectMode `json:"collect_mode,omitempty"` + // Exclude Values to exclude. + // Accepts regular expressions and partitions. + Exclude []string `json:"exclude,omitempty"` + // ExecutionHint Determines whether the aggregation will use field values directly or global + // ordinals. + ExecutionHint *termsaggregationexecutionhint.TermsAggregationExecutionHint `json:"execution_hint,omitempty"` + // Field The field from which to return terms. 
+ Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Include Values to include. + // Accepts regular expressions and partitions. + Include TermsInclude `json:"include,omitempty"` + Meta Metadata `json:"meta,omitempty"` + // MinDocCount Only return values that are found in more than `min_doc_count` hits. + MinDocCount *int `json:"min_doc_count,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + MissingBucket *bool `json:"missing_bucket,omitempty"` + MissingOrder *missingorder.MissingOrder `json:"missing_order,omitempty"` + Name *string `json:"name,omitempty"` + // Order Specifies the sort order of the buckets. + // Defaults to sorting by descending document count. + Order AggregateOrder `json:"order,omitempty"` + Script Script `json:"script,omitempty"` + // ShardSize The number of candidate terms produced by each shard. + // By default, `shard_size` will be automatically estimated based on the number + // of shards and the `size` parameter. + ShardSize *int `json:"shard_size,omitempty"` + // ShowTermDocCountError Set to `true` to return the `doc_count_error_upper_bound`, which is an upper + // bound to the error on the `doc_count` returned by each shard. + ShowTermDocCountError *bool `json:"show_term_doc_count_error,omitempty"` + // Size The number of buckets returned out of the overall terms list. + Size *int `json:"size,omitempty"` + // ValueType Coerced unmapped fields into the specified type. 
+ ValueType *string `json:"value_type,omitempty"` } func (s *TermsAggregation) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -77,8 +99,19 @@ func (s *TermsAggregation) UnmarshalJSON(data []byte) error { } case "exclude": - if err := dec.Decode(&s.Exclude); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Exclude = append(s.Exclude, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Exclude); err != nil { + return err + } } case "execution_hint": @@ -92,9 +125,16 @@ func (s *TermsAggregation) UnmarshalJSON(data []byte) error { } case "format": - if err := dec.Decode(&s.Format); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o case "include": if err := dec.Decode(&s.Include); err != nil { @@ -107,8 +147,19 @@ func (s *TermsAggregation) UnmarshalJSON(data []byte) error { } case "min_doc_count": - if err := dec.Decode(&s.MinDocCount); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinDocCount = &value + case float64: + f := int(v) + s.MinDocCount = &f } case "missing": @@ -117,8 +168,17 @@ func (s *TermsAggregation) UnmarshalJSON(data []byte) error { } case "missing_bucket": - if err := dec.Decode(&s.MissingBucket); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.MissingBucket = &value + case bool: + s.MissingBucket = &v } case "missing_order": @@ -127,9 +187,16 @@ func (s 
*TermsAggregation) UnmarshalJSON(data []byte) error { } case "name": - if err := dec.Decode(&s.Name); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o case "order": @@ -138,15 +205,17 @@ func (s *TermsAggregation) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]sortorder.SortOrder, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Order = o - case '[': o := make([]map[string]sortorder.SortOrder, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Order = o } @@ -156,24 +225,62 @@ func (s *TermsAggregation) UnmarshalJSON(data []byte) error { } case "shard_size": - if err := dec.Decode(&s.ShardSize); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f } case "show_term_doc_count_error": - if err := dec.Decode(&s.ShowTermDocCountError); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.ShowTermDocCountError = &value + case bool: + s.ShowTermDocCountError = &v } case "size": - if err := dec.Decode(&s.Size); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f } case "value_type": - if err := dec.Decode(&s.ValueType); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := 
string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueType = &o } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsexclude.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsexclude.go index d3e91c5ce..3558d4aa7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsexclude.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsexclude.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // TermsExclude type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L422-L423 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L1001-L1002 type TermsExclude []string diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsgrouping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsgrouping.go index 54d9445cc..76ed80fb7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsgrouping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsgrouping.go @@ -16,17 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // TermsGrouping type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/rollup/_types/Groupings.ts#L40-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/rollup/_types/Groupings.ts#L75-L82 type TermsGrouping struct { + // Fields The set of fields that you wish to collect terms for. + // This array can contain fields that are both keyword and numerics. + // Order does not matter. Fields []string `json:"fields"` } +func (s *TermsGrouping) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Fields = append(s.Fields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Fields); err != nil { + return err + } + } + + } + } + return nil +} + // NewTermsGrouping returns a TermsGrouping. func NewTermsGrouping() *TermsGrouping { r := &TermsGrouping{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsinclude.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsinclude.go index f667acd71..6d6c9f773 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsinclude.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsinclude.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,5 +26,5 @@ package types // []string // TermsPartition // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L419-L420 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L998-L999 type TermsInclude interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termslookup.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termslookup.go index 2c848daea..ce9b861a5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termslookup.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termslookup.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // TermsLookup type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/term.ts#L132-L137 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/term.ts#L242-L247 type TermsLookup struct { Id string `json:"id"` Index string `json:"index"` @@ -30,6 +37,46 @@ type TermsLookup struct { Routing *string `json:"routing,omitempty"` } +func (s *TermsLookup) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "path": + if err := dec.Decode(&s.Path); err != nil { + return err + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + } + } + return nil +} + // NewTermsLookup returns a TermsLookup. func NewTermsLookup() *TermsLookup { r := &TermsLookup{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termspartition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termspartition.go index daa6982e3..e3155ccba 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termspartition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termspartition.go @@ -16,16 +16,76 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TermsPartition type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L425-L428 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L1004-L1013 type TermsPartition struct { + // NumPartitions The number of partitions. NumPartitions int64 `json:"num_partitions"` - Partition int64 `json:"partition"` + // Partition The partition number for this request. + Partition int64 `json:"partition"` +} + +func (s *TermsPartition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "num_partitions": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.NumPartitions = value + case float64: + f := int64(v) + s.NumPartitions = f + } + + case "partition": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Partition = value + case float64: + f := int64(v) + s.Partition = f + } + + } + } + return nil } // NewTermsPartition returns a TermsPartition. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsquery.go index bd01d6edc..dde3cbf23 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsquery.go @@ -16,22 +16,89 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" "fmt" + "io" + "strconv" ) // TermsQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/term.ts#L123-L125 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/term.ts#L233-L235 type TermsQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. Boost *float32 `json:"boost,omitempty"` QueryName_ *string `json:"_name,omitempty"` - TermsQuery map[string]TermsQueryField `json:"-"` + TermsQuery map[string]TermsQueryField `json:"TermsQuery,omitempty"` +} + +func (s *TermsQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "TermsQuery": + if s.TermsQuery == nil { + s.TermsQuery = make(map[string]TermsQueryField, 0) + } + if err := 
dec.Decode(&s.TermsQuery); err != nil { + return err + } + + default: + + } + } + return nil } // MarhsalJSON overrides marshalling for types with additional properties @@ -53,6 +120,7 @@ func (s TermsQuery) MarshalJSON() ([]byte, error) { for key, value := range s.TermsQuery { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "TermsQuery") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsqueryfield.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsqueryfield.go index f8d6ef025..fca83c3b0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsqueryfield.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsqueryfield.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // []FieldValue // TermsLookup // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/term.ts#L127-L130 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/term.ts#L237-L240 type TermsQueryField interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termssetquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termssetquery.go index cd2f03596..174c11ed5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termssetquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termssetquery.go @@ -16,19 +16,100 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TermsSetQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/term.ts#L139-L143 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/term.ts#L249-L262 type TermsSetQuery struct { - Boost *float32 `json:"boost,omitempty"` - MinimumShouldMatchField *string `json:"minimum_should_match_field,omitempty"` - MinimumShouldMatchScript Script `json:"minimum_should_match_script,omitempty"` - QueryName_ *string `json:"_name,omitempty"` - Terms []string `json:"terms"` + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // MinimumShouldMatchField Numeric field containing the number of matching terms required to return a + // document. + MinimumShouldMatchField *string `json:"minimum_should_match_field,omitempty"` + // MinimumShouldMatchScript Custom script containing the number of matching terms required to return a + // document. + MinimumShouldMatchScript Script `json:"minimum_should_match_script,omitempty"` + QueryName_ *string `json:"_name,omitempty"` + // Terms Array of terms you wish to find in the provided field. 
+ Terms []string `json:"terms"` +} + +func (s *TermsSetQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "minimum_should_match_field": + if err := dec.Decode(&s.MinimumShouldMatchField); err != nil { + return err + } + + case "minimum_should_match_script": + if err := dec.Decode(&s.MinimumShouldMatchScript); err != nil { + return err + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "terms": + if err := dec.Decode(&s.Terms); err != nil { + return err + } + + } + } + return nil } // NewTermsSetQuery returns a TermsSetQuery. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsuggest.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsuggest.go index 1c9c03ab8..cb0386875 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsuggest.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsuggest.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TermSuggest type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L64-L69 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L64-L69 type TermSuggest struct { Length int `json:"length"` Offset int `json:"offset"` @@ -30,6 +38,86 @@ type TermSuggest struct { Text string `json:"text"` } +func (s *TermSuggest) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Length = value + case float64: + f := int(v) + s.Length = f + } + + case "offset": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Offset = value + case float64: + f := int(v) + s.Offset = f + } + + case "options": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewTermSuggestOption() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Options = append(s.Options, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Options); err != nil { + return err + } + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + } + } + return nil +} + // NewTermSuggest returns a TermSuggest. 
func NewTermSuggest() *TermSuggest { r := &TermSuggest{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsuggester.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsuggester.go index 308e598a4..99ed85fa9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsuggester.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsuggester.go @@ -16,11 +16,17 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/stringdistance" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestmode" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestsort" @@ -28,23 +34,263 @@ import ( // TermSuggester type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L252-L265 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L503-L565 type TermSuggester struct { - Analyzer *string `json:"analyzer,omitempty"` - Field string `json:"field"` - LowercaseTerms *bool `json:"lowercase_terms,omitempty"` - MaxEdits *int `json:"max_edits,omitempty"` - MaxInspections *int `json:"max_inspections,omitempty"` - MaxTermFreq *float32 `json:"max_term_freq,omitempty"` - MinDocFreq *float32 `json:"min_doc_freq,omitempty"` - MinWordLength *int `json:"min_word_length,omitempty"` - PrefixLength *int `json:"prefix_length,omitempty"` - ShardSize *int `json:"shard_size,omitempty"` - Size *int `json:"size,omitempty"` - Sort *suggestsort.SuggestSort `json:"sort,omitempty"` + // Analyzer The analyzer to analyze the suggest text with. + // Defaults to the search analyzer of the suggest field. + Analyzer *string `json:"analyzer,omitempty"` + // Field The field to fetch the candidate suggestions from. + // Needs to be set globally or per suggestion. + Field string `json:"field"` + LowercaseTerms *bool `json:"lowercase_terms,omitempty"` + // MaxEdits The maximum edit distance candidate suggestions can have in order to be + // considered as a suggestion. + // Can only be `1` or `2`. + MaxEdits *int `json:"max_edits,omitempty"` + // MaxInspections A factor that is used to multiply with the shard_size in order to inspect + // more candidate spelling corrections on the shard level. + // Can improve accuracy at the cost of performance. + MaxInspections *int `json:"max_inspections,omitempty"` + // MaxTermFreq The maximum threshold in number of documents in which a suggest text token + // can exist in order to be included. 
+ // Can be a relative percentage number (for example `0.4`) or an absolute number + // to represent document frequencies. + // If a value higher than 1 is specified, then fractional can not be specified. + MaxTermFreq *float32 `json:"max_term_freq,omitempty"` + // MinDocFreq The minimal threshold in number of documents a suggestion should appear in. + // This can improve quality by only suggesting high frequency terms. + // Can be specified as an absolute number or as a relative percentage of number + // of documents. + // If a value higher than 1 is specified, then the number cannot be fractional. + MinDocFreq *float32 `json:"min_doc_freq,omitempty"` + // MinWordLength The minimum length a suggest text term must have in order to be included. + MinWordLength *int `json:"min_word_length,omitempty"` + // PrefixLength The number of minimal prefix characters that must match in order be a + // candidate for suggestions. + // Increasing this number improves spellcheck performance. + PrefixLength *int `json:"prefix_length,omitempty"` + // ShardSize Sets the maximum number of suggestions to be retrieved from each individual + // shard. + ShardSize *int `json:"shard_size,omitempty"` + // Size The maximum corrections to be returned per suggest text token. + Size *int `json:"size,omitempty"` + // Sort Defines how suggestions should be sorted per suggest text term. + Sort *suggestsort.SuggestSort `json:"sort,omitempty"` + // StringDistance The string distance implementation to use for comparing how similar suggested + // terms are. StringDistance *stringdistance.StringDistance `json:"string_distance,omitempty"` - SuggestMode *suggestmode.SuggestMode `json:"suggest_mode,omitempty"` - Text *string `json:"text,omitempty"` + // SuggestMode Controls what suggestions are included or controls for what suggest text + // terms, suggestions should be suggested. + SuggestMode *suggestmode.SuggestMode `json:"suggest_mode,omitempty"` + // Text The suggest text. 
+ // Needs to be set globally or per suggestion. + Text *string `json:"text,omitempty"` +} + +func (s *TermSuggester) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "analyzer": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "lowercase_terms": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.LowercaseTerms = &value + case bool: + s.LowercaseTerms = &v + } + + case "max_edits": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxEdits = &value + case float64: + f := int(v) + s.MaxEdits = &f + } + + case "max_inspections": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxInspections = &value + case float64: + f := int(v) + s.MaxInspections = &f + } + + case "max_term_freq": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.MaxTermFreq = &f + case float64: + f := float32(v) + s.MaxTermFreq = &f + } + + case "min_doc_freq": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.MinDocFreq = &f + case float64: + f := float32(v) + s.MinDocFreq = &f + } + + case "min_word_length": + 
+ var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinWordLength = &value + case float64: + f := int(v) + s.MinWordLength = &f + } + + case "prefix_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PrefixLength = &value + case float64: + f := int(v) + s.PrefixLength = &f + } + + case "shard_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + if err := dec.Decode(&s.Sort); err != nil { + return err + } + + case "string_distance": + if err := dec.Decode(&s.StringDistance); err != nil { + return err + } + + case "suggest_mode": + if err := dec.Decode(&s.SuggestMode); err != nil { + return err + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = &o + + } + } + return nil } // NewTermSuggester returns a TermSuggester. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsuggestoption.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsuggestoption.go index 3781b68f7..6225959b0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsuggestoption.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termsuggestoption.go @@ -16,13 +16,21 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TermSuggestOption type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/suggester.ts#L93-L99 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/suggester.ts#L93-L99 type TermSuggestOption struct { CollateMatch *bool `json:"collate_match,omitempty"` Freq int64 `json:"freq"` @@ -31,6 +39,95 @@ type TermSuggestOption struct { Text string `json:"text"` } +func (s *TermSuggestOption) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "collate_match": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.CollateMatch = &value + case bool: + s.CollateMatch = &v + } + + case "freq": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Freq = value + case float64: + f := int64(v) + s.Freq = f + } + + case "highlighted": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Highlighted = &o + + case "score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Score = f + case float64: + f := Float64(v) + s.Score = f + } + + case "text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Text = o + + } + } + return nil +} + // NewTermSuggestOption returns a TermSuggestOption. func NewTermSuggestOption() *TermSuggestOption { r := &TermSuggestOption{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termvector.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termvector.go index e6455bdc3..a2d67a4e7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termvector.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termvector.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // TermVector type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/termvectors/types.ts#L23-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/termvectors/types.ts#L23-L26 type TermVector struct { FieldStatistics FieldStatistics `json:"field_statistics"` Terms map[string]Term `json:"terms"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termvectorsfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termvectorsfilter.go index 7abe1f74b..2de2d1130 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termvectorsfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termvectorsfilter.go @@ -16,23 +16,173 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TermVectorsFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/termvectors/types.ts#L49-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/termvectors/types.ts#L49-L86 type TermVectorsFilter struct { - MaxDocFreq *int `json:"max_doc_freq,omitempty"` - MaxNumTerms *int `json:"max_num_terms,omitempty"` - MaxTermFreq *int `json:"max_term_freq,omitempty"` + // MaxDocFreq Ignore words which occur in more than this many docs. + // Defaults to unbounded. + MaxDocFreq *int `json:"max_doc_freq,omitempty"` + // MaxNumTerms Maximum number of terms that must be returned per field. 
+ MaxNumTerms *int `json:"max_num_terms,omitempty"` + // MaxTermFreq Ignore words with more than this frequency in the source doc. + // Defaults to unbounded. + MaxTermFreq *int `json:"max_term_freq,omitempty"` + // MaxWordLength The maximum word length above which words will be ignored. + // Defaults to unbounded. MaxWordLength *int `json:"max_word_length,omitempty"` - MinDocFreq *int `json:"min_doc_freq,omitempty"` - MinTermFreq *int `json:"min_term_freq,omitempty"` + // MinDocFreq Ignore terms which do not occur in at least this many docs. + MinDocFreq *int `json:"min_doc_freq,omitempty"` + // MinTermFreq Ignore words with less than this frequency in the source doc. + MinTermFreq *int `json:"min_term_freq,omitempty"` + // MinWordLength The minimum word length below which words will be ignored. MinWordLength *int `json:"min_word_length,omitempty"` } +func (s *TermVectorsFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_doc_freq": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxDocFreq = &value + case float64: + f := int(v) + s.MaxDocFreq = &f + } + + case "max_num_terms": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxNumTerms = &value + case float64: + f := int(v) + s.MaxNumTerms = &f + } + + case "max_term_freq": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxTermFreq = &value + case float64: + f := int(v) + s.MaxTermFreq = &f + } + + case "max_word_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, 
err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxWordLength = &value + case float64: + f := int(v) + s.MaxWordLength = &f + } + + case "min_doc_freq": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinDocFreq = &value + case float64: + f := int(v) + s.MinDocFreq = &f + } + + case "min_term_freq": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinTermFreq = &value + case float64: + f := int(v) + s.MinTermFreq = &f + } + + case "min_word_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinWordLength = &value + case float64: + f := int(v) + s.MinWordLength = &f + } + + } + } + return nil +} + // NewTermVectorsFilter returns a TermVectorsFilter. func NewTermVectorsFilter() *TermVectorsFilter { r := &TermVectorsFilter{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termvectorsresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termvectorsresult.go index 042e4dc77..a3616d930 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termvectorsresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termvectorsresult.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TermVectorsResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/mtermvectors/types.ts#L51-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/mtermvectors/types.ts#L96-L104 type TermVectorsResult struct { Error *ErrorCause `json:"error,omitempty"` Found *bool `json:"found,omitempty"` @@ -33,6 +41,83 @@ type TermVectorsResult struct { Version_ *int64 `json:"_version,omitempty"` } +func (s *TermVectorsResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "error": + if err := dec.Decode(&s.Error); err != nil { + return err + } + + case "found": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Found = &value + case bool: + s.Found = &v + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "term_vectors": + if s.TermVectors == nil { + s.TermVectors = make(map[string]TermVector, 0) + } + if err := dec.Decode(&s.TermVectors); err != nil { + return err + } + + case "took": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Took = &value + case float64: + f := int64(v) + s.Took = &f + } + + case "_version": + if err := dec.Decode(&s.Version_); err != nil { + return err + } + + } + } + return nil +} + // NewTermVectorsResult returns a TermVectorsResult. 
func NewTermVectorsResult() *TermVectorsResult { r := &TermVectorsResult{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termvectorstoken.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termvectorstoken.go index aaafb7971..5f554b0b8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termvectorstoken.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/termvectorstoken.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TermVectorsToken type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/termvectors/types.ts#L42-L47 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/termvectors/types.ts#L42-L47 type TermVectorsToken struct { EndOffset *int `json:"end_offset,omitempty"` Payload *string `json:"payload,omitempty"` @@ -30,6 +38,86 @@ type TermVectorsToken struct { StartOffset *int `json:"start_offset,omitempty"` } +func (s *TermVectorsToken) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "end_offset": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.EndOffset = &value + case float64: + f := int(v) + s.EndOffset = &f + } + + case "payload": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != 
nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Payload = &o + + case "position": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Position = value + case float64: + f := int(v) + s.Position = f + } + + case "start_offset": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.StartOffset = &value + case float64: + f := int(v) + s.StartOffset = &f + } + + } + } + return nil +} + // NewTermVectorsToken returns a TermVectorsToken. func NewTermVectorsToken() *TermVectorsToken { r := &TermVectorsToken{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/testpopulation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/testpopulation.go index d502f7f37..1a16c795e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/testpopulation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/testpopulation.go @@ -16,19 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // TestPopulation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L159-L163 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L310-L320 type TestPopulation struct { - Field string `json:"field"` + // Field The field to aggregate. 
+ Field string `json:"field"` + // Filter A filter used to define a set of records to run unpaired t-test on. Filter *Query `json:"filter,omitempty"` Script Script `json:"script,omitempty"` } +func (s *TestPopulation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "filter": + if err := dec.Decode(&s.Filter); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil +} + // NewTestPopulation returns a TestPopulation. func NewTestPopulation() *TestPopulation { r := &TestPopulation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textclassificationinferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textclassificationinferenceoptions.go index f7d27748c..0ddc5f61d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textclassificationinferenceoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textclassificationinferenceoptions.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TextClassificationInferenceOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L174-L184 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L189-L199 type TextClassificationInferenceOptions struct { // ClassificationLabels Classification labels to apply other than the stored labels. Must have the // same deminsions as the default configured labels @@ -36,6 +44,64 @@ type TextClassificationInferenceOptions struct { Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` } +func (s *TextClassificationInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classification_labels": + if err := dec.Decode(&s.ClassificationLabels); err != nil { + return err + } + + case "num_top_classes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumTopClasses = &value + case float64: + f := int(v) + s.NumTopClasses = &f + } + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return err + } + + } + } + return nil +} + // NewTextClassificationInferenceOptions returns a TextClassificationInferenceOptions. 
func NewTextClassificationInferenceOptions() *TextClassificationInferenceOptions { r := &TextClassificationInferenceOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textclassificationinferenceupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textclassificationinferenceupdateoptions.go index d15c92d53..385e313de 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textclassificationinferenceupdateoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textclassificationinferenceupdateoptions.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TextClassificationInferenceUpdateOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L328-L337 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L363-L372 type TextClassificationInferenceUpdateOptions struct { // ClassificationLabels Classification labels to apply other than the stored labels. 
Must have the // same deminsions as the default configured labels @@ -36,6 +44,64 @@ type TextClassificationInferenceUpdateOptions struct { Tokenization *NlpTokenizationUpdateOptions `json:"tokenization,omitempty"` } +func (s *TextClassificationInferenceUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classification_labels": + if err := dec.Decode(&s.ClassificationLabels); err != nil { + return err + } + + case "num_top_classes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumTopClasses = &value + case float64: + f := int(v) + s.NumTopClasses = &f + } + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return err + } + + } + } + return nil +} + // NewTextClassificationInferenceUpdateOptions returns a TextClassificationInferenceUpdateOptions. func NewTextClassificationInferenceUpdateOptions() *TextClassificationInferenceUpdateOptions { r := &TextClassificationInferenceUpdateOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textembedding.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textembedding.go index 74223d64d..41f6feca5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textembedding.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textembedding.go @@ -16,18 +16,70 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TextEmbedding type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Knn.ts#L48-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Knn.ts#L50-L53 type TextEmbedding struct { ModelId string `json:"model_id"` ModelText string `json:"model_text"` } +func (s *TextEmbedding) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = o + + case "model_text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelText = o + + } + } + return nil +} + // NewTextEmbedding returns a TextEmbedding. func NewTextEmbedding() *TextEmbedding { r := &TextEmbedding{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textembeddinginferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textembeddinginferenceoptions.go index 1b9edfbe0..e246f9355 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textembeddinginferenceoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textembeddinginferenceoptions.go @@ -16,14 +16,24 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TextEmbeddingInferenceOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L222-L228 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L237-L245 type TextEmbeddingInferenceOptions struct { + // EmbeddingSize The number of dimensions in the embedding output + EmbeddingSize *int `json:"embedding_size,omitempty"` // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. 
ResultsField *string `json:"results_field,omitempty"` @@ -31,6 +41,59 @@ type TextEmbeddingInferenceOptions struct { Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` } +func (s *TextEmbeddingInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "embedding_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.EmbeddingSize = &value + case float64: + f := int(v) + s.EmbeddingSize = &f + } + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return err + } + + } + } + return nil +} + // NewTextEmbeddingInferenceOptions returns a TextEmbeddingInferenceOptions. func NewTextEmbeddingInferenceOptions() *TextEmbeddingInferenceOptions { r := &TextEmbeddingInferenceOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textembeddinginferenceupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textembeddinginferenceupdateoptions.go index 8cfedc1b0..882dcfd04 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textembeddinginferenceupdateoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textembeddinginferenceupdateoptions.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TextEmbeddingInferenceUpdateOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L357-L361 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L392-L396 type TextEmbeddingInferenceUpdateOptions struct { // ResultsField The field that is added to incoming documents to contain the inference // prediction. Defaults to predicted_value. @@ -30,6 +38,43 @@ type TextEmbeddingInferenceUpdateOptions struct { Tokenization *NlpTokenizationUpdateOptions `json:"tokenization,omitempty"` } +func (s *TextEmbeddingInferenceUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return err + } + + } + } + return nil +} + // NewTextEmbeddingInferenceUpdateOptions returns a TextEmbeddingInferenceUpdateOptions. 
func NewTextEmbeddingInferenceUpdateOptions() *TextEmbeddingInferenceUpdateOptions { r := &TextEmbeddingInferenceUpdateOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textexpansioninferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textexpansioninferenceoptions.go new file mode 100644 index 000000000..08b3776e9 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textexpansioninferenceoptions.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// TextExpansionInferenceOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L247-L253 +type TextExpansionInferenceOptions struct { + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. 
+ ResultsField *string `json:"results_field,omitempty"` + // Tokenization The tokenization options + Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` +} + +func (s *TextExpansionInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return err + } + + } + } + return nil +} + +// NewTextExpansionInferenceOptions returns a TextExpansionInferenceOptions. +func NewTextExpansionInferenceOptions() *TextExpansionInferenceOptions { + r := &TextExpansionInferenceOptions{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textexpansioninferenceupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textexpansioninferenceupdateoptions.go new file mode 100644 index 000000000..73170d068 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textexpansioninferenceupdateoptions.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// TextExpansionInferenceUpdateOptions type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L398-L402 +type TextExpansionInferenceUpdateOptions struct { + // ResultsField The field that is added to incoming documents to contain the inference + // prediction. Defaults to predicted_value. + ResultsField *string `json:"results_field,omitempty"` + Tokenization *NlpTokenizationUpdateOptions `json:"tokenization,omitempty"` +} + +func (s *TextExpansionInferenceUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return err + } + + } + } + return nil +} + +// NewTextExpansionInferenceUpdateOptions returns a TextExpansionInferenceUpdateOptions. 
+func NewTextExpansionInferenceUpdateOptions() *TextExpansionInferenceUpdateOptions { + r := &TextExpansionInferenceUpdateOptions{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textexpansionquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textexpansionquery.go new file mode 100644 index 000000000..c0ea0e109 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textexpansionquery.go @@ -0,0 +1,125 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// TextExpansionQuery type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/TextExpansionQuery.ts#L22-L27 +type TextExpansionQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. 
+ // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. + Boost *float32 `json:"boost,omitempty"` + // ModelId The text expansion NLP model to use + ModelId string `json:"model_id"` + // ModelText The query text + ModelText string `json:"model_text"` + QueryName_ *string `json:"_name,omitempty"` +} + +func (s *TextExpansionQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "model_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelId = o + + case "model_text": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ModelText = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + +// NewTextExpansionQuery returns a TextExpansionQuery. 
+func NewTextExpansionQuery() *TextExpansionQuery { + r := &TextExpansionQuery{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textindexprefixes.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textindexprefixes.go index eefe35040..cf5864da9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textindexprefixes.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textindexprefixes.go @@ -16,18 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TextIndexPrefixes type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L242-L245 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L249-L252 type TextIndexPrefixes struct { MaxChars int `json:"max_chars"` MinChars int `json:"min_chars"` } +func (s *TextIndexPrefixes) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_chars": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxChars = value + case float64: + f := int(v) + s.MaxChars = f + } + + case "min_chars": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MinChars = 
value + case float64: + f := int(v) + s.MinChars = f + } + + } + } + return nil +} + // NewTextIndexPrefixes returns a TextIndexPrefixes. func NewTextIndexPrefixes() *TextIndexPrefixes { r := &TextIndexPrefixes{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textproperty.go index de3791bf5..dbae48352 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/textproperty.go @@ -16,25 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/termvectoroption" ) // TextProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L247-L263 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L254-L270 type TextProperty struct { Analyzer *string `json:"analyzer,omitempty"` Boost *Float64 `json:"boost,omitempty"` @@ -63,6 +63,7 @@ type TextProperty struct { } func (s *TextProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -77,18 +78,47 @@ func (s *TextProperty) UnmarshalJSON(data []byte) error { switch t { case "analyzer": - if err := dec.Decode(&s.Analyzer); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "dynamic": @@ -97,13 +127,31 @@ func (s *TextProperty) UnmarshalJSON(data []byte) error { } case "eager_global_ordinals": - if err := dec.Decode(&s.EagerGlobalOrdinals); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseBool(v) + if err != nil { + return err + } + s.EagerGlobalOrdinals = &value + case bool: + s.EagerGlobalOrdinals = &v } case "fielddata": - if err := dec.Decode(&s.Fielddata); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Fielddata = &value + case bool: + s.Fielddata = &v } case "fielddata_frequency_filter": @@ -112,6 +160,9 @@ func (s *TextProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -120,7 +171,9 @@ func (s *TextProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -399,20 +452,42 @@ func (s *TextProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "index_options": @@ -421,8 +496,17 @@ func (s *TextProperty) UnmarshalJSON(data []byte) error { } case "index_phrases": - if 
err := dec.Decode(&s.IndexPhrases); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IndexPhrases = &value + case bool: + s.IndexPhrases = &v } case "index_prefixes": @@ -431,21 +515,47 @@ func (s *TextProperty) UnmarshalJSON(data []byte) error { } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "norms": - if err := dec.Decode(&s.Norms); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Norms = &value + case bool: + s.Norms = &v } case "position_increment_gap": - if err := dec.Decode(&s.PositionIncrementGap); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PositionIncrementGap = &value + case float64: + f := int(v) + s.PositionIncrementGap = &f } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -454,7 +564,9 @@ func (s *TextProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -733,30 +845,62 @@ func (s *TextProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "search_analyzer": - if err := dec.Decode(&s.SearchAnalyzer); err != nil { + var tmp 
json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchAnalyzer = &o case "search_quote_analyzer": - if err := dec.Decode(&s.SearchQuoteAnalyzer); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchQuoteAnalyzer = &o case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "term_vector": @@ -774,6 +918,40 @@ func (s *TextProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s TextProperty) MarshalJSON() ([]byte, error) { + type innerTextProperty TextProperty + tmp := innerTextProperty{ + Analyzer: s.Analyzer, + Boost: s.Boost, + CopyTo: s.CopyTo, + Dynamic: s.Dynamic, + EagerGlobalOrdinals: s.EagerGlobalOrdinals, + Fielddata: s.Fielddata, + FielddataFrequencyFilter: s.FielddataFrequencyFilter, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + IndexOptions: s.IndexOptions, + IndexPhrases: s.IndexPhrases, + IndexPrefixes: s.IndexPrefixes, + Meta: s.Meta, + Norms: s.Norms, + PositionIncrementGap: s.PositionIncrementGap, + Properties: s.Properties, + SearchAnalyzer: s.SearchAnalyzer, + SearchQuoteAnalyzer: s.SearchQuoteAnalyzer, + Similarity: s.Similarity, + Store: s.Store, + TermVector: s.TermVector, + Type: s.Type, + } + + 
tmp.Type = "text" + + return json.Marshal(tmp) +} + // NewTextProperty returns a TextProperty. func NewTextProperty() *TextProperty { r := &TextProperty{ @@ -782,7 +960,5 @@ func NewTextProperty() *TextProperty { Properties: make(map[string]Property, 0), } - r.Type = "text" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/texttoanalyze.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/texttoanalyze.go index 1015f1231..4cf3e5e79 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/texttoanalyze.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/texttoanalyze.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // TextToAnalyze type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/analyze/types.ts#L66-L66 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/analyze/types.ts#L66-L66 type TextToAnalyze []string diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/threadcount.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/threadcount.go index d8edf6e7a..51c7c52a3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/threadcount.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/threadcount.go @@ -16,20 +16,144 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ThreadCount type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L404-L411 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L1004-L1029 type ThreadCount struct { - Active *int64 `json:"active,omitempty"` + // Active Number of active threads in the thread pool. + Active *int64 `json:"active,omitempty"` + // Completed Number of tasks completed by the thread pool executor. Completed *int64 `json:"completed,omitempty"` - Largest *int64 `json:"largest,omitempty"` - Queue *int64 `json:"queue,omitempty"` - Rejected *int64 `json:"rejected,omitempty"` - Threads *int64 `json:"threads,omitempty"` + // Largest Highest number of active threads in the thread pool. + Largest *int64 `json:"largest,omitempty"` + // Queue Number of tasks in queue for the thread pool. + Queue *int64 `json:"queue,omitempty"` + // Rejected Number of tasks rejected by the thread pool executor. + Rejected *int64 `json:"rejected,omitempty"` + // Threads Number of threads in the thread pool. 
+ Threads *int64 `json:"threads,omitempty"` +} + +func (s *ThreadCount) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Active = &value + case float64: + f := int64(v) + s.Active = &f + } + + case "completed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Completed = &value + case float64: + f := int64(v) + s.Completed = &f + } + + case "largest": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Largest = &value + case float64: + f := int64(v) + s.Largest = &f + } + + case "queue": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Queue = &value + case float64: + f := int64(v) + s.Queue = &f + } + + case "rejected": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Rejected = &value + case float64: + f := int64(v) + s.Rejected = &f + } + + case "threads": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Threads = &value + case float64: + f := int64(v) + s.Threads = &f + } + + } + } + return nil } // NewThreadCount returns a ThreadCount. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/threadpoolrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/threadpoolrecord.go index a63f03d97..fe343a14e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/threadpoolrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/threadpoolrecord.go @@ -16,56 +16,319 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ThreadPoolRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/thread_pool/types.ts#L22-L123 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/thread_pool/types.ts#L22-L124 type ThreadPoolRecord struct { - // Active number of active threads + // Active The number of active threads in the current thread pool. Active *string `json:"active,omitempty"` - // Completed number of completed tasks + // Completed The number of completed tasks. Completed *string `json:"completed,omitempty"` - // Core core number of threads in a scaling thread pool + // Core The core number of active threads allowed in a scaling thread pool. Core string `json:"core,omitempty"` - // EphemeralNodeId ephemeral node id + // EphemeralNodeId The ephemeral node identifier. EphemeralNodeId *string `json:"ephemeral_node_id,omitempty"` - // Host host name + // Host The host name for the current node. Host *string `json:"host,omitempty"` - // Ip ip address + // Ip The IP address for the current node. 
Ip *string `json:"ip,omitempty"` - // KeepAlive thread keep alive time + // KeepAlive The thread keep alive time. KeepAlive string `json:"keep_alive,omitempty"` - // Largest highest number of seen active threads + // Largest The highest number of active threads in the current thread pool. Largest *string `json:"largest,omitempty"` - // Max maximum number of threads in a scaling thread pool + // Max The maximum number of active threads allowed in a scaling thread pool. Max string `json:"max,omitempty"` - // Name thread pool name + // Name The thread pool name. Name *string `json:"name,omitempty"` - // NodeId persistent node id + // NodeId The persistent node identifier. NodeId *string `json:"node_id,omitempty"` - // NodeName node name + // NodeName The node name. NodeName *string `json:"node_name,omitempty"` - // Pid process id + // Pid The process identifier. Pid *string `json:"pid,omitempty"` - // PoolSize number of threads + // PoolSize The number of threads in the current thread pool. PoolSize *string `json:"pool_size,omitempty"` - // Port bound transport port + // Port The bound transport port for the current node. Port *string `json:"port,omitempty"` - // Queue number of tasks currently in queue + // Queue The number of tasks currently in queue. Queue *string `json:"queue,omitempty"` - // QueueSize maximum number of tasks permitted in queue + // QueueSize The maximum number of tasks permitted in the queue. QueueSize *string `json:"queue_size,omitempty"` - // Rejected number of rejected tasks + // Rejected The number of rejected tasks. Rejected *string `json:"rejected,omitempty"` - // Size number of threads in a fixed thread pool + // Size The number of active threads allowed in a fixed thread pool. Size string `json:"size,omitempty"` - // Type thread pool type + // Type The thread pool type. + // Returned values include `fixed`, `fixed_auto_queue_size`, `direct`, and + // `scaling`. 
Type *string `json:"type,omitempty"` } +func (s *ThreadPoolRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active", "a": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Active = &o + + case "completed", "c": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Completed = &o + + case "core", "cr": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Core = o + + case "ephemeral_node_id", "eid": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.EphemeralNodeId = &o + + case "host", "h": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Host = &o + + case "ip", "i": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Ip = &o + + case "keep_alive", "ka": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeepAlive = o + + case "largest", "l": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Largest = &o + + case "max", "mx": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Max = o + + case "name", "n": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "node_id", "id": + if err := dec.Decode(&s.NodeId); err != nil { + return err + } + + case "node_name", "nn": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NodeName = &o + + case "pid", "p": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pid = &o + + case "pool_size", "psz": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PoolSize = &o + + case "port", "po": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Port = &o + + case "queue", "q": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Queue = &o + + case "queue_size", "qs": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueueSize = &o + + case "rejected", "r": + var tmp json.RawMessage + if err 
:= dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Rejected = &o + + case "size", "sz": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Size = o + + case "type", "t": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + } + } + return nil +} + // NewThreadPoolRecord returns a ThreadPoolRecord. func NewThreadPoolRecord() *ThreadPoolRecord { r := &ThreadPoolRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/throttlestate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/throttlestate.go index d6dab4b42..2f1b02231 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/throttlestate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/throttlestate.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ThrottleState type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Action.ts#L123-L126 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Action.ts#L126-L129 type ThrottleState struct { Reason string `json:"reason"` Timestamp DateTime `json:"timestamp"` } +func (s *ThrottleState) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = o + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + } + } + return nil +} + // NewThrottleState returns a ThrottleState. func NewThrottleState() *ThrottleState { r := &ThrottleState{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timeofmonth.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timeofmonth.go index c2972643d..c0e011aeb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timeofmonth.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timeofmonth.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // TimeOfMonth type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Schedule.ts#L115-L118 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Schedule.ts#L115-L118 type TimeOfMonth struct { At []string `json:"at"` On []int `json:"on"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timeofweek.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timeofweek.go index 28a36ce72..53e5b2865 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timeofweek.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timeofweek.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // TimeOfWeek type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Schedule.ts#L120-L123 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Schedule.ts#L120-L123 type TimeOfWeek struct { At []string `json:"at"` On []day.Day `json:"on"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timeofyear.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timeofyear.go index a4c036196..3492392b5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timeofyear.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timeofyear.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // TimeOfYear type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Schedule.ts#L125-L129 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Schedule.ts#L125-L129 type TimeOfYear struct { At []string `json:"at"` Int []month.Month `json:"int"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timesync.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timesync.go index e96978227..7c0f10573 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timesync.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timesync.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // TimeSync type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/_types/Transform.ts#L175-L187 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/_types/Transform.ts#L177-L189 type TimeSync struct { // Delay The time delay between the current time and the latest input data time. 
Delay Duration `json:"delay,omitempty"` @@ -34,6 +41,36 @@ type TimeSync struct { Field string `json:"field"` } +func (s *TimeSync) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "delay": + if err := dec.Decode(&s.Delay); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + } + } + return nil +} + // NewTimeSync returns a TimeSync. func NewTimeSync() *TimeSync { r := &TimeSync{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timingstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timingstats.go index a2172036d..75a65b739 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timingstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/timingstats.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // TimingStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L421-L426 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L563-L568 type TimingStats struct { // ElapsedTime Runtime of the analysis in milliseconds. 
ElapsedTime int64 `json:"elapsed_time"` @@ -30,6 +37,36 @@ type TimingStats struct { IterationTime *int64 `json:"iteration_time,omitempty"` } +func (s *TimingStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "elapsed_time": + if err := dec.Decode(&s.ElapsedTime); err != nil { + return err + } + + case "iteration_time": + if err := dec.Decode(&s.IterationTime); err != nil { + return err + } + + } + } + return nil +} + // NewTimingStats returns a TimingStats. func NewTimingStats() *TimingStats { r := &TimingStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokencountproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokencountproperty.go index 77286c748..1f3252c9b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokencountproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokencountproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // TokenCountProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/specialized.ts#L78-L85 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/specialized.ts#L79-L86 type TokenCountProperty struct { Analyzer *string `json:"analyzer,omitempty"` Boost *Float64 `json:"boost,omitempty"` @@ -53,6 +53,7 @@ type TokenCountProperty struct { } func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -67,23 +68,61 @@ func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { switch t { case "analyzer": - if err := dec.Decode(&s.Analyzer); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Analyzer = &o case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + 
s.DocValues = &v } case "dynamic": @@ -92,11 +131,23 @@ func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { } case "enable_position_increments": - if err := dec.Decode(&s.EnablePositionIncrements); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.EnablePositionIncrements = &value + case bool: + s.EnablePositionIncrements = &v } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -105,7 +156,9 @@ func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -384,33 +437,72 @@ func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "null_value": - if err := 
dec.Decode(&s.NullValue); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.NullValue = &f + case float64: + f := Float64(v) + s.NullValue = &f } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -419,7 +511,9 @@ func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -698,20 +792,38 @@ func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -724,6 +836,32 @@ func (s *TokenCountProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s TokenCountProperty) MarshalJSON() ([]byte, error) { + type innerTokenCountProperty TokenCountProperty + tmp := innerTokenCountProperty{ + Analyzer: s.Analyzer, + Boost: s.Boost, + CopyTo: 
s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + EnablePositionIncrements: s.EnablePositionIncrements, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + tmp.Type = "token_count" + + return json.Marshal(tmp) +} + // NewTokenCountProperty returns a TokenCountProperty. func NewTokenCountProperty() *TokenCountProperty { r := &TokenCountProperty{ @@ -732,7 +870,5 @@ func NewTokenCountProperty() *TokenCountProperty { Properties: make(map[string]Property, 0), } - r.Type = "token_count" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokendetail.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokendetail.go index 355ac5c4f..34f3cf43d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokendetail.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokendetail.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TokenDetail type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/analyze/types.ts#L68-L71 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/analyze/types.ts#L68-L71 type TokenDetail struct { Name string `json:"name"` Tokens []ExplainAnalyzeToken `json:"tokens"` } +func (s *TokenDetail) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = o + + case "tokens": + if err := dec.Decode(&s.Tokens); err != nil { + return err + } + + } + } + return nil +} + // NewTokenDetail returns a TokenDetail. func NewTokenDetail() *TokenDetail { r := &TokenDetail{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenfilter.go index ba5ea2c95..165432b77 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenfilter.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // string // TokenFilterDefinition // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L342-L344 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L344-L346 type TokenFilter interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenfilterdefinition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenfilterdefinition.go index 107cf77bf..ca543733f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenfilterdefinition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenfilterdefinition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -71,5 +71,5 @@ package types // PhoneticTokenFilter // DictionaryDecompounderTokenFilter // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L346-L399 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L348-L401 type TokenFilterDefinition interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenizationconfigcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenizationconfigcontainer.go index df36ed686..32855ec59 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenizationconfigcontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenizationconfigcontainer.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // TokenizationConfigContainer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L97-L114 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L110-L129 type TokenizationConfigContainer struct { // Bert Indicates BERT tokenization and its options Bert *NlpBertTokenizationConfig `json:"bert,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenizer.go index 796d89818..b0e420f91 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenizer.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // string // TokenizerDefinition // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/tokenizers.ts#L119-L121 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/tokenizers.ts#L120-L122 type Tokenizer interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenizerdefinition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenizerdefinition.go index 4021e0f8b..467b741ab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenizerdefinition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tokenizerdefinition.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -37,5 +37,5 @@ package types // PatternTokenizer // IcuTokenizer // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/tokenizers.ts#L123-L141 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/tokenizers.ts#L124-L142 type TokenizerDefinition interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topclassentry.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topclassentry.go index 168f07e0a..f4a37989b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topclassentry.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topclassentry.go @@ -16,19 +16,91 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TopClassEntry type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L399-L403 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L440-L444 type TopClassEntry struct { ClassName string `json:"class_name"` ClassProbability Float64 `json:"class_probability"` ClassScore Float64 `json:"class_score"` } +func (s *TopClassEntry) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "class_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClassName = o + + case "class_probability": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.ClassProbability = f + case float64: + f := Float64(v) + s.ClassProbability = f + } + + case "class_score": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.ClassScore = f + case float64: + f := Float64(v) + s.ClassScore = f + } + + } + } + return nil +} + // NewTopClassEntry returns a TopClassEntry. func NewTopClassEntry() *TopClassEntry { r := &TopClassEntry{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tophit.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tophit.go new file mode 100644 index 000000000..756e5a95a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tophit.go @@ -0,0 +1,84 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// TopHit type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/text_structure/find_structure/types.ts#L35-L38 +type TopHit struct { + Count int64 `json:"count"` + Value json.RawMessage `json:"value,omitempty"` +} + +func (s *TopHit) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + } + } + return nil +} + +// NewTopHit returns a TopHit. 
+func NewTopHit() *TopHit { + r := &TopHit{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tophitsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tophitsaggregate.go index 0103a711f..150f134c7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tophitsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tophitsaggregate.go @@ -16,20 +16,53 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // TopHitsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L645-L648 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L654-L657 type TopHitsAggregate struct { - Hits HitsMetadata `json:"hits"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Hits HitsMetadata `json:"hits"` + Meta Metadata `json:"meta,omitempty"` +} + +func (s *TopHitsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "hits": + if err := dec.Decode(&s.Hits); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + } + } + return nil } // NewTopHitsAggregate returns a TopHitsAggregate. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tophitsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tophitsaggregation.go index 334fa3b88..a166389f1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tophitsaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/tophitsaggregation.go @@ -16,29 +16,247 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TopHitsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L171-L184 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L337-L392 type TopHitsAggregation struct { - DocvalueFields []string `json:"docvalue_fields,omitempty"` - Explain *bool `json:"explain,omitempty"` - Field *string `json:"field,omitempty"` - From *int `json:"from,omitempty"` - Highlight *Highlight `json:"highlight,omitempty"` - Missing Missing `json:"missing,omitempty"` - Script Script `json:"script,omitempty"` - ScriptFields map[string]ScriptField `json:"script_fields,omitempty"` - SeqNoPrimaryTerm *bool `json:"seq_no_primary_term,omitempty"` - Size *int `json:"size,omitempty"` - Sort []SortCombinations `json:"sort,omitempty"` - Source_ SourceConfig `json:"_source,omitempty"` - StoredFields []string `json:"stored_fields,omitempty"` - TrackScores *bool `json:"track_scores,omitempty"` - Version *bool `json:"version,omitempty"` + // DocvalueFields Fields for which to return doc values. 
+ DocvalueFields []string `json:"docvalue_fields,omitempty"` + // Explain If `true`, returns detailed information about score computation as part of a + // hit. + Explain *bool `json:"explain,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + // From Starting document offset. + From *int `json:"from,omitempty"` + // Highlight Specifies the highlighter to use for retrieving highlighted snippets from one + // or more fields in the search results. + Highlight *Highlight `json:"highlight,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script Script `json:"script,omitempty"` + // ScriptFields Returns the result of one or more script evaluations for each hit. + ScriptFields map[string]ScriptField `json:"script_fields,omitempty"` + // SeqNoPrimaryTerm If `true`, returns sequence number and primary term of the last modification + // of each hit. + SeqNoPrimaryTerm *bool `json:"seq_no_primary_term,omitempty"` + // Size The maximum number of top matching hits to return per bucket. + Size *int `json:"size,omitempty"` + // Sort Sort order of the top matching hits. + // By default, the hits are sorted by the score of the main query. + Sort []SortCombinations `json:"sort,omitempty"` + // Source_ Selects the fields of the source that are returned. + Source_ SourceConfig `json:"_source,omitempty"` + // StoredFields Returns values for the specified stored fields (fields that use the `store` + // mapping option). + StoredFields []string `json:"stored_fields,omitempty"` + // TrackScores If `true`, calculates and returns document scores, even if the scores are not + // used for sorting. + TrackScores *bool `json:"track_scores,omitempty"` + // Version If `true`, returns document version as part of a hit. 
+ Version *bool `json:"version,omitempty"` +} + +func (s *TopHitsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "docvalue_fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.DocvalueFields = append(s.DocvalueFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.DocvalueFields); err != nil { + return err + } + } + + case "explain": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Explain = &value + case bool: + s.Explain = &v + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "from": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.From = &value + case float64: + f := int(v) + s.From = &f + } + + case "highlight": + if err := dec.Decode(&s.Highlight); err != nil { + return err + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "script_fields": + if s.ScriptFields == nil { + s.ScriptFields = make(map[string]ScriptField, 0) + } + if err := dec.Decode(&s.ScriptFields); err != nil { + return err + } + + case "seq_no_primary_term": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.SeqNoPrimaryTerm = &value + case bool: + s.SeqNoPrimaryTerm = &v + } + + case "size": + + var tmp interface{} + 
dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return err + } + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return err + } + + case "stored_fields": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.StoredFields = append(s.StoredFields, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.StoredFields); err != nil { + return err + } + } + + case "track_scores": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TrackScores = &value + case bool: + s.TrackScores = &v + } + + case "version": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Version = &value + case bool: + s.Version = &v + } + + } + } + return nil } // NewTopHitsAggregation returns a TopHitsAggregation. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topleftbottomrightgeobounds.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topleftbottomrightgeobounds.go index e8a615280..15d6be37b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topleftbottomrightgeobounds.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topleftbottomrightgeobounds.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // TopLeftBottomRightGeoBounds type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Geo.ts#L145-L148 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Geo.ts#L170-L173 type TopLeftBottomRightGeoBounds struct { BottomRight GeoLocation `json:"bottom_right"` TopLeft GeoLocation `json:"top_left"` } +func (s *TopLeftBottomRightGeoBounds) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bottom_right": + if err := dec.Decode(&s.BottomRight); err != nil { + return err + } + + case "top_left": + if err := dec.Decode(&s.TopLeft); err != nil { + return err + } + + } + } + return nil +} + // NewTopLeftBottomRightGeoBounds returns a TopLeftBottomRightGeoBounds. 
func NewTopLeftBottomRightGeoBounds() *TopLeftBottomRightGeoBounds { r := &TopLeftBottomRightGeoBounds{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topmetrics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topmetrics.go index 85dedbb1f..50b1eb6cd 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topmetrics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topmetrics.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // TopMetrics type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L720-L724 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L729-L733 type TopMetrics struct { Metrics map[string]FieldValue `json:"metrics"` Sort []FieldValue `json:"sort"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topmetricsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topmetricsaggregate.go index 65a2f2d99..3e6ef4e73 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topmetricsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topmetricsaggregate.go @@ -16,20 +16,53 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // TopMetricsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L715-L718 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L724-L727 type TopMetricsAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Top []TopMetrics `json:"top"` + Meta Metadata `json:"meta,omitempty"` + Top []TopMetrics `json:"top"` +} + +func (s *TopMetricsAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "top": + if err := dec.Decode(&s.Top); err != nil { + return err + } + + } + } + return nil } // NewTopMetricsAggregate returns a TopMetricsAggregate. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topmetricsaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topmetricsaggregation.go index 846f9c3c8..686b23e84 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topmetricsaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topmetricsaggregation.go @@ -16,20 +16,117 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TopMetricsAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L186-L190 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L394-L408 type TopMetricsAggregation struct { - Field *string `json:"field,omitempty"` - Metrics []TopMetricsValue `json:"metrics,omitempty"` - Missing Missing `json:"missing,omitempty"` - Script Script `json:"script,omitempty"` - Size *int `json:"size,omitempty"` - Sort []SortCombinations `json:"sort,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + // Metrics The fields of the top document to return. + Metrics []TopMetricsValue `json:"metrics,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. + Missing Missing `json:"missing,omitempty"` + Script Script `json:"script,omitempty"` + // Size The number of top documents from which to return metrics. + Size *int `json:"size,omitempty"` + // Sort The sort order of the documents. 
+ Sort []SortCombinations `json:"sort,omitempty"` +} + +func (s *TopMetricsAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "metrics": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := NewTopMetricsValue() + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Metrics = append(s.Metrics, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Metrics); err != nil { + return err + } + } + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + case "sort": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(SortCombinations) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Sort = append(s.Sort, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Sort); err != nil { + return err + } + } + + } + } + return nil } // NewTopMetricsAggregation returns a TopMetricsAggregation. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topmetricsvalue.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topmetricsvalue.go index be0dcab78..85e317c2a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topmetricsvalue.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/topmetricsvalue.go @@ -16,17 +16,50 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // TopMetricsValue type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L192-L194 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L410-L415 type TopMetricsValue struct { + // Field A field to return as a metric. Field string `json:"field"` } +func (s *TopMetricsValue) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + } + } + return nil +} + // NewTopMetricsValue returns a TopMetricsValue. 
func NewTopMetricsValue() *TopMetricsValue { r := &TopMetricsValue{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/toprightbottomleftgeobounds.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/toprightbottomleftgeobounds.go index 11f2a1c29..d92860e84 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/toprightbottomleftgeobounds.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/toprightbottomleftgeobounds.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // TopRightBottomLeftGeoBounds type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Geo.ts#L150-L153 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Geo.ts#L175-L178 type TopRightBottomLeftGeoBounds struct { BottomLeft GeoLocation `json:"bottom_left"` TopRight GeoLocation `json:"top_right"` } +func (s *TopRightBottomLeftGeoBounds) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "bottom_left": + if err := dec.Decode(&s.BottomLeft); err != nil { + return err + } + + case "top_right": + if err := dec.Decode(&s.TopRight); err != nil { + return err + } + + } + } + return nil +} + // NewTopRightBottomLeftGeoBounds returns a TopRightBottomLeftGeoBounds. 
func NewTopRightBottomLeftGeoBounds() *TopRightBottomLeftGeoBounds { r := &TopRightBottomLeftGeoBounds{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalfeatureimportance.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalfeatureimportance.go index 17691fa83..dd0cab806 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalfeatureimportance.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalfeatureimportance.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // TotalFeatureImportance type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L222-L229 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L232-L239 type TotalFeatureImportance struct { // Classes If the trained model is a classification model, feature importance statistics // are gathered per target class value. 
@@ -34,6 +41,41 @@ type TotalFeatureImportance struct { Importance []TotalFeatureImportanceStatistics `json:"importance"` } +func (s *TotalFeatureImportance) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classes": + if err := dec.Decode(&s.Classes); err != nil { + return err + } + + case "feature_name": + if err := dec.Decode(&s.FeatureName); err != nil { + return err + } + + case "importance": + if err := dec.Decode(&s.Importance); err != nil { + return err + } + + } + } + return nil +} + // NewTotalFeatureImportance returns a TotalFeatureImportance. func NewTotalFeatureImportance() *TotalFeatureImportance { r := &TotalFeatureImportance{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalfeatureimportanceclass.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalfeatureimportanceclass.go index 74635c295..b4f39ec6b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalfeatureimportanceclass.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalfeatureimportanceclass.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // TotalFeatureImportanceClass type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L231-L236 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L241-L246 type TotalFeatureImportanceClass struct { // ClassName The target class value. Could be a string, boolean, or number. ClassName string `json:"class_name"` @@ -31,6 +38,36 @@ type TotalFeatureImportanceClass struct { Importance []TotalFeatureImportanceStatistics `json:"importance"` } +func (s *TotalFeatureImportanceClass) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "class_name": + if err := dec.Decode(&s.ClassName); err != nil { + return err + } + + case "importance": + if err := dec.Decode(&s.Importance); err != nil { + return err + } + + } + } + return nil +} + // NewTotalFeatureImportanceClass returns a TotalFeatureImportanceClass. func NewTotalFeatureImportanceClass() *TotalFeatureImportanceClass { r := &TotalFeatureImportanceClass{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalfeatureimportancestatistics.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalfeatureimportancestatistics.go index eda4668ff..f61f44fe4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalfeatureimportancestatistics.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalfeatureimportancestatistics.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TotalFeatureImportanceStatistics type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L238-L245 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L248-L255 type TotalFeatureImportanceStatistics struct { // Max The maximum importance value across all the training data for this feature. Max int `json:"max"` @@ -34,6 +42,74 @@ type TotalFeatureImportanceStatistics struct { Min int `json:"min"` } +func (s *TotalFeatureImportanceStatistics) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Max = value + case float64: + f := int(v) + s.Max = f + } + + case "mean_magnitude": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.MeanMagnitude = f + case float64: + f := Float64(v) + s.MeanMagnitude = f + } + + case "min": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Min = value + case float64: + f := int(v) + s.Min = f + } + + } + } + return nil +} + // NewTotalFeatureImportanceStatistics returns a 
TotalFeatureImportanceStatistics. func NewTotalFeatureImportanceStatistics() *TotalFeatureImportanceStatistics { r := &TotalFeatureImportanceStatistics{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalhits.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalhits.go index 212df4c9d..4adf03c6d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalhits.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totalhits.go @@ -15,23 +15,46 @@ // specific language governing permissions and limitations // under the License. -// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 - package types import ( + "bytes" + "encoding/json" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/totalhitsrelation" ) // TotalHits type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/hits.ts#L94-L97 +// https://github.com/elastic/elasticsearch-specification/blob/18d160a8583deec1bbef274d2c0e563a0cd20e2f/specification/_global/search/_types/hits.ts#L94-L97 type TotalHits struct { Relation totalhitsrelation.TotalHitsRelation `json:"relation"` Value int64 `json:"value"` } +// UnmarshalJSON implements Unmarshaler interface, it handles the shortcut for total hits. +func (t *TotalHits) UnmarshalJSON(data []byte) error { + type stub TotalHits + tmp := stub{} + dec := json.NewDecoder(bytes.NewReader(data)) + if _, err := strconv.Atoi(string(data)); err == nil { + err := dec.Decode(&t.Value) + if err != nil { + return err + } + t.Relation = totalhitsrelation.Eq + } else { + err := dec.Decode(&tmp) + if err != nil { + return err + } + *t = TotalHits(tmp) + } + + return nil +} + // NewTotalHits returns a TotalHits. 
func NewTotalHits() *TotalHits { r := &TotalHits{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totaluserprofiles.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totaluserprofiles.go index 409a53c42..ff0dc2d8f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totaluserprofiles.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/totaluserprofiles.go @@ -16,18 +16,66 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TotalUserProfiles type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/suggest_user_profiles/Response.ts#L24-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/suggest_user_profiles/Response.ts#L24-L27 type TotalUserProfiles struct { Relation string `json:"relation"` Value int64 `json:"value"` } +func (s *TotalUserProfiles) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "relation": + if err := dec.Decode(&s.Relation); err != nil { + return err + } + + case "value": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Value = value + case float64: + f := int64(v) + s.Value = f + } + + } + } + return nil +} + // NewTotalUserProfiles returns a TotalUserProfiles. 
func NewTotalUserProfiles() *TotalUserProfiles { r := &TotalUserProfiles{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trackhits.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trackhits.go index a61049b1e..ee479b9a1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trackhits.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trackhits.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // bool // int // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/search/_types/hits.ts#L126-L134 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/search/_types/hits.ts#L142-L150 type TrackHits interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodel.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodel.go index 7a94f3073..629d2a46b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodel.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodel.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // TrainedModel type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model/types.ts#L60-L72 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model/types.ts#L60-L72 type TrainedModel struct { // Ensemble The definition for an ensemble model Ensemble *Ensemble `json:"ensemble,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelassignment.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelassignment.go index fc7d35a80..7a14d6d5b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelassignment.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelassignment.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentassignmentstate" ) // TrainedModelAssignment type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L387-L402 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L402-L417 type TrainedModelAssignment struct { // AssignmentState The overall assignment state. 
AssignmentState deploymentassignmentstate.DeploymentAssignmentState `json:"assignment_state"` @@ -38,6 +44,65 @@ type TrainedModelAssignment struct { TaskParameters TrainedModelAssignmentTaskParameters `json:"task_parameters"` } +func (s *TrainedModelAssignment) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "assignment_state": + if err := dec.Decode(&s.AssignmentState); err != nil { + return err + } + + case "max_assigned_allocations": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxAssignedAllocations = &value + case float64: + f := int(v) + s.MaxAssignedAllocations = &f + } + + case "routing_table": + if s.RoutingTable == nil { + s.RoutingTable = make(map[string]TrainedModelAssignmentRoutingTable, 0) + } + if err := dec.Decode(&s.RoutingTable); err != nil { + return err + } + + case "start_time": + if err := dec.Decode(&s.StartTime); err != nil { + return err + } + + case "task_parameters": + if err := dec.Decode(&s.TaskParameters); err != nil { + return err + } + + } + } + return nil +} + // NewTrainedModelAssignment returns a TrainedModelAssignment. func NewTrainedModelAssignment() *TrainedModelAssignment { r := &TrainedModelAssignment{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelassignmentroutingtable.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelassignmentroutingtable.go index 7810b6472..9a09c5d2d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelassignmentroutingtable.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelassignmentroutingtable.go @@ -16,17 +16,23 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/routingstate" ) // TrainedModelAssignmentRoutingTable type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L358-L376 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L373-L391 type TrainedModelAssignmentRoutingTable struct { // CurrentAllocations Current number of allocations. CurrentAllocations int `json:"current_allocations"` @@ -39,6 +45,75 @@ type TrainedModelAssignmentRoutingTable struct { TargetAllocations int `json:"target_allocations"` } +func (s *TrainedModelAssignmentRoutingTable) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current_allocations": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CurrentAllocations = value + case float64: + f := int(v) + s.CurrentAllocations = f + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = o + + case "routing_state": + if err := dec.Decode(&s.RoutingState); err != nil { + return err + } + + case "target_allocations": + + var tmp interface{} + dec.Decode(&tmp) + switch v 
:= tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TargetAllocations = value + case float64: + f := int(v) + s.TargetAllocations = f + } + + } + } + return nil +} + // NewTrainedModelAssignmentRoutingTable returns a TrainedModelAssignmentRoutingTable. func NewTrainedModelAssignmentRoutingTable() *TrainedModelAssignmentRoutingTable { r := &TrainedModelAssignmentRoutingTable{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelassignmenttaskparameters.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelassignmenttaskparameters.go index 89a1c1d41..af2a9cc2c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelassignmenttaskparameters.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelassignmenttaskparameters.go @@ -16,20 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/trainingpriority" ) // TrainedModelAssignmentTaskParameters type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L305-L333 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L315-L348 type TrainedModelAssignmentTaskParameters struct { // CacheSize The size of the trained model cache. CacheSize ByteSize `json:"cache_size"` + // DeploymentId The unique identifier for the trained model deployment. 
+ DeploymentId string `json:"deployment_id"` // ModelBytes The size of the trained model in bytes. ModelBytes int `json:"model_bytes"` // ModelId The unique identifier for the trained model. @@ -43,6 +51,110 @@ type TrainedModelAssignmentTaskParameters struct { ThreadsPerAllocation int `json:"threads_per_allocation"` } +func (s *TrainedModelAssignmentTaskParameters) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cache_size": + if err := dec.Decode(&s.CacheSize); err != nil { + return err + } + + case "deployment_id": + if err := dec.Decode(&s.DeploymentId); err != nil { + return err + } + + case "model_bytes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ModelBytes = value + case float64: + f := int(v) + s.ModelBytes = f + } + + case "model_id": + if err := dec.Decode(&s.ModelId); err != nil { + return err + } + + case "number_of_allocations": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumberOfAllocations = value + case float64: + f := int(v) + s.NumberOfAllocations = f + } + + case "priority": + if err := dec.Decode(&s.Priority); err != nil { + return err + } + + case "queue_capacity": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.QueueCapacity = value + case float64: + f := int(v) + s.QueueCapacity = f + } + + case "threads_per_allocation": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ThreadsPerAllocation = value + case float64: + f := int(v) + 
s.ThreadsPerAllocation = f + } + + } + } + return nil +} + // NewTrainedModelAssignmentTaskParameters returns a TrainedModelAssignmentTaskParameters. func NewTrainedModelAssignmentTaskParameters() *TrainedModelAssignmentTaskParameters { r := &TrainedModelAssignmentTaskParameters{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelconfig.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelconfig.go index ac2280ec6..69deaab7f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelconfig.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelconfig.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/trainedmodeltype" ) // TrainedModelConfig type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L157-L189 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L165-L199 type TrainedModelConfig struct { CompressedDefinition *string `json:"compressed_definition,omitempty"` // CreateTime The time when the trained model was created. @@ -41,10 +47,13 @@ type TrainedModelConfig struct { EstimatedHeapMemoryUsageBytes *int `json:"estimated_heap_memory_usage_bytes,omitempty"` // EstimatedOperations The estimated number of operations to use the trained model. 
EstimatedOperations *int `json:"estimated_operations,omitempty"` + // FullyDefined True if the full model definition is present. + FullyDefined *bool `json:"fully_defined,omitempty"` // InferenceConfig The default configuration for inference. This can be either a regression, // classification, or one of the many NLP focused configurations. It must match - // the underlying definition.trained_model's target_type. - InferenceConfig InferenceConfigCreateContainer `json:"inference_config"` + // the underlying definition.trained_model's target_type. For pre-packaged + // models such as ELSER the config is not required. + InferenceConfig *InferenceConfigCreateContainer `json:"inference_config,omitempty"` // Input The input field names for the model definition. Input TrainedModelConfigInput `json:"input"` // LicenseLevel The license level of the trained model. @@ -65,6 +74,178 @@ type TrainedModelConfig struct { Version *string `json:"version,omitempty"` } +func (s *TrainedModelConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "compressed_definition": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CompressedDefinition = &o + + case "create_time": + if err := dec.Decode(&s.CreateTime); err != nil { + return err + } + + case "created_by": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CreatedBy = &o + + case "default_field_map": + if s.DefaultFieldMap == nil { + s.DefaultFieldMap = make(map[string]string, 0) + } + if err := dec.Decode(&s.DefaultFieldMap); err != nil { + return err + } + + case "description": + var tmp 
json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "estimated_heap_memory_usage_bytes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.EstimatedHeapMemoryUsageBytes = &value + case float64: + f := int(v) + s.EstimatedHeapMemoryUsageBytes = &f + } + + case "estimated_operations": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.EstimatedOperations = &value + case float64: + f := int(v) + s.EstimatedOperations = &f + } + + case "fully_defined": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.FullyDefined = &value + case bool: + s.FullyDefined = &v + } + + case "inference_config": + if err := dec.Decode(&s.InferenceConfig); err != nil { + return err + } + + case "input": + if err := dec.Decode(&s.Input); err != nil { + return err + } + + case "license_level": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LicenseLevel = &o + + case "location": + if err := dec.Decode(&s.Location); err != nil { + return err + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return err + } + + case "model_id": + if err := dec.Decode(&s.ModelId); err != nil { + return err + } + + case "model_size_bytes": + if err := dec.Decode(&s.ModelSizeBytes); err != nil { + return err + } + + case "model_type": + if err := dec.Decode(&s.ModelType); err != nil { + return err + } + + case "tags": + if err := dec.Decode(&s.Tags); err != nil { + return err + } + + case 
"version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewTrainedModelConfig returns a TrainedModelConfig. func NewTrainedModelConfig() *TrainedModelConfig { r := &TrainedModelConfig{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelconfiginput.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelconfiginput.go index 83ff68097..d39be73ca 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelconfiginput.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelconfiginput.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // TrainedModelConfigInput type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L191-L194 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L201-L204 type TrainedModelConfigInput struct { // FieldNames An array of input field names for the model. FieldNames []string `json:"field_names"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelconfigmetadata.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelconfigmetadata.go index 10feb11f1..5224baaf4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelconfigmetadata.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelconfigmetadata.go @@ -16,13 +16,13 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // TrainedModelConfigMetadata type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L196-L204 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L206-L214 type TrainedModelConfigMetadata struct { // FeatureImportanceBaseline An object that contains the baseline for feature importance values. For // regression analysis, it is a single value. For classification analysis, there diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeldeploymentallocationstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeldeploymentallocationstatus.go index 7cd755cd3..94033e58d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeldeploymentallocationstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeldeploymentallocationstatus.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentallocationstate" ) // TrainedModelDeploymentAllocationStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L378-L385 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L393-L400 type TrainedModelDeploymentAllocationStatus struct { // AllocationCount The current number of nodes where the model is allocated. AllocationCount int `json:"allocation_count"` @@ -36,6 +42,63 @@ type TrainedModelDeploymentAllocationStatus struct { TargetAllocationCount int `json:"target_allocation_count"` } +func (s *TrainedModelDeploymentAllocationStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allocation_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.AllocationCount = value + case float64: + f := int(v) + s.AllocationCount = f + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return err + } + + case "target_allocation_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TargetAllocationCount = value + case float64: + f := int(v) + s.TargetAllocationCount = f + } + + } + } + return nil +} + // NewTrainedModelDeploymentAllocationStatus returns a TrainedModelDeploymentAllocationStatus. 
func NewTrainedModelDeploymentAllocationStatus() *TrainedModelDeploymentAllocationStatus { r := &TrainedModelDeploymentAllocationStatus{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeldeploymentnodesstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeldeploymentnodesstats.go index be709ba30..7e378b35a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeldeploymentnodesstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeldeploymentnodesstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TrainedModelDeploymentNodesStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L128-L155 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L133-L163 type TrainedModelDeploymentNodesStats struct { // AverageInferenceTimeMs The average time for each inference call to complete on this node. 
AverageInferenceTimeMs Float64 `json:"average_inference_time_ms"` @@ -52,6 +60,173 @@ type TrainedModelDeploymentNodesStats struct { TimeoutCount int `json:"timeout_count"` } +func (s *TrainedModelDeploymentNodesStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "average_inference_time_ms": + if err := dec.Decode(&s.AverageInferenceTimeMs); err != nil { + return err + } + + case "error_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ErrorCount = value + case float64: + f := int(v) + s.ErrorCount = f + } + + case "inference_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.InferenceCount = value + case float64: + f := int(v) + s.InferenceCount = f + } + + case "last_access": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LastAccess = value + case float64: + f := int64(v) + s.LastAccess = f + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return err + } + + case "number_of_allocations": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumberOfAllocations = value + case float64: + f := int(v) + s.NumberOfAllocations = f + } + + case "number_of_pending_requests": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumberOfPendingRequests = value + case float64: + f := int(v) + s.NumberOfPendingRequests = f + } + + case 
"rejection_execution_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.RejectionExecutionCount = value + case float64: + f := int(v) + s.RejectionExecutionCount = f + } + + case "routing_state": + if err := dec.Decode(&s.RoutingState); err != nil { + return err + } + + case "start_time": + if err := dec.Decode(&s.StartTime); err != nil { + return err + } + + case "threads_per_allocation": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ThreadsPerAllocation = value + case float64: + f := int(v) + s.ThreadsPerAllocation = f + } + + case "timeout_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TimeoutCount = value + case float64: + f := int(v) + s.TimeoutCount = f + } + + } + } + return nil +} + // NewTrainedModelDeploymentNodesStats returns a TrainedModelDeploymentNodesStats. func NewTrainedModelDeploymentNodesStats() *TrainedModelDeploymentNodesStats { r := &TrainedModelDeploymentNodesStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeldeploymentstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeldeploymentstats.go index 08749154f..fcec87a7e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeldeploymentstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeldeploymentstats.go @@ -16,28 +16,37 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/deploymentstate" ) // TrainedModelDeploymentStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L62-L97 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L62-L102 type TrainedModelDeploymentStats struct { // AllocationStatus The detailed allocation status for the deployment. AllocationStatus TrainedModelDeploymentAllocationStatus `json:"allocation_status"` CacheSize ByteSize `json:"cache_size,omitempty"` + // DeploymentId The unique identifier for the trained model deployment. + DeploymentId string `json:"deployment_id"` // ErrorCount The sum of `error_count` for all nodes in the deployment. ErrorCount int `json:"error_count"` // InferenceCount The sum of `inference_count` for all nodes in the deployment. InferenceCount int `json:"inference_count"` // ModelId The unique identifier for the trained model. ModelId string `json:"model_id"` - // Nodes The deployent stats for each node that currently has the model allocated. + // Nodes The deployment stats for each node that currently has the model allocated. + // In serverless, stats are reported for a single unnamed virtual node. Nodes TrainedModelDeploymentNodesStats `json:"nodes"` // NumberOfAllocations The number of allocations requested. 
NumberOfAllocations int `json:"number_of_allocations"` @@ -62,6 +71,185 @@ type TrainedModelDeploymentStats struct { TimeoutCount int `json:"timeout_count"` } +func (s *TrainedModelDeploymentStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allocation_status": + if err := dec.Decode(&s.AllocationStatus); err != nil { + return err + } + + case "cache_size": + if err := dec.Decode(&s.CacheSize); err != nil { + return err + } + + case "deployment_id": + if err := dec.Decode(&s.DeploymentId); err != nil { + return err + } + + case "error_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ErrorCount = value + case float64: + f := int(v) + s.ErrorCount = f + } + + case "inference_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.InferenceCount = value + case float64: + f := int(v) + s.InferenceCount = f + } + + case "model_id": + if err := dec.Decode(&s.ModelId); err != nil { + return err + } + + case "nodes": + if err := dec.Decode(&s.Nodes); err != nil { + return err + } + + case "number_of_allocations": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NumberOfAllocations = value + case float64: + f := int(v) + s.NumberOfAllocations = f + } + + case "queue_capacity": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.QueueCapacity = value + case float64: + f := int(v) + s.QueueCapacity = f + } + + case "reason": + var tmp json.RawMessage + if err := 
dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = o + + case "rejected_execution_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.RejectedExecutionCount = value + case float64: + f := int(v) + s.RejectedExecutionCount = f + } + + case "start_time": + if err := dec.Decode(&s.StartTime); err != nil { + return err + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return err + } + + case "threads_per_allocation": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ThreadsPerAllocation = value + case float64: + f := int(v) + s.ThreadsPerAllocation = f + } + + case "timeout_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.TimeoutCount = value + case float64: + f := int(v) + s.TimeoutCount = f + } + + } + } + return nil +} + // NewTrainedModelDeploymentStats returns a TrainedModelDeploymentStats. func NewTrainedModelDeploymentStats() *TrainedModelDeploymentStats { r := &TrainedModelDeploymentStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelentities.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelentities.go index 7ee9f0009..d7ba77986 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelentities.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelentities.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TrainedModelEntities type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L392-L398 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L433-L439 type TrainedModelEntities struct { ClassName string `json:"class_name"` ClassProbability Float64 `json:"class_probability"` @@ -31,6 +39,98 @@ type TrainedModelEntities struct { StartPos int `json:"start_pos"` } +func (s *TrainedModelEntities) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "class_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClassName = o + + case "class_probability": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.ClassProbability = f + case float64: + f := Float64(v) + s.ClassProbability = f + } + + case "end_pos": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.EndPos = value + case float64: + f := int(v) + s.EndPos = f + } + + case "entity": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Entity = o + + case "start_pos": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.StartPos = value + case float64: + f := int(v) + s.StartPos = f + } + + } + } + return nil +} + // NewTrainedModelEntities returns a TrainedModelEntities. func NewTrainedModelEntities() *TrainedModelEntities { r := &TrainedModelEntities{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelinferenceclassimportance.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelinferenceclassimportance.go index eb81940d4..6479dd75d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelinferenceclassimportance.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelinferenceclassimportance.go @@ -16,18 +16,74 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TrainedModelInferenceClassImportance type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L405-L408 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L446-L449 type TrainedModelInferenceClassImportance struct { ClassName string `json:"class_name"` Importance Float64 `json:"importance"` } +func (s *TrainedModelInferenceClassImportance) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "class_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ClassName = o + + case "importance": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Importance = f + case float64: + f := Float64(v) + s.Importance = f + } + + } + } + return nil +} + // NewTrainedModelInferenceClassImportance returns a TrainedModelInferenceClassImportance. func NewTrainedModelInferenceClassImportance() *TrainedModelInferenceClassImportance { r := &TrainedModelInferenceClassImportance{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelinferencefeatureimportance.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelinferencefeatureimportance.go index 01fbf4387..da55975d2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelinferencefeatureimportance.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelinferencefeatureimportance.go @@ -16,19 +16,80 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TrainedModelInferenceFeatureImportance type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L410-L414 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L451-L455 type TrainedModelInferenceFeatureImportance struct { Classes []TrainedModelInferenceClassImportance `json:"classes,omitempty"` FeatureName string `json:"feature_name"` Importance *Float64 `json:"importance,omitempty"` } +func (s *TrainedModelInferenceFeatureImportance) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classes": + if err := dec.Decode(&s.Classes); err != nil { + return err + } + + case "feature_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.FeatureName = o + + case "importance": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Importance = &f + case float64: + f := Float64(v) + s.Importance = &f + } + + } + } + return nil +} + // NewTrainedModelInferenceFeatureImportance returns a TrainedModelInferenceFeatureImportance. 
func NewTrainedModelInferenceFeatureImportance() *TrainedModelInferenceFeatureImportance { r := &TrainedModelInferenceFeatureImportance{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelinferencestats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelinferencestats.go index ab1b56f70..19702863a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelinferencestats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelinferencestats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TrainedModelInferenceStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L99-L119 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L104-L124 type TrainedModelInferenceStats struct { // CacheMissCount The number of times the model was loaded for inference and was not retrieved // from the cache. 
@@ -43,6 +51,95 @@ type TrainedModelInferenceStats struct { Timestamp DateTime `json:"timestamp"` } +func (s *TrainedModelInferenceStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "cache_miss_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.CacheMissCount = value + case float64: + f := int(v) + s.CacheMissCount = f + } + + case "failure_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FailureCount = value + case float64: + f := int(v) + s.FailureCount = f + } + + case "inference_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.InferenceCount = value + case float64: + f := int(v) + s.InferenceCount = f + } + + case "missing_all_fields_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MissingAllFieldsCount = value + case float64: + f := int(v) + s.MissingAllFieldsCount = f + } + + case "timestamp": + if err := dec.Decode(&s.Timestamp); err != nil { + return err + } + + } + } + return nil +} + // NewTrainedModelInferenceStats returns a TrainedModelInferenceStats. 
func NewTrainedModelInferenceStats() *TrainedModelInferenceStats { r := &TrainedModelInferenceStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodellocation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodellocation.go index f9c8cf489..771bf0cdc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodellocation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodellocation.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // TrainedModelLocation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L404-L406 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L419-L421 type TrainedModelLocation struct { Index TrainedModelLocationIndex `json:"index"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodellocationindex.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodellocationindex.go index 07f677db9..60806cced 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodellocationindex.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodellocationindex.go @@ -16,17 +16,49 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // TrainedModelLocationIndex type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L408-L410 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L423-L425 type TrainedModelLocationIndex struct { Name string `json:"name"` } +func (s *TrainedModelLocationIndex) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + } + } + return nil +} + // NewTrainedModelLocationIndex returns a TrainedModelLocationIndex. func NewTrainedModelLocationIndex() *TrainedModelLocationIndex { r := &TrainedModelLocationIndex{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelsizestats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelsizestats.go index 837f86d7b..a6df79739 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelsizestats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelsizestats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TrainedModelSizeStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L121-L126 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L126-L131 type TrainedModelSizeStats struct { // ModelSizeBytes The size of the model in bytes. ModelSizeBytes ByteSize `json:"model_size_bytes"` @@ -30,6 +38,47 @@ type TrainedModelSizeStats struct { RequiredNativeMemoryBytes int `json:"required_native_memory_bytes"` } +func (s *TrainedModelSizeStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "model_size_bytes": + if err := dec.Decode(&s.ModelSizeBytes); err != nil { + return err + } + + case "required_native_memory_bytes": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.RequiredNativeMemoryBytes = value + case float64: + f := int(v) + s.RequiredNativeMemoryBytes = f + } + + } + } + return nil +} + // NewTrainedModelSizeStats returns a TrainedModelSizeStats. 
func NewTrainedModelSizeStats() *TrainedModelSizeStats { r := &TrainedModelSizeStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelsrecord.go index bf5e2e413..8478e9511 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelsrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelsrecord.go @@ -16,52 +16,269 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TrainedModelsRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/ml_trained_models/types.ts#L23-L111 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/ml_trained_models/types.ts#L23-L115 type TrainedModelsRecord struct { - // CreateTime The time the model was created + // CreateTime The time the model was created. CreateTime DateTime `json:"create_time,omitempty"` - // CreatedBy who created the model + // CreatedBy Information about the creator of the model. CreatedBy *string `json:"created_by,omitempty"` - // DataFrameAnalysis The analysis used by the data frame to build the model + // DataFrameAnalysis The analysis used by the data frame to build the model. DataFrameAnalysis *string `json:"data_frame.analysis,omitempty"` - // DataFrameCreateTime The time the data frame analytics config was created + // DataFrameCreateTime The time the data frame analytics job was created. 
DataFrameCreateTime *string `json:"data_frame.create_time,omitempty"` - // DataFrameId The data frame analytics config id that created the model (if still - // available) + // DataFrameId The identifier for the data frame analytics job that created the model. + // Only displayed if the job is still available. DataFrameId *string `json:"data_frame.id,omitempty"` - // DataFrameSourceIndex The source index used to train in the data frame analysis + // DataFrameSourceIndex The source index used to train in the data frame analysis. DataFrameSourceIndex *string `json:"data_frame.source_index,omitempty"` - // Description The model description + // Description A description of the model. Description *string `json:"description,omitempty"` - // HeapSize the estimated heap size to keep the model in memory + // HeapSize The estimated heap size to keep the model in memory. HeapSize ByteSize `json:"heap_size,omitempty"` - // Id the trained model id + // Id The model identifier. Id *string `json:"id,omitempty"` - // IngestCount The total number of docs processed by the model + // IngestCount The total number of documents that are processed by the model. IngestCount *string `json:"ingest.count,omitempty"` - // IngestCurrent The total documents currently being handled by the model + // IngestCurrent The total number of documents that are currently being handled by the model. IngestCurrent *string `json:"ingest.current,omitempty"` - // IngestFailed The total count of failed ingest attempts with this model + // IngestFailed The total number of failed ingest attempts with the model. IngestFailed *string `json:"ingest.failed,omitempty"` - // IngestPipelines The number of pipelines referencing the model + // IngestPipelines The number of pipelines that are referencing the model. IngestPipelines *string `json:"ingest.pipelines,omitempty"` - // IngestTime The total time spent processing docs with this model + // IngestTime The total time spent processing documents with the model. 
IngestTime *string `json:"ingest.time,omitempty"` - // License The license level of the model + // License The license level of the model. License *string `json:"license,omitempty"` - // Operations the estimated number of operations to use the model + // Operations The estimated number of operations to use the model. + // This number helps to measure the computational complexity of the model. Operations *string `json:"operations,omitempty"` Type *string `json:"type,omitempty"` - // Version The version of Elasticsearch when the model was created + // Version The version of Elasticsearch when the model was created. Version *string `json:"version,omitempty"` } +func (s *TrainedModelsRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "create_time", "ct": + if err := dec.Decode(&s.CreateTime); err != nil { + return err + } + + case "created_by", "c", "createdBy": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CreatedBy = &o + + case "data_frame.analysis", "dfa", "dataFrameAnalyticsAnalysis": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataFrameAnalysis = &o + + case "data_frame.create_time", "dft", "dataFrameAnalyticsTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataFrameCreateTime = &o + + case "data_frame.id", "dfid", "dataFrameAnalytics": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + 
if err != nil { + o = string(tmp[:]) + } + s.DataFrameId = &o + + case "data_frame.source_index", "dfsi", "dataFrameAnalyticsSrcIndex": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DataFrameSourceIndex = &o + + case "description", "d": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "heap_size", "hs", "modelHeapSize": + if err := dec.Decode(&s.HeapSize); err != nil { + return err + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "ingest.count", "ic", "ingestCount": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IngestCount = &o + + case "ingest.current", "icurr", "ingestCurrent": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IngestCurrent = &o + + case "ingest.failed", "if", "ingestFailed": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IngestFailed = &o + + case "ingest.pipelines", "ip", "ingestPipelines": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IngestPipelines = &o + + case "ingest.time", "it", "ingestTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = 
string(tmp[:]) + } + s.IngestTime = &o + + case "license", "l": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.License = &o + + case "operations", "o", "modelOperations": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Operations = &o + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = &o + + case "version", "v": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewTrainedModelsRecord returns a TrainedModelsRecord. func NewTrainedModelsRecord() *TrainedModelsRecord { r := &TrainedModelsRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelstats.go index 404b09e3a..3d5e34b1e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodelstats.go @@ -16,17 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // TrainedModelStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/TrainedModel.ts#L42-L60 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/TrainedModel.ts#L42-L60 type TrainedModelStats struct { // DeploymentStats A collection of deployment stats, which is present when the models are // deployed. @@ -45,6 +49,70 @@ type TrainedModelStats struct { PipelineCount int `json:"pipeline_count"` } +func (s *TrainedModelStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "deployment_stats": + if err := dec.Decode(&s.DeploymentStats); err != nil { + return err + } + + case "inference_stats": + if err := dec.Decode(&s.InferenceStats); err != nil { + return err + } + + case "ingest": + if s.Ingest == nil { + s.Ingest = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Ingest); err != nil { + return err + } + + case "model_id": + if err := dec.Decode(&s.ModelId); err != nil { + return err + } + + case "model_size_stats": + if err := dec.Decode(&s.ModelSizeStats); err != nil { + return err + } + + case "pipeline_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.PipelineCount = value + case float64: + f := int(v) + s.PipelineCount = f + } + + } + } + return nil +} + // NewTrainedModelStats returns a TrainedModelStats. 
func NewTrainedModelStats() *TrainedModelStats { r := &TrainedModelStats{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeltree.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeltree.go index 3c281af7d..8fbdaed9f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeltree.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeltree.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TrainedModelTree type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model/types.ts#L74-L79 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model/types.ts#L74-L79 type TrainedModelTree struct { ClassificationLabels []string `json:"classification_labels,omitempty"` FeatureNames []string `json:"feature_names"` @@ -30,6 +38,53 @@ type TrainedModelTree struct { TreeStructure []TrainedModelTreeNode `json:"tree_structure"` } +func (s *TrainedModelTree) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classification_labels": + if err := dec.Decode(&s.ClassificationLabels); err != nil { + return err + } + + case "feature_names": + if err := dec.Decode(&s.FeatureNames); err != nil { + return err + } + + case "target_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); 
err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TargetType = &o + + case "tree_structure": + if err := dec.Decode(&s.TreeStructure); err != nil { + return err + } + + } + } + return nil +} + // NewTrainedModelTree returns a TrainedModelTree. func NewTrainedModelTree() *TrainedModelTree { r := &TrainedModelTree{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeltreenode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeltreenode.go index 8c6ea8ee1..cee9da19f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeltreenode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trainedmodeltreenode.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TrainedModelTreeNode type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model/types.ts#L81-L91 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model/types.ts#L81-L91 type TrainedModelTreeNode struct { DecisionType *string `json:"decision_type,omitempty"` DefaultLeft *bool `json:"default_left,omitempty"` @@ -35,6 +43,164 @@ type TrainedModelTreeNode struct { Threshold *Float64 `json:"threshold,omitempty"` } +func (s *TrainedModelTreeNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "decision_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DecisionType = &o + + case "default_left": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DefaultLeft = &value + case bool: + s.DefaultLeft = &v + } + + case "leaf_value": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.LeafValue = &f + case float64: + f := Float64(v) + s.LeafValue = &f + } + + case "left_child": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.LeftChild = &value + case float64: + f := int(v) + s.LeftChild = &f + } + + case "node_index": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.NodeIndex = 
value + case float64: + f := int(v) + s.NodeIndex = f + } + + case "right_child": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.RightChild = &value + case float64: + f := int(v) + s.RightChild = &f + } + + case "split_feature": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SplitFeature = &value + case float64: + f := int(v) + s.SplitFeature = &f + } + + case "split_gain": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.SplitGain = &value + case float64: + f := int(v) + s.SplitGain = &f + } + + case "threshold": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Threshold = &f + case float64: + f := Float64(v) + s.Threshold = &f + } + + } + } + return nil +} + // NewTrainedModelTreeNode returns a TrainedModelTreeNode. func NewTrainedModelTreeNode() *TrainedModelTreeNode { r := &TrainedModelTreeNode{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformauthorization.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformauthorization.go index 50f4f91ef..61fe57a6f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformauthorization.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformauthorization.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TransformAuthorization type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/Authorization.ts#L59-L71 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/Authorization.ts#L59-L71 type TransformAuthorization struct { // ApiKey If an API key was used for the most recent update to the transform, its name // and identifier are listed in the response. @@ -35,6 +43,48 @@ type TransformAuthorization struct { ServiceAccount *string `json:"service_account,omitempty"` } +func (s *TransformAuthorization) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "api_key": + if err := dec.Decode(&s.ApiKey); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "service_account": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ServiceAccount = &o + + } + } + return nil +} + // NewTransformAuthorization returns a TransformAuthorization. 
func NewTransformAuthorization() *TransformAuthorization { r := &TransformAuthorization{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformcontainer.go index dbb885982..06b782fa0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformcontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformcontainer.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // TransformContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Transform.ts#L27-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Transform.ts#L27-L34 type TransformContainer struct { Chain []TransformContainer `json:"chain,omitempty"` Script *ScriptTransform `json:"script,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformdestination.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformdestination.go index 029121cef..6a1557870 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformdestination.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformdestination.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TransformDestination type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/_types/Transform.ts#L34-L45 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/_types/Transform.ts#L34-L45 type TransformDestination struct { // Index The destination index for the transform. The mappings of the destination // index are deduced based on the source @@ -34,6 +42,43 @@ type TransformDestination struct { Pipeline *string `json:"pipeline,omitempty"` } +func (s *TransformDestination) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "pipeline": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pipeline = &o + + } + } + return nil +} + // NewTransformDestination returns a TransformDestination. 
func NewTransformDestination() *TransformDestination { r := &TransformDestination{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformindexerstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformindexerstats.go index 14a027f73..fe8a5ee6d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformindexerstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformindexerstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TransformIndexerStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/get_transform_stats/types.ts#L53-L71 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/get_transform_stats/types.ts#L56-L74 type TransformIndexerStats struct { DeleteTimeInMs *int64 `json:"delete_time_in_ms,omitempty"` DocumentsDeleted *int64 `json:"documents_deleted,omitempty"` @@ -43,6 +51,233 @@ type TransformIndexerStats struct { TriggerCount int64 `json:"trigger_count"` } +func (s *TransformIndexerStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "delete_time_in_ms": + if err := dec.Decode(&s.DeleteTimeInMs); err != nil { + return err + } + + case "documents_deleted": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocumentsDeleted = &value + case float64: + f := int64(v) + s.DocumentsDeleted = &f + } + + case "documents_indexed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocumentsIndexed = value + case float64: + f := int64(v) + s.DocumentsIndexed = f + } + + case "documents_processed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocumentsProcessed = value + case float64: + f := int64(v) + s.DocumentsProcessed = f + } + + case "exponential_avg_checkpoint_duration_ms": + if err := dec.Decode(&s.ExponentialAvgCheckpointDurationMs); err != nil { + return err + } + + case "exponential_avg_documents_indexed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.ExponentialAvgDocumentsIndexed = f + case float64: + f := Float64(v) + s.ExponentialAvgDocumentsIndexed = f + } + + case "exponential_avg_documents_processed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.ExponentialAvgDocumentsProcessed = f + case float64: + f := Float64(v) + s.ExponentialAvgDocumentsProcessed = f + } + + case "index_failures": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndexFailures = value + case float64: + f := int64(v) + s.IndexFailures = f + } + + case "index_time_in_ms": + if err := dec.Decode(&s.IndexTimeInMs); err != nil { + return err + } + + case "index_total": + var tmp interface{} + 
dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IndexTotal = value + case float64: + f := int64(v) + s.IndexTotal = f + } + + case "pages_processed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PagesProcessed = value + case float64: + f := int64(v) + s.PagesProcessed = f + } + + case "processing_time_in_ms": + if err := dec.Decode(&s.ProcessingTimeInMs); err != nil { + return err + } + + case "processing_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ProcessingTotal = value + case float64: + f := int64(v) + s.ProcessingTotal = f + } + + case "search_failures": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SearchFailures = value + case float64: + f := int64(v) + s.SearchFailures = f + } + + case "search_time_in_ms": + if err := dec.Decode(&s.SearchTimeInMs); err != nil { + return err + } + + case "search_total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SearchTotal = value + case float64: + f := int64(v) + s.SearchTotal = f + } + + case "trigger_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TriggerCount = value + case float64: + f := int64(v) + s.TriggerCount = f + } + + } + } + return nil +} + // NewTransformIndexerStats returns a TransformIndexerStats. 
func NewTransformIndexerStats() *TransformIndexerStats { r := &TransformIndexerStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformprogress.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformprogress.go index 4fbac9aae..0e9a74da4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformprogress.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformprogress.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TransformProgress type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/get_transform_stats/types.ts#L45-L51 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/get_transform_stats/types.ts#L48-L54 type TransformProgress struct { DocsIndexed int64 `json:"docs_indexed"` DocsProcessed int64 `json:"docs_processed"` @@ -31,6 +39,102 @@ type TransformProgress struct { TotalDocs int64 `json:"total_docs"` } +func (s *TransformProgress) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "docs_indexed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocsIndexed = value + case float64: + f := int64(v) + s.DocsIndexed = f + } + + case "docs_processed": + var tmp 
interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocsProcessed = value + case float64: + f := int64(v) + s.DocsProcessed = f + } + + case "docs_remaining": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocsRemaining = value + case float64: + f := int64(v) + s.DocsRemaining = f + } + + case "percent_complete": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.PercentComplete = f + case float64: + f := Float64(v) + s.PercentComplete = f + } + + case "total_docs": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalDocs = value + case float64: + f := int64(v) + s.TotalDocs = f + } + + } + } + return nil +} + // NewTransformProgress returns a TransformProgress. func NewTransformProgress() *TransformProgress { r := &TransformProgress{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformsource.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformsource.go index 731b95ac3..6595cf122 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformsource.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformsource.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // TransformSource type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/_types/Transform.ts#L145-L163 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/_types/Transform.ts#L146-L165 type TransformSource struct { // Index The source indices for the transform. It can be a single index, an index // pattern (for example, `"my-index-*""`), an @@ -38,7 +45,53 @@ type TransformSource struct { // RuntimeMappings Definitions of search-time runtime fields that can be used by the transform. // For search runtime fields all data // nodes, including remote nodes, must be 7.12 or later. 
- RuntimeMappings map[string]RuntimeField `json:"runtime_mappings,omitempty"` + RuntimeMappings RuntimeFields `json:"runtime_mappings,omitempty"` +} + +func (s *TransformSource) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Index = append(s.Index, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Index); err != nil { + return err + } + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + case "runtime_mappings": + if err := dec.Decode(&s.RuntimeMappings); err != nil { + return err + } + + } + } + return nil } // NewTransformSource returns a TransformSource. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformsrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformsrecord.go index cc5592148..acfcafd6b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformsrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformsrecord.go @@ -16,82 +16,516 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TransformsRecord type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cat/transforms/types.ts#L22-L187 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cat/transforms/types.ts#L22-L197 type TransformsRecord struct { - // ChangesLastDetectionTime changes last detected time + // ChangesLastDetectionTime The timestamp when changes were last detected in the source indices. ChangesLastDetectionTime string `json:"changes_last_detection_time,omitempty"` - // Checkpoint checkpoint + // Checkpoint The sequence number for the checkpoint. Checkpoint *string `json:"checkpoint,omitempty"` - // CheckpointDurationTimeExpAvg exponential average checkpoint processing time (milliseconds) + // CheckpointDurationTimeExpAvg The exponential moving average of the duration of the checkpoint, in + // milliseconds. CheckpointDurationTimeExpAvg *string `json:"checkpoint_duration_time_exp_avg,omitempty"` - // CheckpointProgress progress of the checkpoint + // CheckpointProgress The progress of the next checkpoint that is currently in progress. CheckpointProgress string `json:"checkpoint_progress,omitempty"` - // CreateTime transform creation time + // CreateTime The time the transform was created. CreateTime *string `json:"create_time,omitempty"` - // DeleteTime total time spent deleting documents + // DeleteTime The total time spent deleting documents, in milliseconds. DeleteTime *string `json:"delete_time,omitempty"` - // Description description + // Description The description of the transform. Description *string `json:"description,omitempty"` - // DestIndex destination index + // DestIndex The destination index for the transform. DestIndex *string `json:"dest_index,omitempty"` - // DocsPerSecond docs per second + // DocsPerSecond The number of input documents per second. 
DocsPerSecond *string `json:"docs_per_second,omitempty"` - // DocumentsDeleted the number of documents deleted from the destination index + // DocumentsDeleted The number of documents deleted from the destination index due to the + // retention policy for the transform. DocumentsDeleted *string `json:"documents_deleted,omitempty"` - // DocumentsIndexed the number of documents written to the destination index + // DocumentsIndexed The number of documents that have been indexed into the destination index for + // the transform. DocumentsIndexed *string `json:"documents_indexed,omitempty"` - // DocumentsProcessed the number of documents read from source indices and processed + // DocumentsProcessed The number of documents that have been processed from the source index of the + // transform. DocumentsProcessed *string `json:"documents_processed,omitempty"` - // Frequency frequency of transform + // Frequency The interval between checks for changes in the source indices when the + // transform is running continuously. Frequency *string `json:"frequency,omitempty"` - // Id the id + // Id The transform identifier. Id *string `json:"id,omitempty"` - // IndexFailure total number of index failures + // IndexFailure The total number of indexing failures. IndexFailure *string `json:"index_failure,omitempty"` - // IndexTime total time spent indexing documents + // IndexTime The total time spent indexing documents, in milliseconds. IndexTime *string `json:"index_time,omitempty"` - // IndexTotal total number of index phases done by the transform + // IndexTotal The total number of index operations done by the transform. IndexTotal *string `json:"index_total,omitempty"` - // IndexedDocumentsExpAvg exponential average number of documents indexed + // IndexedDocumentsExpAvg The exponential moving average of the number of new documents that have been + // indexed. 
IndexedDocumentsExpAvg *string `json:"indexed_documents_exp_avg,omitempty"` - // LastSearchTime last time transform searched for updates + // LastSearchTime The timestamp of the last search in the source indices. + // This field is shown only if the transform is running. LastSearchTime string `json:"last_search_time,omitempty"` - // MaxPageSearchSize max page search size + // MaxPageSearchSize The initial page size that is used for the composite aggregation for each + // checkpoint. MaxPageSearchSize *string `json:"max_page_search_size,omitempty"` - // PagesProcessed the number of pages processed + // PagesProcessed The number of search or bulk index operations processed. + // Documents are processed in batches instead of individually. PagesProcessed *string `json:"pages_processed,omitempty"` - // Pipeline transform pipeline + // Pipeline The unique identifier for the ingest pipeline. Pipeline *string `json:"pipeline,omitempty"` - // ProcessedDocumentsExpAvg exponential average number of documents processed + // ProcessedDocumentsExpAvg The exponential moving average of the number of documents that have been + // processed. ProcessedDocumentsExpAvg *string `json:"processed_documents_exp_avg,omitempty"` - // ProcessingTime the total time spent processing documents + // ProcessingTime The total time spent processing results, in milliseconds. ProcessingTime *string `json:"processing_time,omitempty"` - // Reason reason for the current state + // Reason If a transform has a `failed` state, these details describe the reason for + // failure. Reason *string `json:"reason,omitempty"` - // SearchFailure total number of search failures + // SearchFailure The total number of search failures. SearchFailure *string `json:"search_failure,omitempty"` - // SearchTime total search time + // SearchTime The total amount of search time, in milliseconds. 
SearchTime *string `json:"search_time,omitempty"` - // SearchTotal total number of search phases + // SearchTotal The total number of search operations on the source index for the transform. SearchTotal *string `json:"search_total,omitempty"` - // SourceIndex source index + // SourceIndex The source indices for the transform. SourceIndex *string `json:"source_index,omitempty"` - // State transform state + // State The status of the transform. + // Returned values include: + // `aborting`: The transform is aborting. + // `failed: The transform failed. For more information about the failure, check + // the `reason` field. + // `indexing`: The transform is actively processing data and creating new + // documents. + // `started`: The transform is running but not actively indexing data. + // `stopped`: The transform is stopped. + // `stopping`: The transform is stopping. State *string `json:"state,omitempty"` - // TransformType batch or continuous transform + // TransformType The type of transform: `batch` or `continuous`. TransformType *string `json:"transform_type,omitempty"` - // TriggerCount the number of times the transform has been triggered + // TriggerCount The number of times the transform has been triggered by the scheduler. + // For example, the scheduler triggers the transform indexer to check for + // updates or ingest new data at an interval specified in the `frequency` + // property. TriggerCount *string `json:"trigger_count,omitempty"` - // Version the version of Elasticsearch when the transform was created + // Version The version of Elasticsearch that existed on the node when the transform was + // created. 
Version *string `json:"version,omitempty"` } +func (s *TransformsRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "changes_last_detection_time", "cldt": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ChangesLastDetectionTime = o + + case "checkpoint", "c": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Checkpoint = &o + + case "checkpoint_duration_time_exp_avg", "cdtea", "checkpointTimeExpAvg": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CheckpointDurationTimeExpAvg = &o + + case "checkpoint_progress", "cp", "checkpointProgress": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CheckpointProgress = o + + case "create_time", "ct", "createTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.CreateTime = &o + + case "delete_time", "dtime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DeleteTime = &o + + case "description", "d": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != 
nil { + o = string(tmp[:]) + } + s.Description = &o + + case "dest_index", "di", "destIndex": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DestIndex = &o + + case "docs_per_second", "dps": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DocsPerSecond = &o + + case "documents_deleted", "docd": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DocumentsDeleted = &o + + case "documents_indexed", "doci": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DocumentsIndexed = &o + + case "documents_processed", "docp", "documentsProcessed": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.DocumentsProcessed = &o + + case "frequency", "f": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Frequency = &o + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "index_failure", "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexFailure = &o + + case "index_time", "itime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = 
strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexTime = &o + + case "index_total", "it": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexTotal = &o + + case "indexed_documents_exp_avg", "idea": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.IndexedDocumentsExpAvg = &o + + case "last_search_time", "lst", "lastSearchTime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LastSearchTime = o + + case "max_page_search_size", "mpsz": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxPageSearchSize = &o + + case "pages_processed", "pp": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.PagesProcessed = &o + + case "pipeline", "p": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pipeline = &o + + case "processed_documents_exp_avg", "pdea": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ProcessedDocumentsExpAvg = &o + + case "processing_time", "pt": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o 
= string(tmp[:]) + } + s.ProcessingTime = &o + + case "reason", "r": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + case "search_failure", "sf": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchFailure = &o + + case "search_time", "stime": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchTime = &o + + case "search_total", "st": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SearchTotal = &o + + case "source_index", "si", "sourceIndex": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.SourceIndex = &o + + case "state", "s": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.State = &o + + case "transform_type", "tt": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TransformType = &o + + case "trigger_count", "tc": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TriggerCount = &o + + case "version", "v": + if err := dec.Decode(&s.Version); err != nil { + return err + } 
+ + } + } + return nil +} + // NewTransformsRecord returns a TransformsRecord. func NewTransformsRecord() *TransformsRecord { r := &TransformsRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformstats.go index 564e4b07f..e85ec875e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TransformStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/get_transform_stats/types.ts#L31-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/get_transform_stats/types.ts#L31-L42 type TransformStats struct { Checkpointing Checkpointing `json:"checkpointing"` Health *TransformStatsHealth `json:"health,omitempty"` @@ -33,6 +41,75 @@ type TransformStats struct { Stats TransformIndexerStats `json:"stats"` } +func (s *TransformStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "checkpointing": + if err := dec.Decode(&s.Checkpointing); err != nil { + return err + } + + case "health": + if err := dec.Decode(&s.Health); err != nil { + return err + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { 
+ return err + } + + case "node": + if err := dec.Decode(&s.Node); err != nil { + return err + } + + case "reason": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Reason = &o + + case "state": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.State = o + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + return err + } + + } + } + return nil +} + // NewTransformStats returns a TransformStats. func NewTransformStats() *TransformStats { r := &TransformStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformstatshealth.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformstatshealth.go index e6fb5e2b1..f85fb24d2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformstatshealth.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformstatshealth.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // TransformStatsHealth type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/get_transform_stats/types.ts#L41-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/get_transform_stats/types.ts#L44-L46 type TransformStatsHealth struct { Status healthstatus.HealthStatus `json:"status"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformsummary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformsummary.go index 41896561f..15d333571 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformsummary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transformsummary.go @@ -16,17 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // TransformSummary type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/transform/get_transform/types.ts#L33-L61 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/transform/get_transform/types.ts#L33-L61 type TransformSummary struct { // Authorization The security privileges that the transform uses to run its queries. If // Elastic Stack security features were disabled at the time of the most recent @@ -37,11 +41,11 @@ type TransformSummary struct { // Description Free text description of the transform. Description *string `json:"description,omitempty"` // Dest The destination for the transform. 
- Dest ReindexDestination `json:"dest"` - Frequency Duration `json:"frequency,omitempty"` - Id string `json:"id"` - Latest *Latest `json:"latest,omitempty"` - Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` + Dest ReindexDestination `json:"dest"` + Frequency Duration `json:"frequency,omitempty"` + Id string `json:"id"` + Latest *Latest `json:"latest,omitempty"` + Meta_ Metadata `json:"_meta,omitempty"` // Pivot The pivot method transforms the data by aggregating and grouping it. Pivot *Pivot `json:"pivot,omitempty"` RetentionPolicy *RetentionPolicyContainer `json:"retention_policy,omitempty"` @@ -56,6 +60,103 @@ type TransformSummary struct { Version *string `json:"version,omitempty"` } +func (s *TransformSummary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "authorization": + if err := dec.Decode(&s.Authorization); err != nil { + return err + } + + case "create_time": + if err := dec.Decode(&s.CreateTime); err != nil { + return err + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "dest": + if err := dec.Decode(&s.Dest); err != nil { + return err + } + + case "frequency": + if err := dec.Decode(&s.Frequency); err != nil { + return err + } + + case "id": + if err := dec.Decode(&s.Id); err != nil { + return err + } + + case "latest": + if err := dec.Decode(&s.Latest); err != nil { + return err + } + + case "_meta": + if err := dec.Decode(&s.Meta_); err != nil { + return err + } + + case "pivot": + if err := dec.Decode(&s.Pivot); err != nil { + return err + } + + case "retention_policy": + if err := dec.Decode(&s.RetentionPolicy); err != nil { + return err + } + + case "settings": + if err := 
dec.Decode(&s.Settings); err != nil { + return err + } + + case "source": + if err := dec.Decode(&s.Source); err != nil { + return err + } + + case "sync": + if err := dec.Decode(&s.Sync); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewTransformSummary returns a TransformSummary. func NewTransformSummary() *TransformSummary { r := &TransformSummary{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transientmetadataconfig.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transientmetadataconfig.go index 2a4e81799..9c221354d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transientmetadataconfig.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transientmetadataconfig.go @@ -16,17 +16,59 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TransientMetadataConfig type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/TransientMetadataConfig.ts#L20-L22 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/TransientMetadataConfig.ts#L20-L22 type TransientMetadataConfig struct { Enabled bool `json:"enabled"` } +func (s *TransientMetadataConfig) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + } + } + return nil +} + // NewTransientMetadataConfig returns a TransientMetadataConfig. func NewTransientMetadataConfig() *TransientMetadataConfig { r := &TransientMetadataConfig{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/translog.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/translog.go index 77aa4ab52..098fd9679 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/translog.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/translog.go @@ -16,17 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/translogdurability" ) // Translog type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L332-L354 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L332-L354 type Translog struct { // Durability Whether or not to `fsync` and commit the translog after every index, delete, // update, or bulk request. @@ -49,6 +54,46 @@ type Translog struct { SyncInterval Duration `json:"sync_interval,omitempty"` } +func (s *Translog) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "durability": + if err := dec.Decode(&s.Durability); err != nil { + return err + } + + case "flush_threshold_size": + if err := dec.Decode(&s.FlushThresholdSize); err != nil { + return err + } + + case "retention": + if err := dec.Decode(&s.Retention); err != nil { + return err + } + + case "sync_interval": + if err := dec.Decode(&s.SyncInterval); err != nil { + return err + } + + } + } + return nil +} + // NewTranslog returns a Translog. func NewTranslog() *Translog { r := &Translog{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/translogretention.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/translogretention.go index 951b55bc5..9141dfaa8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/translogretention.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/translogretention.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // TranslogRetention type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/_types/IndexSettings.ts#L373-L392 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/_types/IndexSettings.ts#L373-L392 type TranslogRetention struct { // Age This controls the maximum duration for which translog files are kept by each // shard. Keeping more @@ -46,6 +53,36 @@ type TranslogRetention struct { Size ByteSize `json:"size,omitempty"` } +func (s *TranslogRetention) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "age": + if err := dec.Decode(&s.Age); err != nil { + return err + } + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return err + } + + } + } + return nil +} + // NewTranslogRetention returns a TranslogRetention. func NewTranslogRetention() *TranslogRetention { r := &TranslogRetention{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/translogstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/translogstats.go index 9de80a9bf..e14f8b715 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/translogstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/translogstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TranslogStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L242-L250 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L397-L405 type TranslogStats struct { EarliestLastModifiedAge int64 `json:"earliest_last_modified_age"` Operations int64 `json:"operations"` @@ -33,6 +41,126 @@ type TranslogStats struct { UncommittedSizeInBytes int64 `json:"uncommitted_size_in_bytes"` } +func (s *TranslogStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "earliest_last_modified_age": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.EarliestLastModifiedAge = value + case float64: + f := int64(v) + s.EarliestLastModifiedAge = f + } + + case "operations": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Operations = value + case float64: + f := int64(v) + s.Operations = f + } + + case "size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Size = &o + + case "size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SizeInBytes = value + case float64: + f := int64(v) + s.SizeInBytes = f + } + + case "uncommitted_operations": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.UncommittedOperations = value + case float64: + f := int(v) + s.UncommittedOperations = f + } + + case "uncommitted_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.UncommittedSize = &o + + case "uncommitted_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.UncommittedSizeInBytes = value + case float64: + f := int64(v) + s.UncommittedSizeInBytes = f + } + + } + } + return nil +} + // NewTranslogStats returns a TranslogStats. func NewTranslogStats() *TranslogStats { r := &TranslogStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/translogstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/translogstatus.go index 535117d33..3d35f89f5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/translogstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/translogstatus.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TranslogStatus type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/recovery/types.ts#L102-L109 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/recovery/types.ts#L102-L109 type TranslogStatus struct { Percent Percentage `json:"percent"` Recovered int64 `json:"recovered"` @@ -32,6 +40,86 @@ type TranslogStatus struct { TotalTimeInMillis int64 `json:"total_time_in_millis"` } +func (s *TranslogStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "percent": + if err := dec.Decode(&s.Percent); err != nil { + return err + } + + case "recovered": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Recovered = value + case float64: + f := int64(v) + s.Recovered = f + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + case "total_on_start": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalOnStart = value + case float64: + f := int64(v) + s.TotalOnStart = f + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return err + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return err + } + + } + } + return nil +} + // NewTranslogStatus returns a TranslogStatus. 
func NewTranslogStatus() *TranslogStatus { r := &TranslogStatus{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transport.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transport.go index 733e1fa7e..ca1847508 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transport.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transport.go @@ -16,24 +16,200 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Transport type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L420-L431 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L1047-L1090 type Transport struct { - InboundHandlingTimeHistogram []TransportHistogram `json:"inbound_handling_time_histogram,omitempty"` + // InboundHandlingTimeHistogram The distribution of the time spent handling each inbound message on a + // transport thread, represented as a histogram. + InboundHandlingTimeHistogram []TransportHistogram `json:"inbound_handling_time_histogram,omitempty"` + // OutboundHandlingTimeHistogram The distribution of the time spent sending each outbound transport message on + // a transport thread, represented as a histogram. 
OutboundHandlingTimeHistogram []TransportHistogram `json:"outbound_handling_time_histogram,omitempty"` - RxCount *int64 `json:"rx_count,omitempty"` - RxSize *string `json:"rx_size,omitempty"` - RxSizeInBytes *int64 `json:"rx_size_in_bytes,omitempty"` - ServerOpen *int `json:"server_open,omitempty"` - TotalOutboundConnections *int64 `json:"total_outbound_connections,omitempty"` - TxCount *int64 `json:"tx_count,omitempty"` - TxSize *string `json:"tx_size,omitempty"` - TxSizeInBytes *int64 `json:"tx_size_in_bytes,omitempty"` + // RxCount Total number of RX (receive) packets received by the node during internal + // cluster communication. + RxCount *int64 `json:"rx_count,omitempty"` + // RxSize Size of RX packets received by the node during internal cluster + // communication. + RxSize *string `json:"rx_size,omitempty"` + // RxSizeInBytes Size, in bytes, of RX packets received by the node during internal cluster + // communication. + RxSizeInBytes *int64 `json:"rx_size_in_bytes,omitempty"` + // ServerOpen Current number of inbound TCP connections used for internal communication + // between nodes. + ServerOpen *int `json:"server_open,omitempty"` + // TotalOutboundConnections The cumulative number of outbound transport connections that this node has + // opened since it started. + // Each transport connection may comprise multiple TCP connections but is only + // counted once in this statistic. + // Transport connections are typically long-lived so this statistic should + // remain constant in a stable cluster. + TotalOutboundConnections *int64 `json:"total_outbound_connections,omitempty"` + // TxCount Total number of TX (transmit) packets sent by the node during internal + // cluster communication. + TxCount *int64 `json:"tx_count,omitempty"` + // TxSize Size of TX packets sent by the node during internal cluster communication. 
+ TxSize *string `json:"tx_size,omitempty"` + // TxSizeInBytes Size, in bytes, of TX packets sent by the node during internal cluster + // communication. + TxSizeInBytes *int64 `json:"tx_size_in_bytes,omitempty"` +} + +func (s *Transport) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "inbound_handling_time_histogram": + if err := dec.Decode(&s.InboundHandlingTimeHistogram); err != nil { + return err + } + + case "outbound_handling_time_histogram": + if err := dec.Decode(&s.OutboundHandlingTimeHistogram); err != nil { + return err + } + + case "rx_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.RxCount = &value + case float64: + f := int64(v) + s.RxCount = &f + } + + case "rx_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.RxSize = &o + + case "rx_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.RxSizeInBytes = &value + case float64: + f := int64(v) + s.RxSizeInBytes = &f + } + + case "server_open": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ServerOpen = &value + case float64: + f := int(v) + s.ServerOpen = &f + } + + case "total_outbound_connections": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TotalOutboundConnections = &value + case float64: + f := int64(v) + 
s.TotalOutboundConnections = &f + } + + case "tx_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TxCount = &value + case float64: + f := int64(v) + s.TxCount = &f + } + + case "tx_size": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TxSize = &o + + case "tx_size_in_bytes": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.TxSizeInBytes = &value + case float64: + f := int64(v) + s.TxSizeInBytes = &f + } + + } + } + return nil } // NewTransport returns a Transport. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transporthistogram.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transporthistogram.go index 19f35f2ba..8087d067c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transporthistogram.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/transporthistogram.go @@ -16,19 +16,98 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TransportHistogram type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/nodes/_types/Stats.ts#L433-L437 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/nodes/_types/Stats.ts#L1092-L1106 type TransportHistogram struct { - Count *int64 `json:"count,omitempty"` + // Count The number of times a transport thread took a period of time within the + // bounds of this bucket to handle an inbound message. + Count *int64 `json:"count,omitempty"` + // GeMillis The inclusive lower bound of the bucket in milliseconds. May be omitted on + // the first bucket if this bucket has no lower bound. GeMillis *int64 `json:"ge_millis,omitempty"` + // LtMillis The exclusive upper bound of the bucket in milliseconds. + // May be omitted on the last bucket if this bucket has no upper bound. LtMillis *int64 `json:"lt_millis,omitempty"` } +func (s *TransportHistogram) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = &value + case float64: + f := int64(v) + s.Count = &f + } + + case "ge_millis": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.GeMillis = &value + case float64: + f := int64(v) + s.GeMillis = &f + } + + case "lt_millis": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LtMillis = &value + case float64: + f := int64(v) + s.LtMillis = &f + } + + } + } + return nil +} + // 
NewTransportHistogram returns a TransportHistogram. func NewTransportHistogram() *TransportHistogram { r := &TransportHistogram{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/triggercontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/triggercontainer.go index 0101ee49f..e488e5a07 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/triggercontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/triggercontainer.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // TriggerContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Trigger.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Trigger.ts#L23-L28 type TriggerContainer struct { Schedule *ScheduleContainer `json:"schedule,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/triggereventcontainer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/triggereventcontainer.go index 0ca7512d7..7624043fc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/triggereventcontainer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/triggereventcontainer.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // TriggerEventContainer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Trigger.ts#L32-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Trigger.ts#L32-L37 type TriggerEventContainer struct { Schedule *ScheduleTriggerEvent `json:"schedule,omitempty"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/triggereventresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/triggereventresult.go index fac863817..cd4308c92 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/triggereventresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/triggereventresult.go @@ -16,19 +16,69 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TriggerEventResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Trigger.ts#L39-L43 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Trigger.ts#L39-L43 type TriggerEventResult struct { Manual TriggerEventContainer `json:"manual"` TriggeredTime DateTime `json:"triggered_time"` Type string `json:"type"` } +func (s *TriggerEventResult) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "manual": + if err := dec.Decode(&s.Manual); err != nil { + return err + } + + case "triggered_time": + if err := dec.Decode(&s.TriggeredTime); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewTriggerEventResult returns a TriggerEventResult. func NewTriggerEventResult() *TriggerEventResult { r := &TriggerEventResult{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trimprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trimprocessor.go index 704d08e43..0f8738c87 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trimprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trimprocessor.go @@ -16,22 +16,141 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TrimProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L362-L366 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L1079-L1095 type TrimProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The string-valued field to trim whitespace from. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the trimmed value to. 
+ // By default, the field is updated in-place. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *TrimProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewTrimProcessor returns a TrimProcessor. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trimtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trimtokenfilter.go index a8bba5c17..146722f89 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trimtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/trimtokenfilter.go @@ -16,23 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // TrimTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L324-L326 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L326-L328 type TrimTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *TrimTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s TrimTokenFilter) MarshalJSON() ([]byte, error) { + type innerTrimTokenFilter TrimTokenFilter + tmp := innerTrimTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "trim" + + return json.Marshal(tmp) +} + // NewTrimTokenFilter returns a 
TrimTokenFilter. func NewTrimTokenFilter() *TrimTokenFilter { r := &TrimTokenFilter{} - r.Type = "trim" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/truncatetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/truncatetokenfilter.go index 70da6104f..b5350c03e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/truncatetokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/truncatetokenfilter.go @@ -16,24 +16,90 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TruncateTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L328-L331 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L330-L333 type TruncateTokenFilter struct { Length *int `json:"length,omitempty"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *TruncateTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Length = &value + case float64: + f := int(v) + s.Length = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + 
case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s TruncateTokenFilter) MarshalJSON() ([]byte, error) { + type innerTruncateTokenFilter TruncateTokenFilter + tmp := innerTruncateTokenFilter{ + Length: s.Length, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "truncate" + + return json.Marshal(tmp) +} + // NewTruncateTokenFilter returns a TruncateTokenFilter. func NewTruncateTokenFilter() *TruncateTokenFilter { r := &TruncateTokenFilter{} - r.Type = "truncate" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ttestaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ttestaggregate.go index e3afd4a05..da80bbb6b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ttestaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ttestaggregate.go @@ -16,21 +16,67 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // TTestAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L726-L730 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L735-L739 type TTestAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Value Float64 `json:"value,omitempty"` - ValueAsString *string `json:"value_as_string,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Value Float64 `json:"value,omitempty"` + ValueAsString *string `json:"value_as_string,omitempty"` +} + +func (s *TTestAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil } // NewTTestAggregate returns a TTestAggregate. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ttestaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ttestaggregation.go index 038771afc..7202dd618 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ttestaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/ttestaggregation.go @@ -16,25 +16,84 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ttesttype" ) // TTestAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L153-L157 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L294-L308 type TTestAggregation struct { - A *TestPopulation `json:"a,omitempty"` - B *TestPopulation `json:"b,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Type *ttesttype.TTestType `json:"type,omitempty"` + // A Test population A. + A *TestPopulation `json:"a,omitempty"` + // B Test population B. + B *TestPopulation `json:"b,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Type The type of test. 
+ Type *ttesttype.TTestType `json:"type,omitempty"` +} + +func (s *TTestAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "a": + if err := dec.Decode(&s.A); err != nil { + return err + } + + case "b": + if err := dec.Decode(&s.B); err != nil { + return err + } + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + } + } + return nil } // NewTTestAggregation returns a TTestAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/typefieldmappings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/typefieldmappings.go index 0aa58b89a..cbbb109c3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/typefieldmappings.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/typefieldmappings.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // TypeFieldMappings type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/get_field_mapping/types.ts#L24-L26 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/get_field_mapping/types.ts#L24-L26 type TypeFieldMappings struct { Mappings map[string]FieldMapping `json:"mappings"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/typemapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/typemapping.go index 3caf30e15..888a3a16c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/typemapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/typemapping.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // TypeMapping type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/TypeMapping.ts#L34-L55 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/TypeMapping.ts#L34-L56 type TypeMapping struct { AllField *AllField `json:"all_field,omitempty"` DataStreamTimestamp_ *DataStreamTimestamp `json:"_data_stream_timestamp,omitempty"` @@ -43,7 +43,7 @@ type TypeMapping struct { Enabled *bool `json:"enabled,omitempty"` FieldNames_ *FieldNamesField `json:"_field_names,omitempty"` IndexField *IndexField `json:"index_field,omitempty"` - Meta_ map[string]json.RawMessage `json:"_meta,omitempty"` + Meta_ Metadata `json:"_meta,omitempty"` NumericDetection *bool `json:"numeric_detection,omitempty"` Properties map[string]Property `json:"properties,omitempty"` Routing_ *RoutingField `json:"_routing,omitempty"` @@ -53,6 +53,7 @@ type TypeMapping struct { } func (s *TypeMapping) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -77,8 +78,17 @@ func (s *TypeMapping) UnmarshalJSON(data []byte) error { } case "date_detection": - if err := dec.Decode(&s.DateDetection); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DateDetection = &value + case bool: + s.DateDetection = &v } case "dynamic": @@ -97,8 +107,17 @@ func (s *TypeMapping) UnmarshalJSON(data []byte) error { } case "enabled": - if err := dec.Decode(&s.Enabled); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = &value + case bool: + s.Enabled = &v } case "_field_names": @@ -117,11 +136,23 @@ func (s *TypeMapping) UnmarshalJSON(data []byte) error { } case 
"numeric_detection": - if err := dec.Decode(&s.NumericDetection); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.NumericDetection = &value + case bool: + s.NumericDetection = &v } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -130,7 +161,9 @@ func (s *TypeMapping) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -409,9 +442,11 @@ func (s *TypeMapping) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -421,6 +456,9 @@ func (s *TypeMapping) UnmarshalJSON(data []byte) error { } case "runtime": + if s.Runtime == nil { + s.Runtime = make(map[string]RuntimeField, 0) + } if err := dec.Decode(&s.Runtime); err != nil { return err } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/typequery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/typequery.go index 085f3d339..0025899d9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/typequery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/typequery.go @@ -16,19 +16,92 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // TypeQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/term.ts#L145-L147 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/term.ts#L264-L266 type TypeQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. Boost *float32 `json:"boost,omitempty"` QueryName_ *string `json:"_name,omitempty"` Value string `json:"value"` } +func (s *TypeQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Value = o + + } + } + 
return nil +} + // NewTypeQuery returns a TypeQuery. func NewTypeQuery() *TypeQuery { r := &TypeQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/uaxemailurltokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/uaxemailurltokenizer.go index 6cf8400e4..1e2f82e46 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/uaxemailurltokenizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/uaxemailurltokenizer.go @@ -16,24 +16,90 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // UaxEmailUrlTokenizer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/tokenizers.ts#L109-L112 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/tokenizers.ts#L110-L113 type UaxEmailUrlTokenizer struct { MaxTokenLength *int `json:"max_token_length,omitempty"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *UaxEmailUrlTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_token_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxTokenLength = &value + case float64: + f := int(v) + s.MaxTokenLength = &f + } + + case "type": + if err := dec.Decode(&s.Type); err 
!= nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s UaxEmailUrlTokenizer) MarshalJSON() ([]byte, error) { + type innerUaxEmailUrlTokenizer UaxEmailUrlTokenizer + tmp := innerUaxEmailUrlTokenizer{ + MaxTokenLength: s.MaxTokenLength, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "uax_url_email" + + return json.Marshal(tmp) +} + // NewUaxEmailUrlTokenizer returns a UaxEmailUrlTokenizer. func NewUaxEmailUrlTokenizer() *UaxEmailUrlTokenizer { r := &UaxEmailUrlTokenizer{} - r.Type = "uax_url_email" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unassignedinformation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unassignedinformation.go index ccfcb24b6..6d4f49742 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unassignedinformation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unassignedinformation.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/unassignedinformationreason" ) // UnassignedInformation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/cluster/allocation_explain/types.ts#L117-L125 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/cluster/allocation_explain/types.ts#L117-L125 type UnassignedInformation struct { AllocationStatus *string `json:"allocation_status,omitempty"` At DateTime `json:"at"` @@ -37,6 +43,102 @@ type UnassignedInformation struct { Reason unassignedinformationreason.UnassignedInformationReason `json:"reason"` } +func (s *UnassignedInformation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allocation_status": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.AllocationStatus = &o + + case "at": + if err := dec.Decode(&s.At); err != nil { + return err + } + + case "delayed": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Delayed = &value + case bool: + s.Delayed = &v + } + + case "details": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Details = &o + + case "failed_allocation_attempts": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.FailedAllocationAttempts = &value + case float64: + f := int(v) + s.FailedAllocationAttempts = &f + } + + case "last_allocation_status": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + 
return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LastAllocationStatus = &o + + case "reason": + if err := dec.Decode(&s.Reason); err != nil { + return err + } + + } + } + return nil +} + // NewUnassignedInformation returns a UnassignedInformation. func NewUnassignedInformation() *UnassignedInformation { r := &UnassignedInformation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/uniquetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/uniquetokenfilter.go index 01a817fb5..e83391c2d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/uniquetokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/uniquetokenfilter.go @@ -16,24 +16,88 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // UniqueTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L333-L336 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L335-L338 type UniqueTokenFilter struct { OnlyOnSamePosition *bool `json:"only_on_same_position,omitempty"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *UniqueTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "only_on_same_position": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.OnlyOnSamePosition = &value + case bool: + s.OnlyOnSamePosition = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s UniqueTokenFilter) MarshalJSON() ([]byte, error) { + type innerUniqueTokenFilter UniqueTokenFilter + tmp := innerUniqueTokenFilter{ + OnlyOnSamePosition: s.OnlyOnSamePosition, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "unique" + + return json.Marshal(tmp) +} + // NewUniqueTokenFilter returns a UniqueTokenFilter. 
func NewUniqueTokenFilter() *UniqueTokenFilter { r := &UniqueTokenFilter{} - r.Type = "unique" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unmappedraretermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unmappedraretermsaggregate.go index fb9ab3a8b..f10393b8d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unmappedraretermsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unmappedraretermsaggregate.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // UnmappedRareTermsAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L452-L458 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L453-L459 type UnmappedRareTermsAggregate struct { - Buckets BucketsVoid `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Buckets BucketsVoid `json:"buckets"` + Meta Metadata `json:"meta,omitempty"` } func (s *UnmappedRareTermsAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *UnmappedRareTermsAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': - o := make(map[string]struct{}, 0) - localDec.Decode(&o) + o := make(map[string]interface{}, 0) + if err := localDec.Decode(&o); err != nil { + return err + } 
s.Buckets = o - case '[': - o := []struct{}{} - localDec.Decode(&o) + o := []interface{}{} + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unmappedsampleraggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unmappedsampleraggregate.go index d181e0052..5b2441047 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unmappedsampleraggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unmappedsampleraggregate.go @@ -16,32 +16,31 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // UnmappedSamplerAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L500-L501 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L501-L502 type UnmappedSamplerAggregate struct { - Aggregations map[string]Aggregate `json:"-"` - DocCount int64 `json:"doc_count"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Aggregations map[string]Aggregate `json:"-"` + DocCount int64 `json:"doc_count"` + Meta Metadata `json:"meta,omitempty"` } func (s *UnmappedSamplerAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -55,451 +54,19 @@ func (s *UnmappedSamplerAggregate) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); 
err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := 
NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := 
dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - 
if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "meta": @@ -507,6 +74,519 @@ func (s *UnmappedSamplerAggregate) UnmarshalJSON(data []byte) error { return err } + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := 
NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != 
nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := 
dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": + o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + 
o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } + } } return nil @@ -531,6 +611,7 @@ func (s UnmappedSamplerAggregate) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unmappedsignificanttermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unmappedsignificanttermsaggregate.go index 860eb800a..d28ba9477 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unmappedsignificanttermsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unmappedsignificanttermsaggregate.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // UnmappedSignificantTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L609-L615 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L610-L616 type UnmappedSignificantTermsAggregate struct { - BgCount *int64 `json:"bg_count,omitempty"` - Buckets BucketsVoid `json:"buckets"` - DocCount *int64 `json:"doc_count,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + BgCount *int64 `json:"bg_count,omitempty"` + Buckets BucketsVoid `json:"buckets"` + DocCount *int64 `json:"doc_count,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *UnmappedSignificantTermsAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -53,8 +54,18 @@ func (s *UnmappedSignificantTermsAggregate) UnmarshalJSON(data []byte) error { switch t { case "bg_count": - if err := dec.Decode(&s.BgCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.BgCount = &value + case float64: + f := int64(v) + s.BgCount = &f } case "buckets": @@ -64,21 +75,33 @@ func (s *UnmappedSignificantTermsAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': - o := make(map[string]struct{}, 0) - localDec.Decode(&o) + o := make(map[string]interface{}, 0) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': - o := []struct{}{} - localDec.Decode(&o) + o := []interface{}{} + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case 
string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCount = &value + case float64: + f := int64(v) + s.DocCount = &f } case "meta": diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unmappedtermsaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unmappedtermsaggregate.go index c71888790..5df02c0a0 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unmappedtermsaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unmappedtermsaggregate.go @@ -16,29 +16,30 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" + "strconv" ) // UnmappedTermsAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L422-L428 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L423-L429 type UnmappedTermsAggregate struct { - Buckets BucketsVoid `json:"buckets"` - DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` + Buckets BucketsVoid `json:"buckets"` + DocCountErrorUpperBound *int64 `json:"doc_count_error_upper_bound,omitempty"` + Meta Metadata `json:"meta,omitempty"` + SumOtherDocCount *int64 `json:"sum_other_doc_count,omitempty"` } func (s *UnmappedTermsAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -59,21 +60,33 @@ func (s *UnmappedTermsAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': - o := make(map[string]struct{}, 0) - localDec.Decode(&o) + o := make(map[string]interface{}, 0) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - case '[': - o := []struct{}{} - localDec.Decode(&o) + o := []interface{}{} + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } case "doc_count_error_upper_bound": - if err := dec.Decode(&s.DocCountErrorUpperBound); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.DocCountErrorUpperBound = &value + case float64: + f := int64(v) + s.DocCountErrorUpperBound = &f } case "meta": @@ -82,8 +95,18 @@ func (s *UnmappedTermsAggregate) UnmarshalJSON(data []byte) error { } case "sum_other_doc_count": - if err := 
dec.Decode(&s.SumOtherDocCount); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.SumOtherDocCount = &value + case float64: + f := int64(v) + s.SumOtherDocCount = &f } } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unrateddocument.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unrateddocument.go index 96e91ceb1..76bcd06ab 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unrateddocument.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unrateddocument.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // UnratedDocument type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/rank_eval/types.ts#L147-L150 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/rank_eval/types.ts#L147-L150 type UnratedDocument struct { Id_ string `json:"_id"` Index_ string `json:"_index"` } +func (s *UnratedDocument) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + } + } + return nil +} + // NewUnratedDocument returns a UnratedDocument. 
func NewUnratedDocument() *UnratedDocument { r := &UnratedDocument{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unsignedlongnumberproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unsignedlongnumberproperty.go index a6a072838..29c18b016 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unsignedlongnumberproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/unsignedlongnumberproperty.go @@ -16,25 +16,25 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/timeseriesmetrictype" ) // UnsignedLongNumberProperty type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L166-L169 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L169-L172 type UnsignedLongNumberProperty struct { Boost *Float64 `json:"boost,omitempty"` Coerce *bool `json:"coerce,omitempty"` @@ -47,7 +47,7 @@ type UnsignedLongNumberProperty struct { Index *bool `json:"index,omitempty"` // Meta Metadata about the field. 
Meta map[string]string `json:"meta,omitempty"` - NullValue uint64 `json:"null_value,omitempty"` + NullValue *uint64 `json:"null_value,omitempty"` OnScriptError *onscripterror.OnScriptError `json:"on_script_error,omitempty"` Properties map[string]Property `json:"properties,omitempty"` Script Script `json:"script,omitempty"` @@ -63,6 +63,7 @@ type UnsignedLongNumberProperty struct { } func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -77,23 +78,63 @@ func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { switch t { case "boost": - if err := dec.Decode(&s.Boost); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = &f + case float64: + f := Float64(v) + s.Boost = &f } case "coerce": - if err := dec.Decode(&s.Coerce); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Coerce = &value + case bool: + s.Coerce = &v } case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -102,6 +143,9 @@ func (s 
*UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -110,7 +154,9 @@ func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -389,28 +435,62 @@ func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "ignore_malformed": - if err := dec.Decode(&s.IgnoreMalformed); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMalformed = &value + case bool: + s.IgnoreMalformed = &v } case "index": - if err := dec.Decode(&s.Index); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Index = &value + case bool: + s.Index = &v } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } @@ -426,6 +506,9 @@ func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { } case "properties": + if 
s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -434,7 +517,9 @@ func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -713,9 +798,11 @@ func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } @@ -725,18 +812,43 @@ func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "time_series_dimension": - if err := dec.Decode(&s.TimeSeriesDimension); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.TimeSeriesDimension = &value + case bool: + s.TimeSeriesDimension = &v } case "time_series_metric": @@ -754,6 +866,36 @@ func (s *UnsignedLongNumberProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s UnsignedLongNumberProperty) MarshalJSON() ([]byte, error) 
{ + type innerUnsignedLongNumberProperty UnsignedLongNumberProperty + tmp := innerUnsignedLongNumberProperty{ + Boost: s.Boost, + Coerce: s.Coerce, + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + IgnoreMalformed: s.IgnoreMalformed, + Index: s.Index, + Meta: s.Meta, + NullValue: s.NullValue, + OnScriptError: s.OnScriptError, + Properties: s.Properties, + Script: s.Script, + Similarity: s.Similarity, + Store: s.Store, + TimeSeriesDimension: s.TimeSeriesDimension, + TimeSeriesMetric: s.TimeSeriesMetric, + Type: s.Type, + } + + tmp.Type = "unsigned_long" + + return json.Marshal(tmp) +} + // NewUnsignedLongNumberProperty returns a UnsignedLongNumberProperty. func NewUnsignedLongNumberProperty() *UnsignedLongNumberProperty { r := &UnsignedLongNumberProperty{ @@ -762,7 +904,5 @@ func NewUnsignedLongNumberProperty() *UnsignedLongNumberProperty { Properties: make(map[string]Property, 0), } - r.Type = "unsigned_long" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/updateaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/updateaction.go new file mode 100644 index 000000000..8e431519e --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/updateaction.go @@ -0,0 +1,143 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + +// UpdateAction type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/bulk/types.ts#L169-L205 +type UpdateAction struct { + // DetectNoop Set to false to disable setting 'result' in the response + // to 'noop' if no change to the document occurred. + DetectNoop *bool `json:"detect_noop,omitempty"` + // Doc A partial update to an existing document. + Doc json.RawMessage `json:"doc,omitempty"` + // DocAsUpsert Set to true to use the contents of 'doc' as the value of 'upsert' + DocAsUpsert *bool `json:"doc_as_upsert,omitempty"` + // Script Script to execute to update the document. + Script Script `json:"script,omitempty"` + // ScriptedUpsert Set to true to execute the script whether or not the document exists. + ScriptedUpsert *bool `json:"scripted_upsert,omitempty"` + // Source_ Set to false to disable source retrieval. You can also specify a + // comma-separated + // list of the fields you want to retrieve. + Source_ SourceConfig `json:"_source,omitempty"` + // Upsert If the document does not already exist, the contents of 'upsert' are inserted + // as a + // new document. If the document exists, the 'script' is executed. 
+ Upsert json.RawMessage `json:"upsert,omitempty"` +} + +func (s *UpdateAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "detect_noop": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DetectNoop = &value + case bool: + s.DetectNoop = &v + } + + case "doc": + if err := dec.Decode(&s.Doc); err != nil { + return err + } + + case "doc_as_upsert": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocAsUpsert = &value + case bool: + s.DocAsUpsert = &v + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + case "scripted_upsert": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.ScriptedUpsert = &value + case bool: + s.ScriptedUpsert = &v + } + + case "_source": + if err := dec.Decode(&s.Source_); err != nil { + return err + } + + case "upsert": + if err := dec.Decode(&s.Upsert); err != nil { + return err + } + + } + } + return nil +} + +// NewUpdateAction returns a UpdateAction. +func NewUpdateAction() *UpdateAction { + r := &UpdateAction{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/updatebyqueryrethrottlenode.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/updatebyqueryrethrottlenode.go index f3b61823a..a8c745545 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/updatebyqueryrethrottlenode.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/updatebyqueryrethrottlenode.go @@ -16,32 +16,98 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/noderole" ) // UpdateByQueryRethrottleNode type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_global/update_by_query_rethrottle/UpdateByQueryRethrottleNode.ts#L25-L27 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/update_by_query_rethrottle/UpdateByQueryRethrottleNode.ts#L25-L27 type UpdateByQueryRethrottleNode struct { Attributes map[string]string `json:"attributes"` Host string `json:"host"` Ip string `json:"ip"` Name string `json:"name"` Roles []noderole.NodeRole `json:"roles,omitempty"` - Tasks map[TaskId]TaskInfo `json:"tasks"` + Tasks map[string]TaskInfo `json:"tasks"` TransportAddress string `json:"transport_address"` } +func (s *UpdateByQueryRethrottleNode) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "attributes": + if s.Attributes == nil { + s.Attributes = make(map[string]string, 0) + } + if err := dec.Decode(&s.Attributes); err != nil { + return err + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return err + } + + case "ip": + if err := dec.Decode(&s.Ip); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "tasks": + if s.Tasks == nil { + s.Tasks = 
make(map[string]TaskInfo, 0) + } + if err := dec.Decode(&s.Tasks); err != nil { + return err + } + + case "transport_address": + if err := dec.Decode(&s.TransportAddress); err != nil { + return err + } + + } + } + return nil +} + // NewUpdateByQueryRethrottleNode returns a UpdateByQueryRethrottleNode. func NewUpdateByQueryRethrottleNode() *UpdateByQueryRethrottleNode { r := &UpdateByQueryRethrottleNode{ Attributes: make(map[string]string, 0), - Tasks: make(map[TaskId]TaskInfo, 0), + Tasks: make(map[string]TaskInfo, 0), } return r diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/updateoperation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/updateoperation.go new file mode 100644 index 000000000..ea58d4c2c --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/updateoperation.go @@ -0,0 +1,152 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" +) + +// UpdateOperation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/bulk/types.ts#L136-L143 +type UpdateOperation struct { + // Id_ The document ID. + Id_ *string `json:"_id,omitempty"` + IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` + IfSeqNo *int64 `json:"if_seq_no,omitempty"` + // Index_ Name of the index or index alias to perform the action on. + Index_ *string `json:"_index,omitempty"` + // RequireAlias If `true`, the request’s actions must target an index alias. + RequireAlias *bool `json:"require_alias,omitempty"` + RetryOnConflict *int `json:"retry_on_conflict,omitempty"` + // Routing Custom value used to route operations to a specific shard. 
+ Routing *string `json:"routing,omitempty"` + Version *int64 `json:"version,omitempty"` + VersionType *versiontype.VersionType `json:"version_type,omitempty"` +} + +func (s *UpdateOperation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "if_primary_term": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IfPrimaryTerm = &value + case float64: + f := int64(v) + s.IfPrimaryTerm = &f + } + + case "if_seq_no": + if err := dec.Decode(&s.IfSeqNo); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "require_alias": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.RequireAlias = &value + case bool: + s.RequireAlias = &v + } + + case "retry_on_conflict": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.RetryOnConflict = &value + case float64: + f := int(v) + s.RetryOnConflict = &f + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return err + } + + } + } + return nil +} + +// NewUpdateOperation returns a UpdateOperation. 
+func NewUpdateOperation() *UpdateOperation { + r := &UpdateOperation{} + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/uppercaseprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/uppercaseprocessor.go index c991b7732..a6b9369a3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/uppercaseprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/uppercaseprocessor.go @@ -16,22 +16,141 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // UppercaseProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L368-L372 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L1097-L1113 type UppercaseProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to make uppercase. + Field string `json:"field"` + // If Conditionally execute the processor. 
+ If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the converted value to. + // By default, the field is updated in-place. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *UppercaseProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case 
"on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewUppercaseProcessor returns a UppercaseProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/uppercasetokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/uppercasetokenfilter.go index ab8e57912..51b48b0d5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/uppercasetokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/uppercasetokenfilter.go @@ -16,23 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // UppercaseTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L338-L340 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L340-L342 type UppercaseTokenFilter struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *UppercaseTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s UppercaseTokenFilter) MarshalJSON() ([]byte, error) { + type innerUppercaseTokenFilter UppercaseTokenFilter + tmp := innerUppercaseTokenFilter{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "uppercase" + + return json.Marshal(tmp) +} + // NewUppercaseTokenFilter returns a UppercaseTokenFilter. func NewUppercaseTokenFilter() *UppercaseTokenFilter { r := &UppercaseTokenFilter{} - r.Type = "uppercase" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/urldecodeprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/urldecodeprocessor.go index 0c1f2b9b3..b7e93177f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/urldecodeprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/urldecodeprocessor.go @@ -16,22 +16,141 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // UrlDecodeProcessor type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L374-L378 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L1115-L1131 type UrlDecodeProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field to decode. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist or is `null`, the processor quietly + // exits without modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field to assign the converted value to. 
+ // By default, the field is updated in-place. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *UrlDecodeProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewUrlDecodeProcessor returns a UrlDecodeProcessor. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/usagestatsindex.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/usagestatsindex.go index 80f15445d..bfc0a9374 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/usagestatsindex.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/usagestatsindex.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // UsageStatsIndex type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L38-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L38-L40 type UsageStatsIndex struct { Shards []UsageStatsShards `json:"shards"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/usagestatsshards.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/usagestatsshards.go index 49b63475d..8467663c6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/usagestatsshards.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/usagestatsshards.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // UsageStatsShards type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L42-L47 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/field_usage_stats/IndicesFieldUsageStatsResponse.ts#L42-L47 type UsageStatsShards struct { Routing ShardRouting `json:"routing"` Stats IndicesShardsStats `json:"stats"` @@ -30,6 +38,53 @@ type UsageStatsShards struct { TrackingStartedAtMillis int64 `json:"tracking_started_at_millis"` } +func (s *UsageStatsShards) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + case "stats": + if err := dec.Decode(&s.Stats); err != nil { + return err + } + + case "tracking_id": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TrackingId = o + + case "tracking_started_at_millis": + if err := dec.Decode(&s.TrackingStartedAtMillis); err != nil { + return err + } + + } + } + return nil +} + // NewUsageStatsShards returns a UsageStatsShards. 
func NewUsageStatsShards() *UsageStatsShards { r := &UsageStatsShards{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/user.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/user.go index f1a89203c..02fb7778a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/user.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/user.go @@ -16,25 +16,100 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // User type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/User.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/User.ts#L23-L31 type User struct { - Email string `json:"email,omitempty"` - Enabled bool `json:"enabled"` - FullName string `json:"full_name,omitempty"` - Metadata map[string]json.RawMessage `json:"metadata"` - ProfileUid *string `json:"profile_uid,omitempty"` - Roles []string `json:"roles"` - Username string `json:"username"` + Email string `json:"email,omitempty"` + Enabled bool `json:"enabled"` + FullName string `json:"full_name,omitempty"` + Metadata Metadata `json:"metadata"` + ProfileUid *string `json:"profile_uid,omitempty"` + Roles []string `json:"roles"` + Username string `json:"username"` +} + +func (s *User) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "email": + var tmp json.RawMessage + 
if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Email = o + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "full_name": + if err := dec.Decode(&s.FullName); err != nil { + return err + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return err + } + + case "profile_uid": + if err := dec.Decode(&s.ProfileUid); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return err + } + + } + } + return nil } // NewUser returns a User. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/useragentprocessor.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/useragentprocessor.go index 38e40dc9a..7ab71d05c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/useragentprocessor.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/useragentprocessor.go @@ -16,28 +16,166 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/useragentproperty" ) // UserAgentProcessor type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ingest/_types/Processors.ts#L115-L121 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ingest/_types/Processors.ts#L359-L379 type UserAgentProcessor struct { - Description *string `json:"description,omitempty"` - Field string `json:"field"` - If *string `json:"if,omitempty"` - IgnoreFailure *bool `json:"ignore_failure,omitempty"` - IgnoreMissing *bool `json:"ignore_missing,omitempty"` - OnFailure []ProcessorContainer `json:"on_failure,omitempty"` - Options []useragentproperty.UserAgentProperty `json:"options,omitempty"` - RegexFile *string `json:"regex_file,omitempty"` - Tag *string `json:"tag,omitempty"` - TargetField *string `json:"target_field,omitempty"` + // Description Description of the processor. + // Useful for describing the purpose of the processor or its configuration. + Description *string `json:"description,omitempty"` + // Field The field containing the user agent string. + Field string `json:"field"` + // If Conditionally execute the processor. + If *string `json:"if,omitempty"` + // IgnoreFailure Ignore failures for the processor. + IgnoreFailure *bool `json:"ignore_failure,omitempty"` + // IgnoreMissing If `true` and `field` does not exist, the processor quietly exits without + // modifying the document. + IgnoreMissing *bool `json:"ignore_missing,omitempty"` + // OnFailure Handle failures for the processor. + OnFailure []ProcessorContainer `json:"on_failure,omitempty"` + Options []useragentproperty.UserAgentProperty `json:"options,omitempty"` + // RegexFile The name of the file in the `config/ingest-user-agent` directory containing + // the regular expressions for parsing the user agent string. Both the directory + // and the file have to be created before starting Elasticsearch. 
If not + // specified, ingest-user-agent will use the `regexes.yaml` from uap-core it + // ships with. + RegexFile *string `json:"regex_file,omitempty"` + // Tag Identifier for the processor. + // Useful for debugging and metrics. + Tag *string `json:"tag,omitempty"` + // TargetField The field that will be filled with the user agent details. + TargetField *string `json:"target_field,omitempty"` +} + +func (s *UserAgentProcessor) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "if": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.If = &o + + case "ignore_failure": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreFailure = &value + case bool: + s.IgnoreFailure = &v + } + + case "ignore_missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreMissing = &value + case bool: + s.IgnoreMissing = &v + } + + case "on_failure": + if err := dec.Decode(&s.OnFailure); err != nil { + return err + } + + case "options": + if err := dec.Decode(&s.Options); err != nil { + return err + } + + case "regex_file": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + 
if err != nil { + o = string(tmp[:]) + } + s.RegexFile = &o + + case "tag": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Tag = &o + + case "target_field": + if err := dec.Decode(&s.TargetField); err != nil { + return err + } + + } + } + return nil } // NewUserAgentProcessor returns a UserAgentProcessor. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userindicesprivileges.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userindicesprivileges.go index 0edf0505d..16b3d2262 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userindicesprivileges.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userindicesprivileges.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexprivilege" ) // UserIndicesPrivileges type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/Privileges.ts#L106-L128 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/Privileges.ts#L107-L129 type UserIndicesPrivileges struct { // AllowRestrictedIndices Set to `true` if using wildcard or regular expressions for patterns that // cover restricted indices. 
Implicitly, restricted indices have limited @@ -49,6 +55,71 @@ type UserIndicesPrivileges struct { Query []IndicesPrivilegesQuery `json:"query,omitempty"` } +func (s *UserIndicesPrivileges) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "allow_restricted_indices": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AllowRestrictedIndices = value + case bool: + s.AllowRestrictedIndices = v + } + + case "field_security": + if err := dec.Decode(&s.FieldSecurity); err != nil { + return err + } + + case "names": + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.Names = append(s.Names, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.Names); err != nil { + return err + } + } + + case "privileges": + if err := dec.Decode(&s.Privileges); err != nil { + return err + } + + case "query": + if err := dec.Decode(&s.Query); err != nil { + return err + } + + } + } + return nil +} + // NewUserIndicesPrivileges returns a UserIndicesPrivileges. func NewUserIndicesPrivileges() *UserIndicesPrivileges { r := &UserIndicesPrivileges{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userprofile.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userprofile.go index 552226679..3287ad5b3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userprofile.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userprofile.go @@ -16,17 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // UserProfile type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/UserProfile.ts#L42-L48 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/UserProfile.ts#L42-L48 type UserProfile struct { Data map[string]json.RawMessage `json:"data"` Enabled *bool `json:"enabled,omitempty"` @@ -35,6 +39,66 @@ type UserProfile struct { User UserProfileUser `json:"user"` } +func (s *UserProfile) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data": + if s.Data == nil { + s.Data = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Data); err != nil { + return err + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "labels": + if s.Labels == nil { + s.Labels = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Labels); err != nil { + return err + } + + case "uid": + if err := dec.Decode(&s.Uid); err != nil { + return err + } + + case "user": + if err := dec.Decode(&s.User); err != nil { + return err + } + + } + } + return nil +} + // NewUserProfile returns a UserProfile. 
func NewUserProfile() *UserProfile { r := &UserProfile{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userprofilehitmetadata.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userprofilehitmetadata.go index 0362cb8a8..906f0e99e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userprofilehitmetadata.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userprofilehitmetadata.go @@ -16,18 +16,66 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // UserProfileHitMetadata type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/UserProfile.ts#L28-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/UserProfile.ts#L28-L31 type UserProfileHitMetadata struct { PrimaryTerm_ int64 `json:"_primary_term"` SeqNo_ int64 `json:"_seq_no"` } +func (s *UserProfileHitMetadata) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "_primary_term": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.PrimaryTerm_ = value + case float64: + f := int64(v) + s.PrimaryTerm_ = f + } + + case "_seq_no": + if err := dec.Decode(&s.SeqNo_); err != nil { + return err + } + + } + } + return nil +} + // NewUserProfileHitMetadata returns 
a UserProfileHitMetadata. func NewUserProfileHitMetadata() *UserProfileHitMetadata { r := &UserProfileHitMetadata{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userprofileuser.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userprofileuser.go index d4793eb8b..4d95491e4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userprofileuser.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userprofileuser.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // UserProfileUser type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/UserProfile.ts#L33-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/UserProfile.ts#L33-L40 type UserProfileUser struct { Email string `json:"email,omitempty"` FullName string `json:"full_name,omitempty"` @@ -32,6 +40,63 @@ type UserProfileUser struct { Username string `json:"username"` } +func (s *UserProfileUser) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "email": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Email = o + + case "full_name": + if err := dec.Decode(&s.FullName); err != nil { + return err + } + + case 
"realm_domain": + if err := dec.Decode(&s.RealmDomain); err != nil { + return err + } + + case "realm_name": + if err := dec.Decode(&s.RealmName); err != nil { + return err + } + + case "roles": + if err := dec.Decode(&s.Roles); err != nil { + return err + } + + case "username": + if err := dec.Decode(&s.Username); err != nil { + return err + } + + } + } + return nil +} + // NewUserProfileUser returns a UserProfileUser. func NewUserProfileUser() *UserProfileUser { r := &UserProfileUser{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userprofilewithmetadata.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userprofilewithmetadata.go index 0455ea273..f35a5714c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userprofilewithmetadata.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userprofilewithmetadata.go @@ -16,17 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // UserProfileWithMetadata type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/_types/UserProfile.ts#L50-L53 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/_types/UserProfile.ts#L50-L53 type UserProfileWithMetadata struct { Data map[string]json.RawMessage `json:"data"` Doc_ UserProfileHitMetadata `json:"_doc"` @@ -37,6 +41,86 @@ type UserProfileWithMetadata struct { User UserProfileUser `json:"user"` } +func (s *UserProfileWithMetadata) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "data": + if s.Data == nil { + s.Data = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Data); err != nil { + return err + } + + case "_doc": + if err := dec.Decode(&s.Doc_); err != nil { + return err + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = &value + case bool: + s.Enabled = &v + } + + case "labels": + if s.Labels == nil { + s.Labels = make(map[string]json.RawMessage, 0) + } + if err := dec.Decode(&s.Labels); err != nil { + return err + } + + case "last_synchronized": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.LastSynchronized = value + case float64: + f := int64(v) + s.LastSynchronized = f + } + + case "uid": + if err := dec.Decode(&s.Uid); err != nil { + return err + } + + case "user": + if err := dec.Decode(&s.User); err != nil { + return err + } + + } + } + return nil +} + // NewUserProfileWithMetadata returns a UserProfileWithMetadata. 
func NewUserProfileWithMetadata() *UserProfileWithMetadata { r := &UserProfileWithMetadata{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userrealm.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userrealm.go index f7f2a5e68..f22eb8147 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userrealm.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/userrealm.go @@ -16,18 +16,63 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // UserRealm type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/security/get_token/types.ts#L30-L33 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/security/get_token/types.ts#L30-L33 type UserRealm struct { Name string `json:"name"` Type string `json:"type"` } +func (s *UserRealm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Type = o + + } + } + return nil +} + // NewUserRealm returns a UserRealm. 
func NewUserRealm() *UserRealm { r := &UserRealm{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/validationloss.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/validationloss.go index 21d247812..ba5eba9c8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/validationloss.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/validationloss.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ValidationLoss type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/DataframeAnalytics.ts#L428-L433 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/DataframeAnalytics.ts#L570-L575 type ValidationLoss struct { // FoldValues Validation loss values for every added decision tree during the forest // growing procedure. 
@@ -31,6 +39,43 @@ type ValidationLoss struct { LossType string `json:"loss_type"` } +func (s *ValidationLoss) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "fold_values": + if err := dec.Decode(&s.FoldValues); err != nil { + return err + } + + case "loss_type": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.LossType = o + + } + } + return nil +} + // NewValidationLoss returns a ValidationLoss. func NewValidationLoss() *ValidationLoss { r := &ValidationLoss{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/valuecountaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/valuecountaggregate.go index 9e1cfb5b9..56238a1ed 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/valuecountaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/valuecountaggregate.go @@ -16,19 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // ValueCountAggregate type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L217-L221 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L218-L222 type ValueCountAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to // aggregate, // unless specified otherwise. @@ -36,6 +40,48 @@ type ValueCountAggregate struct { ValueAsString *string `json:"value_as_string,omitempty"` } +func (s *ValueCountAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + // NewValueCountAggregate returns a ValueCountAggregate. func NewValueCountAggregate() *ValueCountAggregate { r := &ValueCountAggregate{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/valuecountaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/valuecountaggregation.go index c0213db51..a23683e28 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/valuecountaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/valuecountaggregation.go @@ -16,20 +16,78 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ValueCountAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L196-L196 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L417-L417 type ValueCountAggregation struct { - Field *string `json:"field,omitempty"` - Format *string `json:"format,omitempty"` + // Field The field on which to run the aggregation. + Field *string `json:"field,omitempty"` + Format *string `json:"format,omitempty"` + // Missing The value to apply to documents that do not have a value. + // By default, documents without a value are ignored. Missing Missing `json:"missing,omitempty"` Script Script `json:"script,omitempty"` } +func (s *ValueCountAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "missing": + if err := dec.Decode(&s.Missing); err != nil { + return err + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil +} + // NewValueCountAggregation returns a ValueCountAggregation. 
func NewValueCountAggregation() *ValueCountAggregation { r := &ValueCountAggregation{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/variablewidthhistogramaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/variablewidthhistogramaggregate.go index 7657cd339..9276fc558 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/variablewidthhistogramaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/variablewidthhistogramaggregate.go @@ -16,27 +16,27 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( "bytes" + "encoding/json" "errors" "io" - - "encoding/json" ) // VariableWidthHistogramAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L361-L363 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L362-L364 type VariableWidthHistogramAggregate struct { Buckets BucketsVariableWidthHistogramBucket `json:"buckets"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` } func (s *VariableWidthHistogramAggregate) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -57,15 +57,17 @@ func (s *VariableWidthHistogramAggregate) UnmarshalJSON(data []byte) error { source := bytes.NewReader(rawMsg) localDec := json.NewDecoder(source) switch rawMsg[0] { - case '{': o := make(map[string]VariableWidthHistogramBucket, 0) - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o - 
case '[': o := []VariableWidthHistogramBucket{} - localDec.Decode(&o) + if err := localDec.Decode(&o); err != nil { + return err + } s.Buckets = o } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/variablewidthhistogramaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/variablewidthhistogramaggregation.go index 9476637bf..3e228b277 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/variablewidthhistogramaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/variablewidthhistogramaggregation.go @@ -16,18 +16,107 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // VariableWidthHistogramAggregation type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/bucket.ts#L430-L435 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/bucket.ts#L1015-L1035 type VariableWidthHistogramAggregation struct { - Buckets *int `json:"buckets,omitempty"` - Field *string `json:"field,omitempty"` - InitialBuffer *int `json:"initial_buffer,omitempty"` - ShardSize *int `json:"shard_size,omitempty"` + // Buckets The target number of buckets. + Buckets *int `json:"buckets,omitempty"` + // Field The name of the field. + Field *string `json:"field,omitempty"` + // InitialBuffer Specifies the number of individual documents that will be stored in memory on + // a shard before the initial bucketing algorithm is run. + // Defaults to `min(10 * shard_size, 50000)`. 
+ InitialBuffer *int `json:"initial_buffer,omitempty"` + // ShardSize The number of buckets that the coordinating node will request from each + // shard. + // Defaults to `buckets * 50`. + ShardSize *int `json:"shard_size,omitempty"` +} + +func (s *VariableWidthHistogramAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "buckets": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Buckets = &value + case float64: + f := int(v) + s.Buckets = &f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "initial_buffer": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.InitialBuffer = &value + case float64: + f := int(v) + s.InitialBuffer = &f + } + + case "shard_size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.ShardSize = &value + case float64: + f := int(v) + s.ShardSize = &f + } + + } + } + return nil } // NewVariableWidthHistogramAggregation returns a VariableWidthHistogramAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/variablewidthhistogrambucket.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/variablewidthhistogrambucket.go index 5189ff49d..8b2ef8bb5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/variablewidthhistogrambucket.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/variablewidthhistogrambucket.go @@ -16,25 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "fmt" - "bytes" + "encoding/json" "errors" + "fmt" "io" - + "strconv" "strings" - - "encoding/json" ) // VariableWidthHistogramBucket type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L365-L372 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L366-L373 type VariableWidthHistogramBucket struct { Aggregations map[string]Aggregate `json:"-"` DocCount int64 `json:"doc_count"` @@ -47,6 +45,7 @@ type VariableWidthHistogramBucket struct { } func (s *VariableWidthHistogramBucket) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -60,482 +59,617 @@ func (s *VariableWidthHistogramBucket) UnmarshalJSON(data []byte) error { switch t { - case "aggregations": - for dec.More() { - tt, err := dec.Token() + case "doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) if err != nil { - if errors.Is(err, io.EOF) { - break - } return err } - if value, ok := tt.(string); ok { - if strings.Contains(value, "#") { - elems := strings.Split(value, "#") - if len(elems) == 2 { - switch elems[0] { - case "cardinality": - o := NewCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentiles": - o := NewHdrPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "hdr_percentile_ranks": - o := NewHdrPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - 
s.Aggregations[elems[1]] = o - case "tdigest_percentiles": - o := NewTDigestPercentilesAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "tdigest_percentile_ranks": - o := NewTDigestPercentileRanksAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "percentiles_bucket": - o := NewPercentilesBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "median_absolute_deviation": - o := NewMedianAbsoluteDeviationAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "min": - o := NewMinAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "max": - o := NewMaxAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sum": - o := NewSumAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "avg": - o := NewAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "weighted_avg": - o := NewWeightedAvgAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "value_count": - o := NewValueCountAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_value": - o := NewSimpleValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "derivative": - o := NewDerivativeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "bucket_metric_value": - o := NewBucketMetricValueAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "stats": - o := NewStatsAggregate() - if err := dec.Decode(o); err != nil { - 
return err - } - s.Aggregations[elems[1]] = o - case "stats_bucket": - o := NewStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats": - o := NewExtendedStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "extended_stats_bucket": - o := NewExtendedStatsBucketAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_bounds": - o := NewGeoBoundsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_centroid": - o := NewGeoCentroidAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "histogram": - o := NewHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_histogram": - o := NewDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "auto_date_histogram": - o := NewAutoDateHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "variable_width_histogram": - o := NewVariableWidthHistogramAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sterms": - o := NewStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lterms": - o := NewLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "dterms": - o := NewDoubleTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umterms": - o := NewUnmappedTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "lrareterms": - o := 
NewLongRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "srareterms": - o := NewStringRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umrareterms": - o := NewUnmappedRareTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "multi_terms": - o := NewMultiTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "missing": - o := NewMissingAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "nested": - o := NewNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "reverse_nested": - o := NewReverseNestedAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "global": - o := NewGlobalAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filter": - o := NewFilterAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "children": - o := NewChildrenAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "parent": - o := NewParentAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sampler": - o := NewSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "unmapped_sampler": - o := NewUnmappedSamplerAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohash_grid": - o := NewGeoHashGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geotile_grid": - o := NewGeoTileGridAggregate() - if 
err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geohex_grid": - o := NewGeoHexGridAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "range": - o := NewRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "date_range": - o := NewDateRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_distance": - o := NewGeoDistanceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_range": - o := NewIpRangeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "ip_prefix": - o := NewIpPrefixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "filters": - o := NewFiltersAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "adjacency_matrix": - o := NewAdjacencyMatrixAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "siglterms": - o := NewSignificantLongTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "sigsterms": - o := NewSignificantStringTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "umsigterms": - o := NewUnmappedSignificantTermsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "composite": - o := NewCompositeAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "scripted_metric": - o := NewScriptedMetricAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_hits": - o := NewTopHitsAggregate() - if 
err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "inference": - o := NewInferenceAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "string_stats": - o := NewStringStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "box_plot": - o := NewBoxPlotAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "top_metrics": - o := NewTopMetricsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "t_test": - o := NewTTestAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "rate": - o := NewRateAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "simple_long_value": - o := NewCumulativeCardinalityAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "matrix_stats": - o := NewMatrixStatsAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - case "geo_line": - o := NewGeoLineAggregate() - if err := dec.Decode(o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - default: - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[elems[1]] = o - } - } else { - return errors.New("cannot decode JSON for field Aggregations") - } - } else { - o := make(map[string]interface{}, 0) - if err := dec.Decode(&o); err != nil { - return err - } - s.Aggregations[value] = o - } - } - } - - case "doc_count": - if err := dec.Decode(&s.DocCount); err != nil { - return err + s.DocCount = value + case float64: + f := int64(v) + s.DocCount = f } case "key": - if err := dec.Decode(&s.Key); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := 
tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Key = f + case float64: + f := Float64(v) + s.Key = f } case "key_as_string": - if err := dec.Decode(&s.KeyAsString); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.KeyAsString = &o case "max": - if err := dec.Decode(&s.Max); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Max = f + case float64: + f := Float64(v) + s.Max = f } case "max_as_string": - if err := dec.Decode(&s.MaxAsString); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MaxAsString = &o case "min": - if err := dec.Decode(&s.Min); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Min = f + case float64: + f := Float64(v) + s.Min = f } case "min_as_string": - if err := dec.Decode(&s.MinAsString); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.MinAsString = &o + + default: + + if value, ok := t.(string); ok { + if strings.Contains(value, "#") { + elems := strings.Split(value, "#") + if len(elems) == 2 { + if s.Aggregations == nil { + s.Aggregations = make(map[string]Aggregate, 0) + } + switch elems[0] { + + case "cardinality": + o := NewCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + case "hdr_percentiles": + o := NewHdrPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "hdr_percentile_ranks": + o := NewHdrPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentiles": + o := NewTDigestPercentilesAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "tdigest_percentile_ranks": + o := NewTDigestPercentileRanksAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "percentiles_bucket": + o := NewPercentilesBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "median_absolute_deviation": + o := NewMedianAbsoluteDeviationAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "min": + o := NewMinAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "max": + o := NewMaxAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sum": + o := NewSumAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "avg": + o := NewAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "weighted_avg": + o := NewWeightedAvgAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "value_count": + o := NewValueCountAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_value": + o := NewSimpleValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "derivative": + o := 
NewDerivativeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "bucket_metric_value": + o := NewBucketMetricValueAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats": + o := NewStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "stats_bucket": + o := NewStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats": + o := NewExtendedStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "extended_stats_bucket": + o := NewExtendedStatsBucketAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_bounds": + o := NewGeoBoundsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_centroid": + o := NewGeoCentroidAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "histogram": + o := NewHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_histogram": + o := NewDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "auto_date_histogram": + o := NewAutoDateHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "variable_width_histogram": + o := NewVariableWidthHistogramAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sterms": + o := NewStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lterms": + o := NewLongTermsAggregate() + if err := 
dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "dterms": + o := NewDoubleTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umterms": + o := NewUnmappedTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "lrareterms": + o := NewLongRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "srareterms": + o := NewStringRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umrareterms": + o := NewUnmappedRareTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "multi_terms": + o := NewMultiTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "missing": + o := NewMissingAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "nested": + o := NewNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "reverse_nested": + o := NewReverseNestedAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "global": + o := NewGlobalAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filter": + o := NewFilterAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "children": + o := NewChildrenAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "parent": + o := NewParentAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sampler": + o := NewSamplerAggregate() + if err := 
dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "unmapped_sampler": + o := NewUnmappedSamplerAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohash_grid": + o := NewGeoHashGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geotile_grid": + o := NewGeoTileGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geohex_grid": + o := NewGeoHexGridAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "range": + o := NewRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "date_range": + o := NewDateRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_distance": + o := NewGeoDistanceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_range": + o := NewIpRangeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "ip_prefix": + o := NewIpPrefixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "filters": + o := NewFiltersAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "adjacency_matrix": + o := NewAdjacencyMatrixAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "siglterms": + o := NewSignificantLongTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "sigsterms": + o := NewSignificantStringTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "umsigterms": 
+ o := NewUnmappedSignificantTermsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "composite": + o := NewCompositeAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "frequent_item_sets": + o := NewFrequentItemSetsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "scripted_metric": + o := NewScriptedMetricAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_hits": + o := NewTopHitsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "inference": + o := NewInferenceAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "string_stats": + o := NewStringStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "box_plot": + o := NewBoxPlotAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "top_metrics": + o := NewTopMetricsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "t_test": + o := NewTTestAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "rate": + o := NewRateAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "simple_long_value": + o := NewCumulativeCardinalityAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "matrix_stats": + o := NewMatrixStatsAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + + case "geo_line": + o := NewGeoLineAggregate() + if err := dec.Decode(&o); err != nil { + return err + } + 
s.Aggregations[elems[1]] = o + + default: + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[elems[1]] = o + } + } else { + return errors.New("cannot decode JSON for field Aggregations") + } + } else { + o := make(map[string]interface{}, 0) + if err := dec.Decode(&o); err != nil { + return err + } + s.Aggregations[value] = o + } + } } } @@ -561,6 +695,7 @@ func (s VariableWidthHistogramBucket) MarshalJSON() ([]byte, error) { for key, value := range s.Aggregations { tmp[fmt.Sprintf("%s", key)] = value } + delete(tmp, "Aggregations") data, err = json.Marshal(tmp) if err != nil { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vector.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vector.go index 4ccc2e5f2..ae2c98671 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vector.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vector.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Vector type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L445-L449 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L454-L458 type Vector struct { Available bool `json:"available"` DenseVectorDimsAvgCount int `json:"dense_vector_dims_avg_count"` @@ -31,6 +39,102 @@ type Vector struct { SparseVectorFieldsCount *int `json:"sparse_vector_fields_count,omitempty"` } +func (s *Vector) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "dense_vector_dims_avg_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DenseVectorDimsAvgCount = value + case float64: + f := int(v) + s.DenseVectorDimsAvgCount = f + } + + case "dense_vector_fields_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.DenseVectorFieldsCount = value + case float64: + f := int(v) + s.DenseVectorFieldsCount = f + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "sparse_vector_fields_count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return 
err + } + s.SparseVectorFieldsCount = &value + case float64: + f := int(v) + s.SparseVectorFieldsCount = &f + } + + } + } + return nil +} + // NewVector returns a Vector. func NewVector() *Vector { r := &Vector{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/verifyindex.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/verifyindex.go index 2ef9224a1..bed15daa3 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/verifyindex.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/verifyindex.go @@ -16,13 +16,20 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // VerifyIndex type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/indices/recovery/types.ts#L111-L116 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/indices/recovery/types.ts#L111-L116 type VerifyIndex struct { CheckIndexTime Duration `json:"check_index_time,omitempty"` CheckIndexTimeInMillis int64 `json:"check_index_time_in_millis"` @@ -30,6 +37,46 @@ type VerifyIndex struct { TotalTimeInMillis int64 `json:"total_time_in_millis"` } +func (s *VerifyIndex) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "check_index_time": + if err := dec.Decode(&s.CheckIndexTime); err != nil { + return err + } + + case "check_index_time_in_millis": + if err := dec.Decode(&s.CheckIndexTimeInMillis); err != nil { + 
return err + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return err + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return err + } + + } + } + return nil +} + // NewVerifyIndex returns a VerifyIndex. func NewVerifyIndex() *VerifyIndex { r := &VerifyIndex{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/versionproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/versionproperty.go index fb18b66d5..ae4e706c6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/versionproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/versionproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // VersionProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L265-L267 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L272-L274 type VersionProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -48,6 +48,7 @@ type VersionProperty struct { } func (s *VersionProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -62,13 +63,33 @@ func (s *VersionProperty) UnmarshalJSON(data []byte) error { switch t { case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -77,6 +98,9 @@ func (s *VersionProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -85,7 +109,9 @@ func (s *VersionProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -364,23 
+390,42 @@ func (s *VersionProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -389,7 +434,9 @@ func (s *VersionProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -668,20 +715,38 @@ func (s *VersionProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: 
+ s.Store = &v } case "type": @@ -694,6 +759,27 @@ func (s *VersionProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s VersionProperty) MarshalJSON() ([]byte, error) { + type innerVersionProperty VersionProperty + tmp := innerVersionProperty{ + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + tmp.Type = "version" + + return json.Marshal(tmp) +} + // NewVersionProperty returns a VersionProperty. func NewVersionProperty() *VersionProperty { r := &VersionProperty{ @@ -702,7 +788,5 @@ func NewVersionProperty() *VersionProperty { Properties: make(map[string]Property, 0), } - r.Type = "version" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vertex.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vertex.go index fa3c6a5db..41db0f86f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vertex.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vertex.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Vertex type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/graph/_types/Vertex.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/graph/_types/Vertex.ts#L23-L28 type Vertex struct { Depth int64 `json:"depth"` Field string `json:"field"` @@ -30,6 +38,74 @@ type Vertex struct { Weight Float64 `json:"weight"` } +func (s *Vertex) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "depth": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Depth = value + case float64: + f := int64(v) + s.Depth = f + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "term": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Term = o + + case "weight": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Weight = f + case float64: + f := Float64(v) + s.Weight = f + } + + } + } + return nil +} + // NewVertex returns a Vertex. 
func NewVertex() *Vertex { r := &Vertex{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vertexdefinition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vertexdefinition.go index 3eb2ed6dc..3052316bb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vertexdefinition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vertexdefinition.go @@ -16,20 +16,119 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // VertexDefinition type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/graph/_types/Vertex.ts#L30-L37 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/graph/_types/Vertex.ts#L30-L59 type VertexDefinition struct { - Exclude []string `json:"exclude,omitempty"` - Field string `json:"field"` - Include []VertexInclude `json:"include,omitempty"` - MinDocCount *int64 `json:"min_doc_count,omitempty"` - ShardMinDocCount *int64 `json:"shard_min_doc_count,omitempty"` - Size *int `json:"size,omitempty"` + // Exclude Prevents the specified terms from being included in the results. + Exclude []string `json:"exclude,omitempty"` + // Field Identifies a field in the documents of interest. + Field string `json:"field"` + // Include Identifies the terms of interest that form the starting points from which you + // want to spider out. + Include []VertexInclude `json:"include,omitempty"` + // MinDocCount Specifies how many documents must contain a pair of terms before it is + // considered to be a useful connection. 
+ // This setting acts as a certainty threshold. + MinDocCount *int64 `json:"min_doc_count,omitempty"` + // ShardMinDocCount Controls how many documents on a particular shard have to contain a pair of + // terms before the connection is returned for global consideration. + ShardMinDocCount *int64 `json:"shard_min_doc_count,omitempty"` + // Size Specifies the maximum number of vertex terms returned for each field. + Size *int `json:"size,omitempty"` +} + +func (s *VertexDefinition) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "exclude": + if err := dec.Decode(&s.Exclude); err != nil { + return err + } + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "include": + if err := dec.Decode(&s.Include); err != nil { + return err + } + + case "min_doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.MinDocCount = &value + case float64: + f := int64(v) + s.MinDocCount = &f + } + + case "shard_min_doc_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.ShardMinDocCount = &value + case float64: + f := int64(v) + s.ShardMinDocCount = &f + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = &value + case float64: + f := int(v) + s.Size = &f + } + + } + } + return nil } // NewVertexDefinition returns a VertexDefinition. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vertexinclude.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vertexinclude.go index 778f6cbf3..ae0788576 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vertexinclude.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vertexinclude.go @@ -16,18 +16,74 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // VertexInclude type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/graph/_types/Vertex.ts#L39-L42 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/graph/_types/Vertex.ts#L61-L64 type VertexInclude struct { Boost Float64 `json:"boost"` Term string `json:"term"` } +func (s *VertexInclude) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Boost = f + case float64: + f := Float64(v) + s.Boost = f + } + + case "term": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Term = o + + } + } + return nil +} + // NewVertexInclude returns a VertexInclude. 
func NewVertexInclude() *VertexInclude { r := &VertexInclude{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vocabulary.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vocabulary.go index a45a95a2a..e6f5963a8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vocabulary.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/vocabulary.go @@ -16,17 +16,49 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // Vocabulary type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L218-L220 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L233-L235 type Vocabulary struct { Index string `json:"index"` } +func (s *Vocabulary) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + } + } + return nil +} + // NewVocabulary returns a Vocabulary. 
func NewVocabulary() *Vocabulary { r := &Vocabulary{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/waitforactiveshards.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/waitforactiveshards.go index a4bd84206..a46e8cb6a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/waitforactiveshards.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/waitforactiveshards.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -25,5 +25,5 @@ package types // int // waitforactiveshardoptions.WaitForActiveShardOptions // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/common.ts#L122-L123 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/common.ts#L136-L137 type WaitForActiveShards interface{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/warmerstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/warmerstats.go index b7d8ea544..12e6d9b18 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/warmerstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/warmerstats.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // WarmerStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Stats.ts#L252-L257 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Stats.ts#L407-L412 type WarmerStats struct { Current int64 `json:"current"` Total int64 `json:"total"` @@ -30,6 +38,66 @@ type WarmerStats struct { TotalTimeInMillis int64 `json:"total_time_in_millis"` } +func (s *WarmerStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Current = value + case float64: + f := int64(v) + s.Current = f + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + case "total_time": + if err := dec.Decode(&s.TotalTime); err != nil { + return err + } + + case "total_time_in_millis": + if err := dec.Decode(&s.TotalTimeInMillis); err != nil { + return err + } + + } + } + return nil +} + // NewWarmerStats returns a WarmerStats. 
func NewWarmerStats() *WarmerStats { r := &WarmerStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watch.go index fd28096d6..652a4f1f4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watch.go @@ -16,27 +16,98 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" ) // Watch type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Watch.ts#L37-L47 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Watch.ts#L37-L47 type Watch struct { - Actions map[string]WatcherAction `json:"actions"` - Condition WatcherCondition `json:"condition"` - Input WatcherInput `json:"input"` - Metadata map[string]json.RawMessage `json:"metadata,omitempty"` - Status *WatchStatus `json:"status,omitempty"` - ThrottlePeriod Duration `json:"throttle_period,omitempty"` - ThrottlePeriodInMillis *int64 `json:"throttle_period_in_millis,omitempty"` - Transform *TransformContainer `json:"transform,omitempty"` - Trigger TriggerContainer `json:"trigger"` + Actions map[string]WatcherAction `json:"actions"` + Condition WatcherCondition `json:"condition"` + Input WatcherInput `json:"input"` + Metadata Metadata `json:"metadata,omitempty"` + Status *WatchStatus `json:"status,omitempty"` + ThrottlePeriod Duration `json:"throttle_period,omitempty"` + ThrottlePeriodInMillis *int64 `json:"throttle_period_in_millis,omitempty"` + 
Transform *TransformContainer `json:"transform,omitempty"` + Trigger TriggerContainer `json:"trigger"` +} + +func (s *Watch) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if s.Actions == nil { + s.Actions = make(map[string]WatcherAction, 0) + } + if err := dec.Decode(&s.Actions); err != nil { + return err + } + + case "condition": + if err := dec.Decode(&s.Condition); err != nil { + return err + } + + case "input": + if err := dec.Decode(&s.Input); err != nil { + return err + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "throttle_period": + if err := dec.Decode(&s.ThrottlePeriod); err != nil { + return err + } + + case "throttle_period_in_millis": + if err := dec.Decode(&s.ThrottlePeriodInMillis); err != nil { + return err + } + + case "transform": + if err := dec.Decode(&s.Transform); err != nil { + return err + } + + case "trigger": + if err := dec.Decode(&s.Trigger); err != nil { + return err + } + + } + } + return nil } // NewWatch returns a Watch. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcher.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcher.go index e707f7a01..8bd154e19 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcher.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcher.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Watcher type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L451-L455 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L460-L464 type Watcher struct { Available bool `json:"available"` Count Counter `json:"count"` @@ -31,6 +39,69 @@ type Watcher struct { Watch WatcherWatch `json:"watch"` } +func (s *Watcher) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "count": + if err := dec.Decode(&s.Count); err != nil { + return err + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "execution": + if err := dec.Decode(&s.Execution); err != nil { + return err + } + + case "watch": + if err := dec.Decode(&s.Watch); err != nil { + return err + } + + } + } + return nil +} + // NewWatcher returns a Watcher. 
func NewWatcher() *Watcher { r := &Watcher{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcheraction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcheraction.go index 71e8e9342..83dfaabcb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcheraction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcheraction.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actiontype" ) // WatcherAction type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Action.ts#L41-L57 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Action.ts#L41-L60 type WatcherAction struct { ActionType *actiontype.ActionType `json:"action_type,omitempty"` Condition *WatcherCondition `json:"condition,omitempty"` @@ -44,6 +50,114 @@ type WatcherAction struct { Webhook *WebhookAction `json:"webhook,omitempty"` } +func (s *WatcherAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "action_type": + if err := dec.Decode(&s.ActionType); err != nil { + return err + } + + case "condition": + if err := dec.Decode(&s.Condition); err != nil { + return err + } + + case "email": + if err := dec.Decode(&s.Email); err != nil { + return err + } + + case "foreach": + 
var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Foreach = &o + + case "index": + if err := dec.Decode(&s.Index); err != nil { + return err + } + + case "logging": + if err := dec.Decode(&s.Logging); err != nil { + return err + } + + case "max_iterations": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxIterations = &value + case float64: + f := int(v) + s.MaxIterations = &f + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "pagerduty": + if err := dec.Decode(&s.Pagerduty); err != nil { + return err + } + + case "slack": + if err := dec.Decode(&s.Slack); err != nil { + return err + } + + case "throttle_period": + if err := dec.Decode(&s.ThrottlePeriod); err != nil { + return err + } + + case "throttle_period_in_millis": + if err := dec.Decode(&s.ThrottlePeriodInMillis); err != nil { + return err + } + + case "transform": + if err := dec.Decode(&s.Transform); err != nil { + return err + } + + case "webhook": + if err := dec.Decode(&s.Webhook); err != nil { + return err + } + + } + } + return nil +} + // NewWatcherAction returns a WatcherAction. func NewWatcherAction() *WatcherAction { r := &WatcherAction{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcheractions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcheractions.go index a9f90669f..cb8ce2537 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcheractions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcheractions.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // WatcherActions type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L387-L389 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L396-L398 type WatcherActions struct { Actions map[string]WatcherActionTotals `json:"actions"` } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcheractiontotals.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcheractiontotals.go index 12afdaf35..79561d324 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcheractiontotals.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcheractiontotals.go @@ -16,18 +16,55 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // WatcherActionTotals type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L403-L406 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L412-L415 type WatcherActionTotals struct { Total Duration `json:"total"` TotalTimeInMs int64 `json:"total_time_in_ms"` } +func (s *WatcherActionTotals) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "total": + if err := dec.Decode(&s.Total); err != nil { + return err + } + + case "total_time_in_ms": + if err := dec.Decode(&s.TotalTimeInMs); err != nil { + return err + } + + } + } + return nil +} + // NewWatcherActionTotals returns a WatcherActionTotals. func NewWatcherActionTotals() *WatcherActionTotals { r := &WatcherActionTotals{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchercondition.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchercondition.go index 3516c97ba..810f58f18 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchercondition.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchercondition.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // WatcherCondition type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Conditions.ts#L47-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Conditions.ts#L47-L59 type WatcherCondition struct { Always *AlwaysCondition `json:"always,omitempty"` ArrayCompare map[string]ArrayCompareCondition `json:"array_compare,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherinput.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherinput.go index e1c04d97b..8750d081c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherinput.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherinput.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types @@ -26,7 +26,7 @@ import ( // WatcherInput type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Input.ts#L90-L98 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Input.ts#L90-L98 type WatcherInput struct { Chain *ChainInput `json:"chain,omitempty"` Http *HttpInput `json:"http,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchernodestats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchernodestats.go index f1091923a..fd1c79cb9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchernodestats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchernodestats.go @@ -16,17 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/watcherstate" ) // WatcherNodeStats type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/stats/types.ts#L33-L40 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/stats/types.ts#L33-L40 type WatcherNodeStats struct { CurrentWatches []WatchRecordStats `json:"current_watches,omitempty"` ExecutionThreadPool ExecutionThreadPool `json:"execution_thread_pool"` @@ -36,6 +42,66 @@ type WatcherNodeStats struct { WatcherState watcherstate.WatcherState `json:"watcher_state"` } +func (s *WatcherNodeStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "current_watches": + if err := dec.Decode(&s.CurrentWatches); err != nil { + return err + } + + case "execution_thread_pool": + if err := dec.Decode(&s.ExecutionThreadPool); err != nil { + return err + } + + case "node_id": + if err := dec.Decode(&s.NodeId); err != nil { + return err + } + + case "queued_watches": + if err := dec.Decode(&s.QueuedWatches); err != nil { + return err + } + + case "watch_count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.WatchCount = value + case float64: + f := int64(v) + s.WatchCount = f + } + + case "watcher_state": + if err := dec.Decode(&s.WatcherState); err != nil { + return err + } + + } + } + return nil +} + // NewWatcherNodeStats returns a WatcherNodeStats. 
func NewWatcherNodeStats() *WatcherNodeStats { r := &WatcherNodeStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherstatusactions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherstatusactions.go index e5eecde24..0b0efc0e9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherstatusactions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherstatusactions.go @@ -16,11 +16,11 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // WatcherStatusActions type alias. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Action.ts#L59-L59 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Action.ts#L62-L62 type WatcherStatusActions map[string]ActionStatus diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherwatch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherwatch.go index 3cda86bcd..cb6b0825e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherwatch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherwatch.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // WatcherWatch type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L391-L396 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L400-L405 type WatcherWatch struct { Action map[string]Counter `json:"action,omitempty"` Condition map[string]Counter `json:"condition,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherwatchtrigger.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherwatchtrigger.go index c7438f50f..61a05ffa2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherwatchtrigger.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherwatchtrigger.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // WatcherWatchTrigger type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L398-L401 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L407-L410 type WatcherWatchTrigger struct { All_ Counter `json:"_all"` Schedule *WatcherWatchTriggerSchedule `json:"schedule,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherwatchtriggerschedule.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherwatchtriggerschedule.go index e81617a2e..bbd299bf9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherwatchtriggerschedule.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watcherwatchtriggerschedule.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // WatcherWatchTriggerSchedule type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L457-L460 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L466-L469 type WatcherWatchTriggerSchedule struct { Active int64 `json:"active"` All_ Counter `json:"_all"` @@ -30,6 +38,66 @@ type WatcherWatchTriggerSchedule struct { Total int64 `json:"total"` } +func (s *WatcherWatchTriggerSchedule) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "active": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Active = value + case float64: + f := int64(v) + s.Active = f + } + + case "_all": + if err := dec.Decode(&s.All_); err != nil { + return err + } + + case "cron": + if err := dec.Decode(&s.Cron); err != nil { + return err + } + + case "total": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Total = value + case float64: + f := int64(v) + s.Total = f + } + + } + } + return nil +} + // NewWatcherWatchTriggerSchedule returns a WatcherWatchTriggerSchedule. 
func NewWatcherWatchTriggerSchedule() *WatcherWatchTriggerSchedule { r := &WatcherWatchTriggerSchedule{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchrecord.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchrecord.go index 2842fe7b3..dc6aa8af8 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchrecord.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchrecord.go @@ -16,24 +16,28 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/executionstatus" ) // WatchRecord type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/execute_watch/types.ts#L27-L39 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/execute_watch/types.ts#L27-L39 type WatchRecord struct { Condition WatcherCondition `json:"condition"` Input WatcherInput `json:"input"` Messages []string `json:"messages"` - Metadata map[string]json.RawMessage `json:"metadata,omitempty"` + Metadata Metadata `json:"metadata,omitempty"` Node string `json:"node"` Result ExecutionResult `json:"result"` State executionstatus.ExecutionStatus `json:"state"` @@ -43,6 +47,88 @@ type WatchRecord struct { WatchId string `json:"watch_id"` } +func (s *WatchRecord) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "condition": 
+ if err := dec.Decode(&s.Condition); err != nil { + return err + } + + case "input": + if err := dec.Decode(&s.Input); err != nil { + return err + } + + case "messages": + if err := dec.Decode(&s.Messages); err != nil { + return err + } + + case "metadata": + if err := dec.Decode(&s.Metadata); err != nil { + return err + } + + case "node": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Node = o + + case "result": + if err := dec.Decode(&s.Result); err != nil { + return err + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return err + } + + case "status": + if err := dec.Decode(&s.Status); err != nil { + return err + } + + case "trigger_event": + if err := dec.Decode(&s.TriggerEvent); err != nil { + return err + } + + case "user": + if err := dec.Decode(&s.User); err != nil { + return err + } + + case "watch_id": + if err := dec.Decode(&s.WatchId); err != nil { + return err + } + + } + } + return nil +} + // NewWatchRecord returns a WatchRecord. func NewWatchRecord() *WatchRecord { r := &WatchRecord{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchrecordqueuedstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchrecordqueuedstats.go index 123f96566..f400c7463 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchrecordqueuedstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchrecordqueuedstats.go @@ -16,17 +16,49 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // WatchRecordQueuedStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/stats/types.ts#L50-L52 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/stats/types.ts#L50-L52 type WatchRecordQueuedStats struct { ExecutionTime DateTime `json:"execution_time"` } +func (s *WatchRecordQueuedStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "execution_time": + if err := dec.Decode(&s.ExecutionTime); err != nil { + return err + } + + } + } + return nil +} + // NewWatchRecordQueuedStats returns a WatchRecordQueuedStats. func NewWatchRecordQueuedStats() *WatchRecordQueuedStats { r := &WatchRecordQueuedStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchrecordstats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchrecordstats.go index a6ba076c7..8b5542455 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchrecordstats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchrecordstats.go @@ -16,17 +16,22 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/executionphase" ) // WatchRecordStats type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/stats/types.ts#L54-L60 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/stats/types.ts#L54-L60 type WatchRecordStats struct { ExecutedActions []string `json:"executed_actions,omitempty"` ExecutionPhase executionphase.ExecutionPhase `json:"execution_phase"` @@ -36,6 +41,56 @@ type WatchRecordStats struct { WatchRecordId string `json:"watch_record_id"` } +func (s *WatchRecordStats) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "executed_actions": + if err := dec.Decode(&s.ExecutedActions); err != nil { + return err + } + + case "execution_phase": + if err := dec.Decode(&s.ExecutionPhase); err != nil { + return err + } + + case "execution_time": + if err := dec.Decode(&s.ExecutionTime); err != nil { + return err + } + + case "triggered_time": + if err := dec.Decode(&s.TriggeredTime); err != nil { + return err + } + + case "watch_id": + if err := dec.Decode(&s.WatchId); err != nil { + return err + } + + case "watch_record_id": + if err := dec.Decode(&s.WatchRecordId); err != nil { + return err + } + + } + } + return nil +} + // NewWatchRecordStats returns a WatchRecordStats. 
func NewWatchRecordStats() *WatchRecordStats { r := &WatchRecordStats{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchstatus.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchstatus.go index d799db357..4adfb0d1c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchstatus.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/watchstatus.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // WatchStatus type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Watch.ts#L49-L56 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Watch.ts#L49-L56 type WatchStatus struct { Actions WatcherStatusActions `json:"actions"` ExecutionState *string `json:"execution_state,omitempty"` @@ -32,6 +40,63 @@ type WatchStatus struct { Version int64 `json:"version"` } +func (s *WatchStatus) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "actions": + if err := dec.Decode(&s.Actions); err != nil { + return err + } + + case "execution_state": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ExecutionState = &o + + case "last_checked": + if err := dec.Decode(&s.LastChecked); err 
!= nil { + return err + } + + case "last_met_condition": + if err := dec.Decode(&s.LastMetCondition); err != nil { + return err + } + + case "state": + if err := dec.Decode(&s.State); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + // NewWatchStatus returns a WatchStatus. func NewWatchStatus() *WatchStatus { r := &WatchStatus{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/webhookaction.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/webhookaction.go index ef985a574..436bbee4f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/webhookaction.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/webhookaction.go @@ -16,18 +16,24 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/connectionscheme" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/httpinputmethod" ) // WebhookAction type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L293-L293 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L293-L293 type WebhookAction struct { Auth *HttpInputAuthentication `json:"auth,omitempty"` Body *string `json:"body,omitempty"` @@ -44,6 +50,118 @@ type WebhookAction struct { Url *string `json:"url,omitempty"` } +func (s *WebhookAction) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "auth": + if err := dec.Decode(&s.Auth); err != nil { + return err + } + + case "body": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Body = &o + + case "connection_timeout": + if err := dec.Decode(&s.ConnectionTimeout); err != nil { + return err + } + + case "headers": + if s.Headers == nil { + s.Headers = make(map[string]string, 0) + } + if err := dec.Decode(&s.Headers); err != nil { + return err + } + + case "host": + if err := dec.Decode(&s.Host); err != nil { + return err + } + + case "method": + if err := dec.Decode(&s.Method); err != nil { + return err + } + + case "params": + if s.Params == nil { + s.Params = make(map[string]string, 0) + } + if err := dec.Decode(&s.Params); err != nil { + return err + } + + case "path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Path = &o + + case "port": + if err := dec.Decode(&s.Port); err != nil { + return err + } + + case "proxy": + if err := dec.Decode(&s.Proxy); err != nil { + return err + } + + 
case "read_timeout": + if err := dec.Decode(&s.ReadTimeout); err != nil { + return err + } + + case "scheme": + if err := dec.Decode(&s.Scheme); err != nil { + return err + } + + case "url": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Url = &o + + } + } + return nil +} + // NewWebhookAction returns a WebhookAction. func NewWebhookAction() *WebhookAction { r := &WebhookAction{ diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/webhookresult.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/webhookresult.go index 649a55544..8773d589e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/webhookresult.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/webhookresult.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // WebhookResult type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/_types/Actions.ts#L295-L298 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/_types/Actions.ts#L295-L298 type WebhookResult struct { Request HttpInputRequestResult `json:"request"` Response *HttpInputResponseResult `json:"response,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/weightedaverageaggregation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/weightedaverageaggregation.go index 07c06afa4..d02fe7dde 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/weightedaverageaggregation.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/weightedaverageaggregation.go @@ -16,26 +16,97 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/valuetype" ) // WeightedAverageAggregation type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L211-L216 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L432-L446 type WeightedAverageAggregation struct { - Format *string `json:"format,omitempty"` - Meta map[string]json.RawMessage `json:"meta,omitempty"` - Name *string `json:"name,omitempty"` - Value *WeightedAverageValue `json:"value,omitempty"` - ValueType *valuetype.ValueType `json:"value_type,omitempty"` - Weight *WeightedAverageValue `json:"weight,omitempty"` + // Format A numeric response formatter. + Format *string `json:"format,omitempty"` + Meta Metadata `json:"meta,omitempty"` + Name *string `json:"name,omitempty"` + // Value Configuration for the field that provides the values. + Value *WeightedAverageValue `json:"value,omitempty"` + ValueType *valuetype.ValueType `json:"value_type,omitempty"` + // Weight Configuration for the field or script that provides the weights. 
+ Weight *WeightedAverageValue `json:"weight,omitempty"` +} + +func (s *WeightedAverageAggregation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "format": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Format = &o + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Name = &o + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + case "value_type": + if err := dec.Decode(&s.ValueType); err != nil { + return err + } + + case "weight": + if err := dec.Decode(&s.Weight); err != nil { + return err + } + + } + } + return nil } // NewWeightedAverageAggregation returns a WeightedAverageAggregation. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/weightedaveragevalue.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/weightedaveragevalue.go index 94501b596..927bfbcfb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/weightedaveragevalue.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/weightedaveragevalue.go @@ -16,19 +16,75 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // WeightedAverageValue type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/metric.ts#L218-L222 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/metric.ts#L448-L458 type WeightedAverageValue struct { - Field *string `json:"field,omitempty"` + // Field The field from which to extract the values or weights. + Field *string `json:"field,omitempty"` + // Missing A value or weight to use if the field is missing. Missing *Float64 `json:"missing,omitempty"` Script Script `json:"script,omitempty"` } +func (s *WeightedAverageValue) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "field": + if err := dec.Decode(&s.Field); err != nil { + return err + } + + case "missing": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Missing = &f + case float64: + f := Float64(v) + s.Missing = &f + } + + case "script": + if err := dec.Decode(&s.Script); err != nil { + return err + } + + } + } + return nil +} + // NewWeightedAverageValue returns a WeightedAverageValue. 
func NewWeightedAverageValue() *WeightedAverageValue { r := &WeightedAverageValue{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/weightedavgaggregate.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/weightedavgaggregate.go index 34e8b7454..9bcc863ec 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/weightedavgaggregate.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/weightedavgaggregate.go @@ -16,19 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( + "bytes" "encoding/json" + "errors" + "io" + "strconv" ) // WeightedAvgAggregate type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/aggregations/Aggregate.ts#L211-L215 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/aggregations/Aggregate.ts#L212-L216 type WeightedAvgAggregate struct { - Meta map[string]json.RawMessage `json:"meta,omitempty"` + Meta Metadata `json:"meta,omitempty"` // Value The metric value. A missing value generally means that there was no data to // aggregate, // unless specified otherwise. 
@@ -36,6 +40,48 @@ type WeightedAvgAggregate struct { ValueAsString *string `json:"value_as_string,omitempty"` } +func (s *WeightedAvgAggregate) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "meta": + if err := dec.Decode(&s.Meta); err != nil { + return err + } + + case "value": + if err := dec.Decode(&s.Value); err != nil { + return err + } + + case "value_as_string": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ValueAsString = &o + + } + } + return nil +} + // NewWeightedAvgAggregate returns a WeightedAvgAggregate. func NewWeightedAvgAggregate() *WeightedAvgAggregate { r := &WeightedAvgAggregate{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/weights.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/weights.go index d90214099..97de33216 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/weights.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/weights.go @@ -16,17 +16,61 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // Weights type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/put_trained_model/types.ts#L108-L110 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/put_trained_model/types.ts#L108-L110 type Weights struct { Weights Float64 `json:"weights"` } +func (s *Weights) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "weights": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + f := Float64(value) + s.Weights = f + case float64: + f := Float64(v) + s.Weights = f + } + + } + } + return nil +} + // NewWeights returns a Weights. func NewWeights() *Weights { r := &Weights{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/whitespaceanalyzer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/whitespaceanalyzer.go index e018f6bba..208ad1581 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/whitespaceanalyzer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/whitespaceanalyzer.go @@ -16,23 +16,71 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + // WhitespaceAnalyzer type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/analyzers.ts#L108-L111 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/analyzers.ts#L108-L111 type WhitespaceAnalyzer struct { Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *WhitespaceAnalyzer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s WhitespaceAnalyzer) MarshalJSON() ([]byte, error) { + type innerWhitespaceAnalyzer WhitespaceAnalyzer + tmp := innerWhitespaceAnalyzer{ + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "whitespace" + + return json.Marshal(tmp) +} + // NewWhitespaceAnalyzer returns a WhitespaceAnalyzer. func NewWhitespaceAnalyzer() *WhitespaceAnalyzer { r := &WhitespaceAnalyzer{} - r.Type = "whitespace" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/whitespacetokenizer.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/whitespacetokenizer.go index 83a8dfa25..45725d3bc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/whitespacetokenizer.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/whitespacetokenizer.go @@ -16,24 +16,90 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // WhitespaceTokenizer type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/tokenizers.ts#L114-L117 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/tokenizers.ts#L115-L118 type WhitespaceTokenizer struct { MaxTokenLength *int `json:"max_token_length,omitempty"` Type string `json:"type,omitempty"` Version *string `json:"version,omitempty"` } +func (s *WhitespaceTokenizer) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "max_token_length": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.MaxTokenLength = &value + case float64: + f := int(v) + s.MaxTokenLength = &f + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s WhitespaceTokenizer) MarshalJSON() ([]byte, error) { + type innerWhitespaceTokenizer WhitespaceTokenizer + tmp := innerWhitespaceTokenizer{ + MaxTokenLength: s.MaxTokenLength, + Type: s.Type, + Version: s.Version, + } + + tmp.Type = "whitespace" + + return json.Marshal(tmp) +} + // NewWhitespaceTokenizer returns a WhitespaceTokenizer. 
func NewWhitespaceTokenizer() *WhitespaceTokenizer { r := &WhitespaceTokenizer{} - r.Type = "whitespace" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/wildcardproperty.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/wildcardproperty.go index a8b818eca..9a354c673 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/wildcardproperty.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/wildcardproperty.go @@ -16,23 +16,23 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types import ( - "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" - "bytes" + "encoding/json" "errors" "io" + "strconv" - "encoding/json" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/dynamicmapping" ) // WildcardProperty type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/mapping/core.ts#L269-L273 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/mapping/core.ts#L276-L283 type WildcardProperty struct { CopyTo []string `json:"copy_to,omitempty"` DocValues *bool `json:"doc_values,omitempty"` @@ -49,6 +49,7 @@ type WildcardProperty struct { } func (s *WildcardProperty) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) for { @@ -63,13 +64,33 @@ func (s *WildcardProperty) UnmarshalJSON(data []byte) error { switch t { case "copy_to": - if err := dec.Decode(&s.CopyTo); err != nil { - return err + rawMsg := json.RawMessage{} + dec.Decode(&rawMsg) + if !bytes.HasPrefix(rawMsg, []byte("[")) { + o := new(string) + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&o); err != nil { + return err + } + + s.CopyTo = append(s.CopyTo, *o) + } else { + if err := json.NewDecoder(bytes.NewReader(rawMsg)).Decode(&s.CopyTo); err != nil { + return err + } } case "doc_values": - if err := dec.Decode(&s.DocValues); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.DocValues = &value + case bool: + s.DocValues = &v } case "dynamic": @@ -78,6 +99,9 @@ func (s *WildcardProperty) UnmarshalJSON(data []byte) error { } case "fields": + if s.Fields == nil { + s.Fields = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -86,7 +110,9 @@ func (s *WildcardProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ 
-365,28 +391,54 @@ func (s *WildcardProperty) UnmarshalJSON(data []byte) error { } s.Fields[key] = oo default: - if err := dec.Decode(&s.Fields); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Fields[key] = oo } } case "ignore_above": - if err := dec.Decode(&s.IgnoreAbove); err != nil { - return err + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.IgnoreAbove = &value + case float64: + f := int(v) + s.IgnoreAbove = &f } case "meta": + if s.Meta == nil { + s.Meta = make(map[string]string, 0) + } if err := dec.Decode(&s.Meta); err != nil { return err } case "null_value": - if err := dec.Decode(&s.NullValue); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.NullValue = &o case "properties": + if s.Properties == nil { + s.Properties = make(map[string]Property, 0) + } refs := make(map[string]json.RawMessage, 0) dec.Decode(&refs) for key, message := range refs { @@ -395,7 +447,9 @@ func (s *WildcardProperty) UnmarshalJSON(data []byte) error { localDec := json.NewDecoder(buf) localDec.Decode(&kind) buf.Seek(0, io.SeekStart) - + if _, ok := kind["type"]; !ok { + kind["type"] = "object" + } switch kind["type"] { case "binary": oo := NewBinaryProperty() @@ -674,20 +728,38 @@ func (s *WildcardProperty) UnmarshalJSON(data []byte) error { } s.Properties[key] = oo default: - if err := dec.Decode(&s.Properties); err != nil { + oo := new(Property) + if err := localDec.Decode(&oo); err != nil { return err } + s.Properties[key] = oo } } case "similarity": - if err := dec.Decode(&s.Similarity); err != nil { + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { return err } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + 
s.Similarity = &o case "store": - if err := dec.Decode(&s.Store); err != nil { - return err + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Store = &value + case bool: + s.Store = &v } case "type": @@ -700,6 +772,28 @@ func (s *WildcardProperty) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON override marshalling to include literal value +func (s WildcardProperty) MarshalJSON() ([]byte, error) { + type innerWildcardProperty WildcardProperty + tmp := innerWildcardProperty{ + CopyTo: s.CopyTo, + DocValues: s.DocValues, + Dynamic: s.Dynamic, + Fields: s.Fields, + IgnoreAbove: s.IgnoreAbove, + Meta: s.Meta, + NullValue: s.NullValue, + Properties: s.Properties, + Similarity: s.Similarity, + Store: s.Store, + Type: s.Type, + } + + tmp.Type = "wildcard" + + return json.Marshal(tmp) +} + // NewWildcardProperty returns a WildcardProperty. func NewWildcardProperty() *WildcardProperty { r := &WildcardProperty{ @@ -708,7 +802,5 @@ func NewWildcardProperty() *WildcardProperty { Properties: make(map[string]Property, 0), } - r.Type = "wildcard" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/wildcardquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/wildcardquery.go index 09763fef8..2bab4c5d1 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/wildcardquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/wildcardquery.go @@ -16,21 +16,34 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // WildcardQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/term.ts#L149-L162 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/term.ts#L268-L285 type WildcardQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. Boost *float32 `json:"boost,omitempty"` // CaseInsensitive Allows case insensitive matching of the pattern with the indexed field values // when set to true. Default is false which means the case sensitivity of // matching depends on the underlying field’s mapping. CaseInsensitive *bool `json:"case_insensitive,omitempty"` QueryName_ *string `json:"_name,omitempty"` - // Rewrite Method used to rewrite the query + // Rewrite Method used to rewrite the query. Rewrite *string `json:"rewrite,omitempty"` // Value Wildcard pattern for terms you wish to find in the provided field. Required, // when wildcard is not set. 
@@ -40,6 +53,102 @@ type WildcardQuery struct { Wildcard *string `json:"wildcard,omitempty"` } +func (s *WildcardQuery) UnmarshalJSON(data []byte) error { + + if !bytes.HasPrefix(data, []byte(`{`)) { + err := json.NewDecoder(bytes.NewReader(data)).Decode(&s.Value) + return err + } + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "case_insensitive": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.CaseInsensitive = &value + case bool: + s.CaseInsensitive = &v + } + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + case "rewrite": + if err := dec.Decode(&s.Rewrite); err != nil { + return err + } + + case "value": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Value = &o + + case "wildcard": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Wildcard = &o + + } + } + return nil +} + // NewWildcardQuery returns a WildcardQuery. 
func NewWildcardQuery() *WildcardQuery { r := &WildcardQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/wktgeobounds.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/wktgeobounds.go index 8c8b25895..520f764f7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/wktgeobounds.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/wktgeobounds.go @@ -16,17 +16,57 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // WktGeoBounds type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/Geo.ts#L134-L136 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/Geo.ts#L159-L161 type WktGeoBounds struct { Wkt string `json:"wkt"` } +func (s *WktGeoBounds) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "wkt": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Wkt = o + + } + } + return nil +} + // NewWktGeoBounds returns a WktGeoBounds. 
func NewWktGeoBounds() *WktGeoBounds { r := &WktGeoBounds{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/worddelimitergraphtokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/worddelimitergraphtokenfilter.go index 1e956a197..150e1fb69 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/worddelimitergraphtokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/worddelimitergraphtokenfilter.go @@ -16,38 +16,281 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // WordDelimiterGraphTokenFilter type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L148-L165 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L149-L166 type WordDelimiterGraphTokenFilter struct { - AdjustOffsets *bool `json:"adjust_offsets,omitempty"` - CatenateAll *bool `json:"catenate_all,omitempty"` - CatenateNumbers *bool `json:"catenate_numbers,omitempty"` - CatenateWords *bool `json:"catenate_words,omitempty"` - GenerateNumberParts *bool `json:"generate_number_parts,omitempty"` - GenerateWordParts *bool `json:"generate_word_parts,omitempty"` - IgnoreKeywords *bool `json:"ignore_keywords,omitempty"` - PreserveOriginal *bool `json:"preserve_original,omitempty"` - ProtectedWords []string `json:"protected_words,omitempty"` - ProtectedWordsPath *string `json:"protected_words_path,omitempty"` - SplitOnCaseChange *bool `json:"split_on_case_change,omitempty"` - 
SplitOnNumerics *bool `json:"split_on_numerics,omitempty"` - StemEnglishPossessive *bool `json:"stem_english_possessive,omitempty"` - Type string `json:"type,omitempty"` - TypeTable []string `json:"type_table,omitempty"` - TypeTablePath *string `json:"type_table_path,omitempty"` - Version *string `json:"version,omitempty"` + AdjustOffsets *bool `json:"adjust_offsets,omitempty"` + CatenateAll *bool `json:"catenate_all,omitempty"` + CatenateNumbers *bool `json:"catenate_numbers,omitempty"` + CatenateWords *bool `json:"catenate_words,omitempty"` + GenerateNumberParts *bool `json:"generate_number_parts,omitempty"` + GenerateWordParts *bool `json:"generate_word_parts,omitempty"` + IgnoreKeywords *bool `json:"ignore_keywords,omitempty"` + PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` + ProtectedWords []string `json:"protected_words,omitempty"` + ProtectedWordsPath *string `json:"protected_words_path,omitempty"` + SplitOnCaseChange *bool `json:"split_on_case_change,omitempty"` + SplitOnNumerics *bool `json:"split_on_numerics,omitempty"` + StemEnglishPossessive *bool `json:"stem_english_possessive,omitempty"` + Type string `json:"type,omitempty"` + TypeTable []string `json:"type_table,omitempty"` + TypeTablePath *string `json:"type_table_path,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *WordDelimiterGraphTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "adjust_offsets": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.AdjustOffsets = &value + case bool: + s.AdjustOffsets = &v + } + + case "catenate_all": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err 
!= nil { + return err + } + s.CatenateAll = &value + case bool: + s.CatenateAll = &v + } + + case "catenate_numbers": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.CatenateNumbers = &value + case bool: + s.CatenateNumbers = &v + } + + case "catenate_words": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.CatenateWords = &value + case bool: + s.CatenateWords = &v + } + + case "generate_number_parts": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.GenerateNumberParts = &value + case bool: + s.GenerateNumberParts = &v + } + + case "generate_word_parts": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.GenerateWordParts = &value + case bool: + s.GenerateWordParts = &v + } + + case "ignore_keywords": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.IgnoreKeywords = &value + case bool: + s.IgnoreKeywords = &v + } + + case "preserve_original": + if err := dec.Decode(&s.PreserveOriginal); err != nil { + return err + } + + case "protected_words": + if err := dec.Decode(&s.ProtectedWords); err != nil { + return err + } + + case "protected_words_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ProtectedWordsPath = &o + + case "split_on_case_change": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + 
return err + } + s.SplitOnCaseChange = &value + case bool: + s.SplitOnCaseChange = &v + } + + case "split_on_numerics": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.SplitOnNumerics = &value + case bool: + s.SplitOnNumerics = &v + } + + case "stem_english_possessive": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.StemEnglishPossessive = &value + case bool: + s.StemEnglishPossessive = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "type_table": + if err := dec.Decode(&s.TypeTable); err != nil { + return err + } + + case "type_table_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TypeTablePath = &o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s WordDelimiterGraphTokenFilter) MarshalJSON() ([]byte, error) { + type innerWordDelimiterGraphTokenFilter WordDelimiterGraphTokenFilter + tmp := innerWordDelimiterGraphTokenFilter{ + AdjustOffsets: s.AdjustOffsets, + CatenateAll: s.CatenateAll, + CatenateNumbers: s.CatenateNumbers, + CatenateWords: s.CatenateWords, + GenerateNumberParts: s.GenerateNumberParts, + GenerateWordParts: s.GenerateWordParts, + IgnoreKeywords: s.IgnoreKeywords, + PreserveOriginal: s.PreserveOriginal, + ProtectedWords: s.ProtectedWords, + ProtectedWordsPath: s.ProtectedWordsPath, + SplitOnCaseChange: s.SplitOnCaseChange, + SplitOnNumerics: s.SplitOnNumerics, + StemEnglishPossessive: s.StemEnglishPossessive, + Type: s.Type, + TypeTable: s.TypeTable, + TypeTablePath: s.TypeTablePath, + Version: s.Version, + } 
+ + tmp.Type = "word_delimiter_graph" + + return json.Marshal(tmp) } // NewWordDelimiterGraphTokenFilter returns a WordDelimiterGraphTokenFilter. func NewWordDelimiterGraphTokenFilter() *WordDelimiterGraphTokenFilter { r := &WordDelimiterGraphTokenFilter{} - r.Type = "word_delimiter_graph" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/worddelimitertokenfilter.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/worddelimitertokenfilter.go index 3c44807c8..0cd406826 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/worddelimitertokenfilter.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/worddelimitertokenfilter.go @@ -16,36 +16,249 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // WordDelimiterTokenFilter type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/analysis/token_filters.ts#L131-L146 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/analysis/token_filters.ts#L132-L147 type WordDelimiterTokenFilter struct { - CatenateAll *bool `json:"catenate_all,omitempty"` - CatenateNumbers *bool `json:"catenate_numbers,omitempty"` - CatenateWords *bool `json:"catenate_words,omitempty"` - GenerateNumberParts *bool `json:"generate_number_parts,omitempty"` - GenerateWordParts *bool `json:"generate_word_parts,omitempty"` - PreserveOriginal *bool `json:"preserve_original,omitempty"` - ProtectedWords []string `json:"protected_words,omitempty"` - ProtectedWordsPath *string `json:"protected_words_path,omitempty"` - SplitOnCaseChange *bool `json:"split_on_case_change,omitempty"` - SplitOnNumerics *bool `json:"split_on_numerics,omitempty"` - StemEnglishPossessive *bool `json:"stem_english_possessive,omitempty"` - Type string `json:"type,omitempty"` - TypeTable []string `json:"type_table,omitempty"` - TypeTablePath *string `json:"type_table_path,omitempty"` - Version *string `json:"version,omitempty"` + CatenateAll *bool `json:"catenate_all,omitempty"` + CatenateNumbers *bool `json:"catenate_numbers,omitempty"` + CatenateWords *bool `json:"catenate_words,omitempty"` + GenerateNumberParts *bool `json:"generate_number_parts,omitempty"` + GenerateWordParts *bool `json:"generate_word_parts,omitempty"` + PreserveOriginal Stringifiedboolean `json:"preserve_original,omitempty"` + ProtectedWords []string `json:"protected_words,omitempty"` + ProtectedWordsPath *string `json:"protected_words_path,omitempty"` + SplitOnCaseChange *bool `json:"split_on_case_change,omitempty"` + SplitOnNumerics *bool `json:"split_on_numerics,omitempty"` + StemEnglishPossessive *bool `json:"stem_english_possessive,omitempty"` + Type string `json:"type,omitempty"` + 
TypeTable []string `json:"type_table,omitempty"` + TypeTablePath *string `json:"type_table_path,omitempty"` + Version *string `json:"version,omitempty"` +} + +func (s *WordDelimiterTokenFilter) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "catenate_all": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.CatenateAll = &value + case bool: + s.CatenateAll = &v + } + + case "catenate_numbers": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.CatenateNumbers = &value + case bool: + s.CatenateNumbers = &v + } + + case "catenate_words": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.CatenateWords = &value + case bool: + s.CatenateWords = &v + } + + case "generate_number_parts": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.GenerateNumberParts = &value + case bool: + s.GenerateNumberParts = &v + } + + case "generate_word_parts": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.GenerateWordParts = &value + case bool: + s.GenerateWordParts = &v + } + + case "preserve_original": + if err := dec.Decode(&s.PreserveOriginal); err != nil { + return err + } + + case "protected_words": + if err := dec.Decode(&s.ProtectedWords); err != nil { + return err + } + + case "protected_words_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return 
err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ProtectedWordsPath = &o + + case "split_on_case_change": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.SplitOnCaseChange = &value + case bool: + s.SplitOnCaseChange = &v + } + + case "split_on_numerics": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.SplitOnNumerics = &value + case bool: + s.SplitOnNumerics = &v + } + + case "stem_english_possessive": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.StemEnglishPossessive = &value + case bool: + s.StemEnglishPossessive = &v + } + + case "type": + if err := dec.Decode(&s.Type); err != nil { + return err + } + + case "type_table": + if err := dec.Decode(&s.TypeTable); err != nil { + return err + } + + case "type_table_path": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.TypeTablePath = &o + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + } + } + return nil +} + +// MarshalJSON override marshalling to include literal value +func (s WordDelimiterTokenFilter) MarshalJSON() ([]byte, error) { + type innerWordDelimiterTokenFilter WordDelimiterTokenFilter + tmp := innerWordDelimiterTokenFilter{ + CatenateAll: s.CatenateAll, + CatenateNumbers: s.CatenateNumbers, + CatenateWords: s.CatenateWords, + GenerateNumberParts: s.GenerateNumberParts, + GenerateWordParts: s.GenerateWordParts, + PreserveOriginal: s.PreserveOriginal, + ProtectedWords: s.ProtectedWords, + ProtectedWordsPath: s.ProtectedWordsPath, + 
SplitOnCaseChange: s.SplitOnCaseChange, + SplitOnNumerics: s.SplitOnNumerics, + StemEnglishPossessive: s.StemEnglishPossessive, + Type: s.Type, + TypeTable: s.TypeTable, + TypeTablePath: s.TypeTablePath, + Version: s.Version, + } + + tmp.Type = "word_delimiter" + + return json.Marshal(tmp) } // NewWordDelimiterTokenFilter returns a WordDelimiterTokenFilter. func NewWordDelimiterTokenFilter() *WordDelimiterTokenFilter { r := &WordDelimiterTokenFilter{} - r.Type = "word_delimiter" - return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/wrapperquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/wrapperquery.go index 426466663..15a286ae6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/wrapperquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/wrapperquery.go @@ -16,21 +16,94 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // WrapperQuery type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/_types/query_dsl/abstractions.ts#L197-L200 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_types/query_dsl/abstractions.ts#L465-L471 type WrapperQuery struct { + // Boost Floating point number used to decrease or increase the relevance scores of + // the query. + // Boost values are relative to the default value of 1.0. + // A boost value between 0 and 1.0 decreases the relevance score. + // A value greater than 1.0 increases the relevance score. 
Boost *float32 `json:"boost,omitempty"` - // Query A base64 encoded query. The binary data format can be any of JSON, YAML, CBOR - // or SMILE encodings + // Query A base64 encoded query. + // The binary data format can be any of JSON, YAML, CBOR or SMILE encodings Query string `json:"query"` QueryName_ *string `json:"_name,omitempty"` } +func (s *WrapperQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "boost": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseFloat(v, 32) + if err != nil { + return err + } + f := float32(value) + s.Boost = &f + case float64: + f := float32(v) + s.Boost = &f + } + + case "query": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Query = o + + case "_name": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.QueryName_ = &o + + } + } + return nil +} + // NewWrapperQuery returns a WrapperQuery. func NewWrapperQuery() *WrapperQuery { r := &WrapperQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/writeoperation.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/writeoperation.go new file mode 100644 index 000000000..0b712e419 --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/writeoperation.go @@ -0,0 +1,170 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +package types + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" + + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/versiontype" +) + +// WriteOperation type. +// +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/_global/bulk/types.ts#L109-L128 +type WriteOperation struct { + // DynamicTemplates A map from the full name of fields to the name of dynamic templates. + // Defaults to an empty map. + // If a name matches a dynamic template, then that template will be applied + // regardless of other match predicates defined in the template. + // If a field is already defined in the mapping, then this parameter won’t be + // used. + DynamicTemplates map[string]string `json:"dynamic_templates,omitempty"` + // Id_ The document ID. + Id_ *string `json:"_id,omitempty"` + IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` + IfSeqNo *int64 `json:"if_seq_no,omitempty"` + // Index_ Name of the index or index alias to perform the action on. + Index_ *string `json:"_index,omitempty"` + // Pipeline ID of the pipeline to use to preprocess incoming documents. 
+ // If the index has a default ingest pipeline specified, then setting the value + // to `_none` disables the default ingest pipeline for this request. + // If a final pipeline is configured it will always run, regardless of the value + // of this parameter. + Pipeline *string `json:"pipeline,omitempty"` + // RequireAlias If `true`, the request’s actions must target an index alias. + RequireAlias *bool `json:"require_alias,omitempty"` + // Routing Custom value used to route operations to a specific shard. + Routing *string `json:"routing,omitempty"` + Version *int64 `json:"version,omitempty"` + VersionType *versiontype.VersionType `json:"version_type,omitempty"` +} + +func (s *WriteOperation) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "dynamic_templates": + if s.DynamicTemplates == nil { + s.DynamicTemplates = make(map[string]string, 0) + } + if err := dec.Decode(&s.DynamicTemplates); err != nil { + return err + } + + case "_id": + if err := dec.Decode(&s.Id_); err != nil { + return err + } + + case "if_primary_term": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.IfPrimaryTerm = &value + case float64: + f := int64(v) + s.IfPrimaryTerm = &f + } + + case "if_seq_no": + if err := dec.Decode(&s.IfSeqNo); err != nil { + return err + } + + case "_index": + if err := dec.Decode(&s.Index_); err != nil { + return err + } + + case "pipeline": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Pipeline = &o + + case "require_alias": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + 
if err != nil { + return err + } + s.RequireAlias = &value + case bool: + s.RequireAlias = &v + } + + case "routing": + if err := dec.Decode(&s.Routing); err != nil { + return err + } + + case "version": + if err := dec.Decode(&s.Version); err != nil { + return err + } + + case "version_type": + if err := dec.Decode(&s.VersionType); err != nil { + return err + } + + } + } + return nil +} + +// NewWriteOperation returns a WriteOperation. +func NewWriteOperation() *WriteOperation { + r := &WriteOperation{ + DynamicTemplates: make(map[string]string, 0), + } + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackdatafeed.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackdatafeed.go index a31dac53b..8a71d9f92 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackdatafeed.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackdatafeed.go @@ -16,17 +16,60 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // XpackDatafeed type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L77-L79 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L77-L79 type XpackDatafeed struct { Count int64 `json:"count"` } +func (s *XpackDatafeed) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + s.Count = value + case float64: + f := int64(v) + s.Count = f + } + + } + } + return nil +} + // NewXpackDatafeed returns a XpackDatafeed. func NewXpackDatafeed() *XpackDatafeed { r := &XpackDatafeed{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackfeature.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackfeature.go index ec5f2ea2f..0b962d860 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackfeature.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackfeature.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // XpackFeature type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/info/types.ts#L74-L79 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/info/types.ts#L77-L82 type XpackFeature struct { Available bool `json:"available"` Description *string `json:"description,omitempty"` @@ -30,6 +38,71 @@ type XpackFeature struct { NativeCodeInfo *NativeCodeInformation `json:"native_code_info,omitempty"` } +func (s *XpackFeature) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "description": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.Description = &o + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "native_code_info": + if err := dec.Decode(&s.NativeCodeInfo); err != nil { + return err + } + + } + } + return nil +} + // NewXpackFeature returns a XpackFeature. 
func NewXpackFeature() *XpackFeature { r := &XpackFeature{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackfeatures.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackfeatures.go index 2c6adcd64..b62a0cdd2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackfeatures.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackfeatures.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types // XpackFeatures type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/info/types.ts#L42-L72 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/info/types.ts#L42-L75 type XpackFeatures struct { AggregateMetric XpackFeature `json:"aggregate_metric"` Analytics XpackFeature `json:"analytics"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackquery.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackquery.go index c54db35cd..3ad832554 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackquery.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackquery.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // XpackQuery type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L253-L258 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L259-L264 type XpackQuery struct { Count *int `json:"count,omitempty"` Failed *int `json:"failed,omitempty"` @@ -30,6 +38,90 @@ type XpackQuery struct { Total *int `json:"total,omitempty"` } +func (s *XpackQuery) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "count": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Count = &value + case float64: + f := int(v) + s.Count = &f + } + + case "failed": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Failed = &value + case float64: + f := int(v) + s.Failed = &f + } + + case "paging": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Paging = &value + case float64: + f := int(v) + s.Paging = &f + } + + case "total": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Total = &value + case float64: + f := int(v) + s.Total = &f + } + + } + } + return nil +} + // NewXpackQuery returns a XpackQuery. 
func NewXpackQuery() *XpackQuery { r := &XpackQuery{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackrealm.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackrealm.go index b82d33982..1cb30c326 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackrealm.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackrealm.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // XpackRealm type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L408-L417 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L417-L426 type XpackRealm struct { Available bool `json:"available"` Cache []RealmCache `json:"cache,omitempty"` @@ -36,6 +44,94 @@ type XpackRealm struct { Size []int64 `json:"size,omitempty"` } +func (s *XpackRealm) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "cache": + if err := dec.Decode(&s.Cache); err != nil { + return err + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := 
strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "has_authorization_realms": + if err := dec.Decode(&s.HasAuthorizationRealms); err != nil { + return err + } + + case "has_default_username_pattern": + if err := dec.Decode(&s.HasDefaultUsernamePattern); err != nil { + return err + } + + case "has_truststore": + if err := dec.Decode(&s.HasTruststore); err != nil { + return err + } + + case "is_authentication_delegated": + if err := dec.Decode(&s.IsAuthenticationDelegated); err != nil { + return err + } + + case "name": + if err := dec.Decode(&s.Name); err != nil { + return err + } + + case "order": + if err := dec.Decode(&s.Order); err != nil { + return err + } + + case "size": + if err := dec.Decode(&s.Size); err != nil { + return err + } + + } + } + return nil +} + // NewXpackRealm returns a XpackRealm. func NewXpackRealm() *XpackRealm { r := &XpackRealm{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackrolemapping.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackrolemapping.go index c9a5292b9..1913832aa 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackrolemapping.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackrolemapping.go @@ -16,18 +16,78 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // XpackRoleMapping type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L264-L267 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L270-L273 type XpackRoleMapping struct { Enabled int `json:"enabled"` Size int `json:"size"` } +func (s *XpackRoleMapping) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "enabled": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Enabled = value + case float64: + f := int(v) + s.Enabled = f + } + + case "size": + + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.Atoi(v) + if err != nil { + return err + } + s.Size = value + case float64: + f := int(v) + s.Size = f + } + + } + } + return nil +} + // NewXpackRoleMapping returns a XpackRoleMapping. func NewXpackRoleMapping() *XpackRoleMapping { r := &XpackRoleMapping{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackruntimefieldtypes.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackruntimefieldtypes.go index 14fda4c5d..683eed645 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackruntimefieldtypes.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/xpackruntimefieldtypes.go @@ -16,19 +16,80 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // XpackRuntimeFieldTypes type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/types.ts#L269-L271 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/types.ts#L275-L277 type XpackRuntimeFieldTypes struct { Available bool `json:"available"` Enabled bool `json:"enabled"` FieldTypes []RuntimeFieldsType `json:"field_types"` } +func (s *XpackRuntimeFieldTypes) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "available": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Available = value + case bool: + s.Available = v + } + + case "enabled": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.Enabled = value + case bool: + s.Enabled = v + } + + case "field_types": + if err := dec.Decode(&s.FieldTypes); err != nil { + return err + } + + } + } + return nil +} + // NewXpackRuntimeFieldTypes returns a XpackRuntimeFieldTypes. 
func NewXpackRuntimeFieldTypes() *XpackRuntimeFieldTypes { r := &XpackRuntimeFieldTypes{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/zeroshotclassificationinferenceoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/zeroshotclassificationinferenceoptions.go index a486c3d75..3862ffd33 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/zeroshotclassificationinferenceoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/zeroshotclassificationinferenceoptions.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ZeroShotClassificationInferenceOptions type. 
// -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L186-L207 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L201-L222 type ZeroShotClassificationInferenceOptions struct { // ClassificationLabels The zero shot classification labels indicating entailment, neutral, and // contradiction @@ -41,6 +49,79 @@ type ZeroShotClassificationInferenceOptions struct { Tokenization *TokenizationConfigContainer `json:"tokenization,omitempty"` } +func (s *ZeroShotClassificationInferenceOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "classification_labels": + if err := dec.Decode(&s.ClassificationLabels); err != nil { + return err + } + + case "hypothesis_template": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.HypothesisTemplate = &o + + case "labels": + if err := dec.Decode(&s.Labels); err != nil { + return err + } + + case "multi_label": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.MultiLabel = &value + case bool: + s.MultiLabel = &v + } + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return err + } + + } + } + return nil +} + // NewZeroShotClassificationInferenceOptions returns a 
ZeroShotClassificationInferenceOptions. func NewZeroShotClassificationInferenceOptions() *ZeroShotClassificationInferenceOptions { r := &ZeroShotClassificationInferenceOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/zeroshotclassificationinferenceupdateoptions.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/zeroshotclassificationinferenceupdateoptions.go index 2d1cfd9e1..fc077e9e4 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/zeroshotclassificationinferenceupdateoptions.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/types/zeroshotclassificationinferenceupdateoptions.go @@ -16,13 +16,21 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package types +import ( + "bytes" + "encoding/json" + "errors" + "io" + "strconv" +) + // ZeroShotClassificationInferenceUpdateOptions type. // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/ml/_types/inference.ts#L339-L348 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/ml/_types/inference.ts#L374-L383 type ZeroShotClassificationInferenceUpdateOptions struct { // Labels The labels to predict. 
Labels []string `json:"labels"` @@ -36,6 +44,62 @@ type ZeroShotClassificationInferenceUpdateOptions struct { Tokenization *NlpTokenizationUpdateOptions `json:"tokenization,omitempty"` } +func (s *ZeroShotClassificationInferenceUpdateOptions) UnmarshalJSON(data []byte) error { + + dec := json.NewDecoder(bytes.NewReader(data)) + + for { + t, err := dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + switch t { + + case "labels": + if err := dec.Decode(&s.Labels); err != nil { + return err + } + + case "multi_label": + var tmp interface{} + dec.Decode(&tmp) + switch v := tmp.(type) { + case string: + value, err := strconv.ParseBool(v) + if err != nil { + return err + } + s.MultiLabel = &value + case bool: + s.MultiLabel = &v + } + + case "results_field": + var tmp json.RawMessage + if err := dec.Decode(&tmp); err != nil { + return err + } + o := string(tmp[:]) + o, err = strconv.Unquote(o) + if err != nil { + o = string(tmp[:]) + } + s.ResultsField = &o + + case "tokenization": + if err := dec.Decode(&s.Tokenization); err != nil { + return err + } + + } + } + return nil +} + // NewZeroShotClassificationInferenceUpdateOptions returns a ZeroShotClassificationInferenceUpdateOptions. func NewZeroShotClassificationInferenceUpdateOptions() *ZeroShotClassificationInferenceUpdateOptions { r := &ZeroShotClassificationInferenceUpdateOptions{} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/ackwatch/ack_watch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/ackwatch/ack_watch.go index 47e48af2c..8b38721ce 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/ackwatch/ack_watch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/ackwatch/ack_watch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Acknowledges a watch, manually throttling the execution of the watch's // actions. @@ -71,7 +71,7 @@ func NewAckWatchFunc(tp elastictransport.Interface) NewAckWatch { return func(watchid string) *AckWatch { n := New(tp) - n.WatchId(watchid) + n._watchid(watchid) return n } @@ -192,7 +192,6 @@ func (r AckWatch) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -201,6 +200,10 @@ func (r AckWatch) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -234,18 +237,18 @@ func (r *AckWatch) Header(key, value string) *AckWatch { // WatchId Watch ID // API Name: watchid -func (r *AckWatch) WatchId(v string) *AckWatch { +func (r *AckWatch) _watchid(watchid string) *AckWatch { r.paramSet |= watchidMask - r.watchid = v + r.watchid = watchid return r } // ActionId A comma-separated list of the action ids to be acked // API Name: actionid -func (r *AckWatch) ActionId(v string) *AckWatch { +func (r *AckWatch) ActionId(actionid string) *AckWatch { r.paramSet |= actionidMask - r.actionid = v + r.actionid = actionid return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/ackwatch/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/ackwatch/response.go index 997ded773..07712f5cb 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/ackwatch/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/ackwatch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package ackwatch @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package ackwatch // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/ack_watch/WatcherAckWatchResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/ack_watch/WatcherAckWatchResponse.ts#L22-L24 type Response struct { Status types.WatchStatus `json:"status"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/activatewatch/activate_watch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/activatewatch/activate_watch.go index 74c1c29ac..348ee3072 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/activatewatch/activate_watch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/activatewatch/activate_watch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Activates a currently inactive watch. 
package activatewatch @@ -67,7 +67,7 @@ func NewActivateWatchFunc(tp elastictransport.Interface) NewActivateWatch { return func(watchid string) *ActivateWatch { n := New(tp) - n.WatchId(watchid) + n._watchid(watchid) return n } @@ -172,7 +172,6 @@ func (r ActivateWatch) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r ActivateWatch) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +217,9 @@ func (r *ActivateWatch) Header(key, value string) *ActivateWatch { // WatchId Watch ID // API Name: watchid -func (r *ActivateWatch) WatchId(v string) *ActivateWatch { +func (r *ActivateWatch) _watchid(watchid string) *ActivateWatch { r.paramSet |= watchidMask - r.watchid = v + r.watchid = watchid return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/activatewatch/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/activatewatch/response.go index a1acd6e1e..573f09c5b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/activatewatch/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/activatewatch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package activatewatch @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package activatewatch // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/activate_watch/WatcherActivateWatchResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/activate_watch/WatcherActivateWatchResponse.ts#L22-L24 type Response struct { Status types.ActivationStatus `json:"status"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deactivatewatch/deactivate_watch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deactivatewatch/deactivate_watch.go index 8238cd545..d59252a8c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deactivatewatch/deactivate_watch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deactivatewatch/deactivate_watch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Deactivates a currently active watch. 
package deactivatewatch @@ -67,7 +67,7 @@ func NewDeactivateWatchFunc(tp elastictransport.Interface) NewDeactivateWatch { return func(watchid string) *DeactivateWatch { n := New(tp) - n.WatchId(watchid) + n._watchid(watchid) return n } @@ -172,7 +172,6 @@ func (r DeactivateWatch) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -181,6 +180,10 @@ func (r DeactivateWatch) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,9 +217,9 @@ func (r *DeactivateWatch) Header(key, value string) *DeactivateWatch { // WatchId Watch ID // API Name: watchid -func (r *DeactivateWatch) WatchId(v string) *DeactivateWatch { +func (r *DeactivateWatch) _watchid(watchid string) *DeactivateWatch { r.paramSet |= watchidMask - r.watchid = v + r.watchid = watchid return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deactivatewatch/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deactivatewatch/response.go index a4e859bf7..0f510df6f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deactivatewatch/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deactivatewatch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deactivatewatch @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package deactivatewatch // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/deactivate_watch/DeactivateWatchResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/deactivate_watch/DeactivateWatchResponse.ts#L22-L24 type Response struct { Status types.ActivationStatus `json:"status"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deletewatch/delete_watch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deletewatch/delete_watch.go index fcfa8e6f4..0765abec7 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deletewatch/delete_watch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deletewatch/delete_watch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Removes a watch from Watcher. 
package deletewatch @@ -67,7 +67,7 @@ func NewDeleteWatchFunc(tp elastictransport.Interface) NewDeleteWatch { return func(id string) *DeleteWatch { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -170,7 +170,6 @@ func (r DeleteWatch) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r DeleteWatch) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,9 +215,9 @@ func (r *DeleteWatch) Header(key, value string) *DeleteWatch { // Id Watch ID // API Name: id -func (r *DeleteWatch) Id(v string) *DeleteWatch { +func (r *DeleteWatch) _id(id string) *DeleteWatch { r.paramSet |= idMask - r.id = v + r.id = id return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deletewatch/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deletewatch/response.go index 7dee21253..4fa03d66d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deletewatch/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deletewatch/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package deletewatch // Response holds the response body struct for the package deletewatch // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/delete_watch/DeleteWatchResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/delete_watch/DeleteWatchResponse.ts#L22-L24 type Response struct { Found bool `json:"found"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/executewatch/execute_watch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/executewatch/execute_watch.go index 2818f117f..119f2f9dc 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/executewatch/execute_watch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/executewatch/execute_watch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Forces the execution of a stored watch. 
package executewatch @@ -35,6 +35,7 @@ import ( "github.com/elastic/elastic-transport-go/v8/elastictransport" "github.com/elastic/go-elasticsearch/v8/typedapi/types" + "github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/actionexecutionmode" ) const ( @@ -53,8 +54,9 @@ type ExecuteWatch struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -83,6 +85,8 @@ func New(tp elastictransport.Interface) *ExecuteWatch { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -112,9 +116,19 @@ func (r *ExecuteWatch) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -122,6 +136,7 @@ func (r *ExecuteWatch) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -215,7 +230,6 @@ func (r ExecuteWatch) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -224,6 +238,10 @@ func (r ExecuteWatch) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -236,17 +254,84 @@ func (r *ExecuteWatch) Header(key, value string) *ExecuteWatch { // Id Identifier for the watch. // API Name: id -func (r *ExecuteWatch) Id(v string) *ExecuteWatch { +func (r *ExecuteWatch) Id(id string) *ExecuteWatch { r.paramSet |= idMask - r.id = v + r.id = id return r } // Debug Defines whether the watch runs in debug mode. 
// API name: debug -func (r *ExecuteWatch) Debug(b bool) *ExecuteWatch { - r.values.Set("debug", strconv.FormatBool(b)) +func (r *ExecuteWatch) Debug(debug bool) *ExecuteWatch { + r.values.Set("debug", strconv.FormatBool(debug)) + + return r +} + +// ActionModes Determines how to handle the watch actions as part of the watch execution. +// API name: action_modes +func (r *ExecuteWatch) ActionModes(actionmodes map[string]actionexecutionmode.ActionExecutionMode) *ExecuteWatch { + + r.req.ActionModes = actionmodes + + return r +} + +// AlternativeInput When present, the watch uses this object as a payload instead of executing +// its own input. +// API name: alternative_input +func (r *ExecuteWatch) AlternativeInput(alternativeinput map[string]json.RawMessage) *ExecuteWatch { + + r.req.AlternativeInput = alternativeinput + + return r +} + +// IgnoreCondition When set to `true`, the watch execution uses the always condition. This can +// also be specified as an HTTP parameter. +// API name: ignore_condition +func (r *ExecuteWatch) IgnoreCondition(ignorecondition bool) *ExecuteWatch { + r.req.IgnoreCondition = &ignorecondition + + return r +} + +// RecordExecution When set to `true`, the watch record representing the watch execution result +// is persisted to the `.watcher-history` index for the current time. In +// addition, the status of the watch is updated, possibly throttling subsequent +// executions. This can also be specified as an HTTP parameter. 
+// API name: record_execution +func (r *ExecuteWatch) RecordExecution(recordexecution bool) *ExecuteWatch { + r.req.RecordExecution = &recordexecution + + return r +} + +// API name: simulated_actions +func (r *ExecuteWatch) SimulatedActions(simulatedactions *types.SimulatedActions) *ExecuteWatch { + + r.req.SimulatedActions = simulatedactions + + return r +} + +// TriggerData This structure is parsed as the data of the trigger event that will be used +// during the watch execution +// API name: trigger_data +func (r *ExecuteWatch) TriggerData(triggerdata *types.ScheduleTriggerEvent) *ExecuteWatch { + + r.req.TriggerData = triggerdata + + return r +} + +// Watch When present, this watch is used instead of the one specified in the request. +// This watch is not persisted to the index and record_execution cannot be set. +// API name: watch +func (r *ExecuteWatch) Watch(watch *types.Watch) *ExecuteWatch { + + r.req.Watch = watch return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/executewatch/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/executewatch/request.go index 954e089ba..1587bc9b5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/executewatch/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/executewatch/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package executewatch @@ -30,7 +30,7 @@ import ( // Request holds the request body struct for the package executewatch // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/execute_watch/WatcherExecuteWatchRequest.ts#L28-L80 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/execute_watch/WatcherExecuteWatchRequest.ts#L28-L79 type Request struct { // ActionModes Determines how to handle the watch actions as part of the watch execution. diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/executewatch/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/executewatch/response.go index 472466e22..6f4f1b812 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/executewatch/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/executewatch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package executewatch @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package executewatch // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/execute_watch/WatcherExecuteWatchResponse.ts#L23-L25 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/execute_watch/WatcherExecuteWatchResponse.ts#L23-L25 type Response struct { Id_ string `json:"_id"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/getsettings/get_settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/getsettings/get_settings.go new file mode 100644 index 000000000..6a184c9cc --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/getsettings/get_settings.go @@ -0,0 +1,179 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Retrieve settings for the watcher system index +package getsettings + +import ( + gobytes "bytes" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type GetSettings struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int +} + +// NewGetSettings type alias for index. +type NewGetSettings func() *GetSettings + +// NewGetSettingsFunc returns a new instance of GetSettings with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewGetSettingsFunc(tp elastictransport.Interface) NewGetSettings { + return func() *GetSettings { + n := New(tp) + + return n + } +} + +// Retrieve settings for the watcher system index +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-settings.html +func New(tp elastictransport.Interface) *GetSettings { + r := &GetSettings{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *GetSettings) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("settings") + + method = http.MethodGet + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r GetSettings) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the GetSettings query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a getsettings.Response +func (r GetSettings) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r GetSettings) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the GetSettings headers map. +func (r *GetSettings) Header(key, value string) *GetSettings { + r.headers.Set(key, value) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/getwatch/get_watch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/getwatch/get_watch.go index 326a00fd8..fefe3ef1b 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/getwatch/get_watch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/getwatch/get_watch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves a watch by its ID. 
package getwatch @@ -67,7 +67,7 @@ func NewGetWatchFunc(tp elastictransport.Interface) NewGetWatch { return func(id string) *GetWatch { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -170,7 +170,6 @@ func (r GetWatch) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -179,6 +178,10 @@ func (r GetWatch) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -212,9 +215,9 @@ func (r *GetWatch) Header(key, value string) *GetWatch { // Id Watch ID // API Name: id -func (r *GetWatch) Id(v string) *GetWatch { +func (r *GetWatch) _id(id string) *GetWatch { r.paramSet |= idMask - r.id = v + r.id = id return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/getwatch/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/getwatch/response.go index a56e0001d..7d63634ec 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/getwatch/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/getwatch/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package getwatch @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package getwatch // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/get_watch/GetWatchResponse.ts#L24-L34 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/get_watch/GetWatchResponse.ts#L24-L34 type Response struct { Found bool `json:"found"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/putwatch/put_watch.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/putwatch/put_watch.go index c1d98c1b1..6181971c6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/putwatch/put_watch.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/putwatch/put_watch.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Creates a new watch, or updates an existing one. 
package putwatch @@ -53,8 +53,9 @@ type PutWatch struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int @@ -70,7 +71,7 @@ func NewPutWatchFunc(tp elastictransport.Interface) NewPutWatch { return func(id string) *PutWatch { n := New(tp) - n.Id(id) + n._id(id) return n } @@ -85,6 +86,8 @@ func New(tp elastictransport.Interface) *PutWatch { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -114,9 +117,19 @@ func (r *PutWatch) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -124,6 +137,7 @@ func (r *PutWatch) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -206,7 +220,6 @@ func (r PutWatch) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -215,6 +228,10 @@ func (r PutWatch) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -227,17 +244,17 @@ func (r *PutWatch) Header(key, value string) *PutWatch { // Id Watch ID // API Name: id -func (r *PutWatch) Id(v string) *PutWatch { +func (r *PutWatch) _id(id string) *PutWatch { r.paramSet |= idMask - r.id = v + r.id = id return r } // Active Specify whether the watch is in/active by default // API name: active -func (r *PutWatch) Active(b bool) *PutWatch { - r.values.Set("active", strconv.FormatBool(b)) +func (r *PutWatch) Active(active bool) *PutWatch { + r.values.Set("active", strconv.FormatBool(active)) return r } @@ -245,8 
+262,8 @@ func (r *PutWatch) Active(b bool) *PutWatch { // IfPrimaryTerm only update the watch if the last operation that has changed the watch has // the specified primary term // API name: if_primary_term -func (r *PutWatch) IfPrimaryTerm(v string) *PutWatch { - r.values.Set("if_primary_term", v) +func (r *PutWatch) IfPrimaryTerm(ifprimaryterm string) *PutWatch { + r.values.Set("if_primary_term", ifprimaryterm) return r } @@ -254,16 +271,71 @@ func (r *PutWatch) IfPrimaryTerm(v string) *PutWatch { // IfSeqNo only update the watch if the last operation that has changed the watch has // the specified sequence number // API name: if_seq_no -func (r *PutWatch) IfSeqNo(v string) *PutWatch { - r.values.Set("if_seq_no", v) +func (r *PutWatch) IfSeqNo(sequencenumber string) *PutWatch { + r.values.Set("if_seq_no", sequencenumber) return r } // Version Explicit version number for concurrency control // API name: version -func (r *PutWatch) Version(v string) *PutWatch { - r.values.Set("version", v) +func (r *PutWatch) Version(versionnumber string) *PutWatch { + r.values.Set("version", versionnumber) + + return r +} + +// API name: actions +func (r *PutWatch) Actions(actions map[string]types.WatcherAction) *PutWatch { + + r.req.Actions = actions + + return r +} + +// API name: condition +func (r *PutWatch) Condition(condition *types.WatcherCondition) *PutWatch { + + r.req.Condition = condition + + return r +} + +// API name: input +func (r *PutWatch) Input(input *types.WatcherInput) *PutWatch { + + r.req.Input = input + + return r +} + +// API name: metadata +func (r *PutWatch) Metadata(metadata types.Metadata) *PutWatch { + r.req.Metadata = metadata + + return r +} + +// API name: throttle_period +func (r *PutWatch) ThrottlePeriod(throttleperiod string) *PutWatch { + + r.req.ThrottlePeriod = &throttleperiod + + return r +} + +// API name: transform +func (r *PutWatch) Transform(transform *types.TransformContainer) *PutWatch { + + r.req.Transform = transform + + return r +} 
+ +// API name: trigger +func (r *PutWatch) Trigger(trigger *types.TriggerContainer) *PutWatch { + + r.req.Trigger = trigger return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/putwatch/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/putwatch/request.go index 93e891fd0..1d277efc6 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/putwatch/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/putwatch/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putwatch @@ -29,12 +29,12 @@ import ( // Request holds the request body struct for the package putwatch // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/put_watch/WatcherPutWatchRequest.ts#L30-L54 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/put_watch/WatcherPutWatchRequest.ts#L30-L53 type Request struct { Actions map[string]types.WatcherAction `json:"actions,omitempty"` Condition *types.WatcherCondition `json:"condition,omitempty"` Input *types.WatcherInput `json:"input,omitempty"` - Metadata map[string]json.RawMessage `json:"metadata,omitempty"` + Metadata types.Metadata `json:"metadata,omitempty"` ThrottlePeriod *string `json:"throttle_period,omitempty"` Transform *types.TransformContainer `json:"transform,omitempty"` Trigger *types.TriggerContainer `json:"trigger,omitempty"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/putwatch/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/putwatch/response.go index 
cc288e0e4..24fe130a2 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/putwatch/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/putwatch/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package putwatch // Response holds the response body struct for the package putwatch // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/put_watch/WatcherPutWatchResponse.ts#L23-L31 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/put_watch/WatcherPutWatchResponse.ts#L23-L31 type Response struct { Created bool `json:"created"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/querywatches/query_watches.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/querywatches/query_watches.go index 2f4bc5593..66ac1471c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/querywatches/query_watches.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/querywatches/query_watches.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves stored watches. 
package querywatches @@ -48,8 +48,9 @@ type QueryWatches struct { buf *gobytes.Buffer - req *Request - raw io.Reader + req *Request + deferred []func(request *Request) error + raw io.Reader paramSet int } @@ -76,6 +77,8 @@ func New(tp elastictransport.Interface) *QueryWatches { values: make(url.Values), headers: make(http.Header), buf: gobytes.NewBuffer(nil), + + req: NewRequest(), } return r @@ -105,9 +108,19 @@ func (r *QueryWatches) HttpRequest(ctx context.Context) (*http.Request, error) { var err error + if len(r.deferred) > 0 { + for _, f := range r.deferred { + deferredErr := f(r.req) + if deferredErr != nil { + return nil, deferredErr + } + } + } + if r.raw != nil { r.buf.ReadFrom(r.raw) } else if r.req != nil { + data, err := json.Marshal(r.req) if err != nil { @@ -115,6 +128,7 @@ func (r *QueryWatches) HttpRequest(ctx context.Context) (*http.Request, error) { } r.buf.Write(data) + } r.path.Scheme = "http" @@ -196,7 +210,6 @@ func (r QueryWatches) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -205,6 +218,10 @@ func (r QueryWatches) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -214,3 +231,44 @@ func (r *QueryWatches) Header(key, value string) *QueryWatches { return r } + +// From The offset from the first result to fetch. Needs to be non-negative. +// API name: from +func (r *QueryWatches) From(from int) *QueryWatches { + r.req.From = &from + + return r +} + +// Query Optional, query filter watches to be returned. +// API name: query +func (r *QueryWatches) Query(query *types.Query) *QueryWatches { + + r.req.Query = query + + return r +} + +// SearchAfter Optional search After to do pagination using last hit’s sort values. 
+// API name: search_after +func (r *QueryWatches) SearchAfter(sortresults ...types.FieldValue) *QueryWatches { + r.req.SearchAfter = sortresults + + return r +} + +// Size The number of hits to return. Needs to be non-negative. +// API name: size +func (r *QueryWatches) Size(size int) *QueryWatches { + r.req.Size = &size + + return r +} + +// Sort Optional sort definition. +// API name: sort +func (r *QueryWatches) Sort(sorts ...types.SortCombinations) *QueryWatches { + r.req.Sort = sorts + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/querywatches/request.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/querywatches/request.go index 6875fa475..ef720812e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/querywatches/request.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/querywatches/request.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package querywatches @@ -29,7 +29,7 @@ import ( // Request holds the request body struct for the package querywatches // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/query_watches/WatcherQueryWatchesRequest.ts#L25-L49 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/query_watches/WatcherQueryWatchesRequest.ts#L25-L48 type Request struct { // From The offset from the first result to fetch. Needs to be non-negative. 
diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/querywatches/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/querywatches/response.go index 393865905..0af27757c 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/querywatches/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/querywatches/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package querywatches @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package querywatches // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/query_watches/WatcherQueryWatchesResponse.ts#L23-L28 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/query_watches/WatcherQueryWatchesResponse.ts#L23-L28 type Response struct { Count int `json:"count"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/start/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/start/response.go index cdd06dbd3..6e1bc27d9 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/start/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/start/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package start // Response holds the response body struct for the package start // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/start/WatcherStartResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/start/WatcherStartResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/start/start.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/start/start.go index 9c797fcf8..1daffc3e5 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/start/start.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/start/start.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Starts Watcher if it is not already running. 
package start @@ -159,7 +159,6 @@ func (r Start) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r Start) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stats/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stats/response.go index b8b1ac3e9..900f3803f 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stats/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stats/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package stats @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package stats // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/stats/WatcherStatsResponse.ts#L24-L32 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/stats/WatcherStatsResponse.ts#L24-L32 type Response struct { ClusterName string `json:"cluster_name"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stats/stats.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stats/stats.go index 927da6136..be155060a 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stats/stats.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stats/stats.go @@ -16,7 +16,7 @@ // under the License. 
// Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves the current Watcher metrics. package stats @@ -176,7 +176,6 @@ func (r Stats) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -185,6 +184,10 @@ func (r Stats) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -218,17 +221,17 @@ func (r *Stats) Header(key, value string) *Stats { // Metric Defines which additional metrics are included in the response. // API Name: metric -func (r *Stats) Metric(v ...string) *Stats { +func (r *Stats) Metric(metrics ...string) *Stats { r.paramSet |= metricMask - r.metric = strings.Join(v, ",") + r.metric = strings.Join(metrics, ",") return r } // EmitStacktraces Defines whether stack traces are generated for each watch that is running. // API name: emit_stacktraces -func (r *Stats) EmitStacktraces(b bool) *Stats { - r.values.Set("emit_stacktraces", strconv.FormatBool(b)) +func (r *Stats) EmitStacktraces(emitstacktraces bool) *Stats { + r.values.Set("emit_stacktraces", strconv.FormatBool(emitstacktraces)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stop/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stop/response.go index 0d7d22ffb..237677127 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stop/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stop/response.go @@ -16,13 +16,13 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package stop // Response holds the response body struct for the package stop // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/watcher/stop/WatcherStopResponse.ts#L22-L24 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/watcher/stop/WatcherStopResponse.ts#L22-L24 type Response struct { diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stop/stop.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stop/stop.go index 0fb39c4b6..d0facb10d 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stop/stop.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stop/stop.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Stops Watcher if it is running. 
package stop @@ -159,7 +159,6 @@ func (r Stop) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r Stop) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/updatesettings/update_settings.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/updatesettings/update_settings.go new file mode 100644 index 000000000..c3eb4441a --- /dev/null +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/watcher/updatesettings/update_settings.go @@ -0,0 +1,179 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated from the elasticsearch-specification DO NOT EDIT. 
+// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 + +// Update settings for the watcher system index +package updatesettings + +import ( + gobytes "bytes" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/elastic-transport-go/v8/elastictransport" +) + +// ErrBuildPath is returned in case of missing parameters within the build of the request. +var ErrBuildPath = errors.New("cannot build path, check for missing path parameters") + +type UpdateSettings struct { + transport elastictransport.Interface + + headers http.Header + values url.Values + path url.URL + + buf *gobytes.Buffer + + paramSet int +} + +// NewUpdateSettings type alias for index. +type NewUpdateSettings func() *UpdateSettings + +// NewUpdateSettingsFunc returns a new instance of UpdateSettings with the provided transport. +// Used in the index of the library this allows to retrieve every apis in once place. +func NewUpdateSettingsFunc(tp elastictransport.Interface) NewUpdateSettings { + return func() *UpdateSettings { + n := New(tp) + + return n + } +} + +// Update settings for the watcher system index +// +// https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-update-settings.html +func New(tp elastictransport.Interface) *UpdateSettings { + r := &UpdateSettings{ + transport: tp, + values: make(url.Values), + headers: make(http.Header), + buf: gobytes.NewBuffer(nil), + } + + return r +} + +// HttpRequest returns the http.Request object built from the +// given parameters. 
+func (r *UpdateSettings) HttpRequest(ctx context.Context) (*http.Request, error) { + var path strings.Builder + var method string + var req *http.Request + + var err error + + r.path.Scheme = "http" + + switch { + case r.paramSet == 0: + path.WriteString("/") + path.WriteString("_watcher") + path.WriteString("/") + path.WriteString("settings") + + method = http.MethodPut + } + + r.path.Path = path.String() + r.path.RawQuery = r.values.Encode() + + if r.path.Path == "" { + return nil, ErrBuildPath + } + + if ctx != nil { + req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf) + } else { + req, err = http.NewRequest(method, r.path.String(), r.buf) + } + + req.Header = r.headers.Clone() + + if req.Header.Get("Content-Type") == "" { + if r.buf.Len() > 0 { + req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8") + } + } + + if req.Header.Get("Accept") == "" { + req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8") + } + + if err != nil { + return req, fmt.Errorf("could not build http.Request: %w", err) + } + + return req, nil +} + +// Perform runs the http.Request through the provided transport and returns an http.Response. +func (r UpdateSettings) Perform(ctx context.Context) (*http.Response, error) { + req, err := r.HttpRequest(ctx) + if err != nil { + return nil, err + } + + res, err := r.transport.Perform(req) + if err != nil { + return nil, fmt.Errorf("an error happened during the UpdateSettings query execution: %w", err) + } + + return res, nil +} + +// Do runs the request through the transport, handle the response and returns a updatesettings.Response +func (r UpdateSettings) Do(ctx context.Context) (bool, error) { + return r.IsSuccess(ctx) +} + +// IsSuccess allows to run a query with a context and retrieve the result as a boolean. +// This only exists for endpoints without a request payload and allows for quick control flow. 
+func (r UpdateSettings) IsSuccess(ctx context.Context) (bool, error) { + res, err := r.Perform(ctx) + + if err != nil { + return false, err + } + io.Copy(ioutil.Discard, res.Body) + err = res.Body.Close() + if err != nil { + return false, err + } + + if res.StatusCode >= 200 && res.StatusCode < 300 { + return true, nil + } + + return false, nil +} + +// Header set a key, value pair in the UpdateSettings headers map. +func (r *UpdateSettings) Header(key, value string) *UpdateSettings { + r.headers.Set(key, value) + + return r +} diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/xpack/info/info.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/xpack/info/info.go index f76cec4d4..860873e78 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/xpack/info/info.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/xpack/info/info.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves information about the installed X-Pack features. package info @@ -158,7 +158,6 @@ func (r Info) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -167,6 +166,10 @@ func (r Info) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -201,16 +204,20 @@ func (r *Info) Header(key, value string) *Info { // Categories A comma-separated list of the information categories to include in the // response. For example, `build,license,features`. 
// API name: categories -func (r *Info) Categories(v string) *Info { - r.values.Set("categories", v) +func (r *Info) Categories(categories ...string) *Info { + tmp := []string{} + for _, item := range categories { + tmp = append(tmp, fmt.Sprintf("%v", item)) + } + r.values.Set("categories", strings.Join(tmp, ",")) return r } // AcceptEnterprise If this param is used it must be set to true // API name: accept_enterprise -func (r *Info) AcceptEnterprise(b bool) *Info { - r.values.Set("accept_enterprise", strconv.FormatBool(b)) +func (r *Info) AcceptEnterprise(acceptenterprise bool) *Info { + r.values.Set("accept_enterprise", strconv.FormatBool(acceptenterprise)) return r } @@ -218,8 +225,8 @@ func (r *Info) AcceptEnterprise(b bool) *Info { // Human Defines whether additional human-readable information is included in the // response. In particular, it adds descriptions and a tag line. // API name: human -func (r *Info) Human(b bool) *Info { - r.values.Set("human", strconv.FormatBool(b)) +func (r *Info) Human(human bool) *Info { + r.values.Set("human", strconv.FormatBool(human)) return r } diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/xpack/info/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/xpack/info/response.go index b8866860a..a1782c21e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/xpack/info/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/xpack/info/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package info @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package info // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/info/XPackInfoResponse.ts#L22-L29 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/info/XPackInfoResponse.ts#L22-L29 type Response struct { Build types.BuildInformation `json:"build"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/xpack/usage/response.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/xpack/usage/response.go index 16f27f52e..f5060678e 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/xpack/usage/response.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/xpack/usage/response.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. 
-// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 package usage @@ -26,7 +26,7 @@ import ( // Response holds the response body struct for the package usage // -// https://github.com/elastic/elasticsearch-specification/blob/4ab557491062aab5a916a1e274e28c266b0e0708/specification/xpack/usage/XPackUsageResponse.ts#L43-L76 +// https://github.com/elastic/elasticsearch-specification/blob/ac9c431ec04149d9048f2b8f9731e3c2f7f38754/specification/xpack/usage/XPackUsageResponse.ts#L43-L79 type Response struct { AggregateMetric types.Base `json:"aggregate_metric"` diff --git a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/xpack/usage/usage.go b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/xpack/usage/usage.go index c14e7e55d..e15eb4f79 100644 --- a/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/xpack/usage/usage.go +++ b/vendor/github.com/elastic/go-elasticsearch/v8/typedapi/xpack/usage/usage.go @@ -16,7 +16,7 @@ // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. -// https://github.com/elastic/elasticsearch-specification/tree/4ab557491062aab5a916a1e274e28c266b0e0708 +// https://github.com/elastic/elasticsearch-specification/tree/ac9c431ec04149d9048f2b8f9731e3c2f7f38754 // Retrieves usage information about the installed X-Pack features. package usage @@ -159,7 +159,6 @@ func (r Usage) Do(ctx context.Context) (*Response, error) { } return response, nil - } errorResponse := types.NewElasticsearchError() @@ -168,6 +167,10 @@ func (r Usage) Do(ctx context.Context) (*Response, error) { return nil, err } + if errorResponse.Status == 0 { + errorResponse.Status = res.StatusCode + } + return nil, errorResponse } @@ -202,8 +205,8 @@ func (r *Usage) Header(key, value string) *Usage { // MasterTimeout Period to wait for a connection to the master node. 
If no response is // received before the timeout expires, the request fails and returns an error. // API name: master_timeout -func (r *Usage) MasterTimeout(v string) *Usage { - r.values.Set("master_timeout", v) +func (r *Usage) MasterTimeout(duration string) *Usage { + r.values.Set("master_timeout", duration) return r } diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index a2bf06e94..7a008a4d2 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -3,7 +3,7 @@ before: hooks: - ./gen.sh - - go install mvdan.cc/garble@v0.7.2 + - go install mvdan.cc/garble@v0.9.3 builds: - diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index d73fb86e4..f710a34ec 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,47 @@ This package provides various compression algorithms. 
# changelog +* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) + * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 + * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 + +* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) + * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 + * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 + * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 + * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 + * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 + * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 + * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + +* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) + * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 + * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). 
https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 + * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 + * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 + * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 + +* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) + * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 + * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 + * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 + * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 + * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 + * s2c/s2sx: Use concurrent decoding. 
https://github.com/klauspost/compress/pull/746 + +* Jan 21st, 2023 (v1.15.15) + * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 + * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 + * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 + * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 + +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + * Dec 11, 2022 (v1.15.13) * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 
https://github.com/klauspost/compress/pull/708 @@ -587,6 +628,8 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv * [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. * [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. * [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. +* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. +* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. # license diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md new file mode 100644 index 000000000..23a43387b --- /dev/null +++ b/vendor/github.com/klauspost/compress/SECURITY.md @@ -0,0 +1,25 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Vulnerability Definition + +A security vulnerability is a bug that with certain input triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability. + +Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently. + +Out-of-memory crashes only applies if the en/decoder uses an abnormal amount of memory, with appropriate options applied, to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue. + +It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability. + +Vulnerabilities resulting from compiler/assembler errors should be reported upstream. 
Depending on the severity this package may or may not implement a workaround. + +## Reporting a Vulnerability + +If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. + +Please disclose it at [security advisory](https://github.com/klaupost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that. + +This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed in a best effort base. diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 6f341914c..dac97e58a 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -146,54 +146,51 @@ func (s *Scratch) compress(src []byte) error { c1.encodeZero(tt[src[ip-2]]) ip -= 2 } + src = src[:ip] // Main compression loop. switch { case !s.zeroBits && s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush. // We do not need to check if any output is 0 bits. - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case !s.zeroBits: // We do not need to check if any output is 0 bits. 
- for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) s.bw.flush32() c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } default: - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) s.bw.flush32() c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } } @@ -459,15 +456,17 @@ func (s *Scratch) countSimple(in []byte) (max int) { for _, v := range in { s.count[v]++ } - m := uint32(0) + m, symlen := uint32(0), s.symbolLen for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } + symlen = uint16(i) + 1 } + s.symbolLen = symlen return int(m) } diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index 926f5f153..cc05d0f7e 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -260,7 +260,9 @@ func (s *Scratch) buildDtable() error { // If the buffer is over-read an error is returned. 
func (s *Scratch) decompress() error { br := &s.bits - br.init(s.br.unread()) + if err := br.init(s.br.unread()); err != nil { + return err + } var s1, s2 decoder // Initialize and decode first state and symbol. diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index 504a7be9d..e36d9742f 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -67,7 +67,6 @@ func (b *bitReaderBytes) fillFast() { // 2 bounds checks. v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -88,8 +87,7 @@ func (b *bitReaderBytes) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -179,7 +177,6 @@ func (b *bitReaderShifted) fillFast() { // 2 bounds checks. v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 @@ -200,8 +197,7 @@ func (b *bitReaderShifted) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index ec71f7a34..b4d7164e3 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -13,14 +13,6 @@ type bitWriter struct { out []byte } -// bitMask16 is bitmasks. 
Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { @@ -60,6 +52,22 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { b.nBits += encA.nBits + encB.nBits } +// encFourSymbols adds up to 32 bits from four symbols. +// It will not check if there is space for them, +// so the caller must ensure that b has been flushed recently. +func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { + bitsA := encA.nBits + bitsB := bitsA + encB.nBits + bitsC := bitsB + encC.nBits + bitsD := bitsC + encD.nBits + combined := uint64(encA.val) | + (uint64(encB.val) << (bitsA & 63)) | + (uint64(encC.val) << (bitsB & 63)) | + (uint64(encD.val) << (bitsC & 63)) + b.bitContainer |= combined << (b.nBits & 63) + b.nBits += bitsD +} + // flush32 will flush out, so there are at least 32 bits available for writing. 
func (b *bitWriter) flush32() { if b.nBits < 32 { diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index d9223a91e..4ee4fa18d 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -248,8 +248,7 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { tmp := src[n : n+4] // tmp should be len 4 bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) } } else { for ; n >= 0; n -= 4 { @@ -484,34 +483,35 @@ func (s *Scratch) buildCTable() error { // Different from reference implementation. huffNode0 := s.nodes[0 : huffNodesLen+1] - for huffNode[nonNullRank].count == 0 { + for huffNode[nonNullRank].count() == 0 { nonNullRank-- } lowS := int16(nonNullRank) nodeRoot := nodeNb + lowS - 1 lowN := nodeNb - huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count - huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) nodeNb++ lowS -= 2 for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].count = 1 << 30 + huffNode[n].setCount(1 << 30) } // fake entry, strong barrier - huffNode0[0].count = 1 << 31 + huffNode0[0].setCount(1 << 31) // create parents for nodeNb <= nodeRoot { var n1, n2 int16 - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n1 = lowS lowS-- } else { n1 = lowN lowN++ } - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n2 = lowS lowS-- } else { @@ -519,18 +519,19 @@ func (s *Scratch) buildCTable() error { lowN++ } - huffNode[nodeNb].count = 
huffNode0[n1+1].count + huffNode0[n2+1].count - huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) nodeNb++ } // distribute weights (unlimited tree height) - huffNode[nodeRoot].nbBits = 0 + huffNode[nodeRoot].setNbBits(0) for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } s.actualTableLog = s.setMaxHeight(int(nonNullRank)) maxNbBits := s.actualTableLog @@ -542,7 +543,7 @@ func (s *Scratch) buildCTable() error { var nbPerRank [tableLogMax + 1]uint16 var valPerRank [16]uint16 for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits]++ + nbPerRank[v.nbBits()]++ } // determine stating value per rank { @@ -557,7 +558,7 @@ func (s *Scratch) buildCTable() error { // push nbBits per symbol, symbol order for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol].nBits = v.nbBits + s.cTable[v.symbol()].nBits = v.nbBits() } // assign value within rank, symbol order @@ -603,12 +604,12 @@ func (s *Scratch) huffSort() { pos := rank[r].current rank[r].current++ prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count { + for pos > rank[r].base && c > prev.count() { nodes[pos&huffNodesMask] = prev pos-- prev = nodes[(pos-1)&huffNodesMask] } - nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) } } @@ -617,7 +618,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { huffNode := s.nodes[1 : huffNodesLen+1] //huffNode = huffNode[: huffNodesLen] - largestBits := huffNode[lastNonNull].nbBits + 
largestBits := huffNode[lastNonNull].nbBits() // early exit : no elt > maxNbBits if largestBits <= maxNbBits { @@ -627,14 +628,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { baseCost := int(1) << (largestBits - maxNbBits) n := uint32(lastNonNull) - for huffNode[n].nbBits > maxNbBits { - totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) - huffNode[n].nbBits = maxNbBits + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) n-- } // n stops at huffNode[n].nbBits <= maxNbBits - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } // n end at index of smallest symbol using < maxNbBits @@ -655,10 +656,10 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { { currentNbBits := maxNbBits for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits >= currentNbBits { + if huffNode[pos].nbBits() >= currentNbBits { continue } - currentNbBits = huffNode[pos].nbBits // < maxNbBits + currentNbBits = huffNode[pos].nbBits() // < maxNbBits rankLast[maxNbBits-currentNbBits] = uint32(pos) } } @@ -675,8 +676,8 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { if lowPos == noSymbol { break } - highTotal := huffNode[highPos].count - lowTotal := 2 * huffNode[lowPos].count + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() if highTotal <= lowTotal { break } @@ -692,13 +693,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { // this rank is no longer empty rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] } - huffNode[rankLast[nBitsToDecrease]].nbBits++ + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) if rankLast[nBitsToDecrease] == 0 { /* special case, reached largest symbol */ rankLast[nBitsToDecrease] = noSymbol } else { rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits != 
maxNbBits-nBitsToDecrease { + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ } } @@ -706,15 +708,15 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { for totalCost < 0 { /* Sometimes, cost correction overshoot */ if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } - huffNode[n+1].nbBits-- + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) rankLast[1] = n + 1 totalCost++ continue } - huffNode[rankLast[1]+1].nbBits-- + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) rankLast[1]++ totalCost++ } @@ -722,9 +724,26 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { return maxNbBits } -type nodeElt struct { - count uint32 - parent uint16 - symbol byte - nbBits uint8 +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. 
+type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 } + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 42a237eac..54bd08b25 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -61,7 +61,7 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { b, err := fse.Decompress(in[:iSize], s.fse) s.fse.Out = nil if err != nil { - return s, nil, err + return s, nil, fmt.Errorf("fse decompress returned: %w", err) } if len(b) > 255 { return s, nil, errors.New("corrupt input: output table too large") @@ -253,7 +253,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { switch d.actualTableLog { case 8: - const shift = 8 - 8 + const shift = 0 for br.off >= 4 { br.fillFast() v := dt[uint8(br.value>>(56+shift))] diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s index 8d2187a2c..c4c7ab2d1 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -4,360 +4,349 @@ // func decompress4x_main_loop_amd64(ctx *decompress4xContext) TEXT ·decompress4x_main_loop_amd64(SB), $0-8 - XORQ DX, DX - // Preload values MOVQ ctx+0(FP), AX 
MOVBQZX 8(AX), DI - MOVQ 16(AX), SI - MOVQ 48(AX), BX - MOVQ 24(AX), R9 - MOVQ 32(AX), R10 - MOVQ (AX), R11 + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 // Main loop main_loop: - MOVQ SI, R8 - CMPQ R8, BX + XORL DX, DX + CMPQ BX, SI SETGE DL // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 JBE skip_fill0 - MOVQ 24(R11), AX - SUBQ $0x20, R13 + MOVQ 24(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ (R11), R14 + MOVQ (R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 24(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 - // exhausted = exhausted || (br0.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill0: // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br0.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX) // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 + MOVQ R11, 32(R10) + MOVB R12, 40(R10) // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 JBE skip_fill1 - MOVQ 72(R11), AX - 
SUBQ $0x20, R13 + MOVQ 72(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 48(R11), R14 + MOVQ 48(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 72(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 - // exhausted = exhausted || (br1.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill1: // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br1.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX)(R8*1) // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 + MOVQ R11, 80(R10) + MOVB R12, 88(R10) // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 JBE skip_fill2 - MOVQ 120(R11), AX - SUBQ $0x20, R13 + MOVQ 120(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 96(R11), R14 + MOVQ 96(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 120(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 - // exhausted = exhausted || (br2.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br2.off < 4) 
+ CMPQ AX, $0x04 + ADCB $+0, DL skip_fill2: // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br2.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX)(R8*2) // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 + MOVQ R11, 128(R10) + MOVB R12, 136(R10) // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 JBE skip_fill3 - MOVQ 168(R11), AX - SUBQ $0x20, R13 + MOVQ 168(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 144(R11), R14 + MOVQ 144(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 168(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 - // exhausted = exhausted || (br3.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill3: // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br3.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 
// v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) - ADDQ $0x02, SI + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX TESTB DL, DL JZ main_loop MOVQ ctx+0(FP), AX - SUBQ 16(AX), SI - SHLQ $0x02, SI - MOVQ SI, 40(AX) + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) RET // func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 - XORQ DX, DX - // Preload values MOVQ ctx+0(FP), CX MOVBQZX 8(CX), DI MOVQ 16(CX), BX MOVQ 48(CX), SI - MOVQ 24(CX), R9 - MOVQ 32(CX), R10 - MOVQ (CX), R11 + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 // Main loop main_loop: - MOVQ BX, R8 - CMPQ R8, SI + XORL DX, DX + CMPQ BX, SI SETGE DL // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 JBE skip_fill0 - MOVQ 24(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ (R11), R15 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 24(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 - // exhausted = exhausted || (br0.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill0: // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v0.entry) 
MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -365,88 +354,86 @@ skip_fill0: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX) // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 + MOVQ R11, 32(R10) + MOVB R12, 40(R10) // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 JBE skip_fill1 - MOVQ 72(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 48(R11), R15 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 72(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 - // exhausted = exhausted || (br1.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br1.off < 4) + CMPQ R13, 
$0x04 + ADCB $+0, DL skip_fill1: // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -454,88 +441,86 @@ skip_fill1: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX)(R8*1) // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 + MOVQ R11, 80(R10) + MOVB R12, 88(R10) // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 JBE skip_fill2 - MOVQ 120(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 96(R11), R15 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 
- MOVQ R14, 120(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 - // exhausted = exhausted || (br2.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill2: // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -543,88 +528,86 @@ skip_fill2: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX)(R8*2) // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 + MOVQ R11, 128(R10) + MOVB R12, 136(R10) // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 JBE 
skip_fill3 - MOVQ 168(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 144(R11), R15 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 168(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 - // exhausted = exhausted || (br3.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill3: // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -632,11 +615,12 @@ skip_fill3: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + LEAQ (R8)(R8*2), CX + MOVL AX, 
(BX)(CX*1) // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) + MOVQ R11, 176(R10) + MOVB R12, 184(R10) ADDQ $0x04, BX TESTB DL, DL JZ main_loop @@ -652,7 +636,7 @@ TEXT ·decompress1x_main_loop_amd64(SB), $0-8 MOVQ 16(CX), DX MOVQ 24(CX), BX CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded + JB error_max_decoded_size_exceeded LEAQ (DX)(BX*1), BX MOVQ (CX), SI MOVQ (SI), R8 @@ -667,7 +651,7 @@ main_loop: // Check if we have room for 4 bytes in the output buffer LEAQ 4(DX), CX CMPQ CX, BX - JGE error_max_decoded_size_exeeded + JGE error_max_decoded_size_exceeded // Decode 4 values CMPQ R11, $0x20 @@ -744,7 +728,7 @@ loop_condition: RET // Report error -error_max_decoded_size_exeeded: +error_max_decoded_size_exceeded: MOVQ ctx+0(FP), AX MOVQ $-1, CX MOVQ CX, 40(AX) @@ -757,7 +741,7 @@ TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 MOVQ 16(CX), DX MOVQ 24(CX), BX CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded + JB error_max_decoded_size_exceeded LEAQ (DX)(BX*1), BX MOVQ (CX), SI MOVQ (SI), R8 @@ -772,7 +756,7 @@ main_loop: // Check if we have room for 4 bytes in the output buffer LEAQ 4(DX), CX CMPQ CX, BX - JGE error_max_decoded_size_exeeded + JGE error_max_decoded_size_exceeded // Decode 4 values CMPQ R11, $0x20 @@ -839,7 +823,7 @@ loop_condition: RET // Report error -error_max_decoded_size_exeeded: +error_max_decoded_size_exceeded: MOVQ ctx+0(FP), AX MOVQ $-1, CX MOVQ CX, 40(AX) diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 298c4f8e9..2aa6a95a0 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -87,22 +87,32 @@ func emitCopy(dst []byte, offset, length int) int { return i + 2 } -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. 
-// -// It assumes that: -// -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - func hash(u, shift uint32) uint32 { return (u * 0x1e35a7bd) >> shift } +// EncodeBlockInto exposes encodeBlock but checks dst size. +func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It // assumes that the varint-encoded length of the decompressed bytes has already // been written. diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index 65b38abed..bdd49c8b2 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -304,7 +304,7 @@ import "github.com/klauspost/compress/zstd" // Create a reader that caches decompressors. // For this operation type we supply a nil Reader. -var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0)) +var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) // Decompress a buffer. We don't supply a destination buffer, // so it will be allocated by the decoder. 
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 6b9929ddf..9f17ce601 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -9,6 +9,7 @@ import ( "encoding/binary" "errors" "fmt" + "hash/crc32" "io" "os" "path/filepath" @@ -192,16 +193,14 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } // Read block data. - if cap(b.dataStorage) < cSize { + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. if b.lowMem || cSize > maxCompressedBlockSize { b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { if debugDecoder { @@ -210,6 +209,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } return err } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } return nil } @@ -441,6 +443,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err } } var err error + if debugDecoder { + println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) + } huff, literals, err = huff0.ReadTable(literals, huff) if err != nil { println("reading huffman table:", err) @@ -587,7 +592,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { } seq.fse.setRLE(symb) if debugDecoder { - printf("RLE set to %+v, code: %v", symb, v) + printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: println("Reading table for", tableIndex(i)) diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 12e8f6f0b..fd4a36f73 100644 --- 
a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -473,7 +473,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { return b.encodeLits(b.literals, rawAllLits) } // We want some difference to at least account for the headers. - saved := b.size - len(b.literals) - (b.size >> 5) + saved := b.size - len(b.literals) - (b.size >> 6) if saved < 16 { if org == nil { return errIncompressible @@ -779,10 +779,13 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { } b.output = wr.out + // Maybe even add a bigger margin. if len(b.output)-3-bhOffset >= b.size { - // Maybe even add a bigger margin. + // Discard and encode as raw block. + b.output = b.encodeRawTo(b.output[:bhOffset], org) + b.popOffsets() b.litEnc.Reuse = huff0.ReusePolicyNone - return errIncompressible + return nil } // Size is output minus block header. diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index 176788f25..55a388553 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -54,7 +54,7 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { func (b *byteBuf) readByte() (byte, error) { bb := *b if len(bb) < 1 { - return 0, nil + return 0, io.ErrUnexpectedEOF } r := bb[0] *b = bb[1:] @@ -109,7 +109,7 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { } func (r *readerWrapper) readByte() (byte, error) { - n2, err := r.r.Read(r.tmp[:1]) + n2, err := io.ReadFull(r.r, r.tmp[:1]) if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 30459cd3f..f04aaa21e 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -40,8 +40,7 @@ type 
Decoder struct { frame *frameDec // Custom dictionaries. - // Always uses copies. - dicts map[uint32]dict + dicts map[uint32]*dict // streamWg is the waitgroup for all streams streamWg sync.WaitGroup @@ -103,7 +102,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { } // Transfer option dicts. - d.dicts = make(map[uint32]dict, len(d.o.dicts)) + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) for _, dc := range d.o.dicts { d.dicts[dc.id] = dc } @@ -341,15 +340,8 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { } return dst, err } - if frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - return nil, ErrUnknownDictionary - } - if debugDecoder { - println("setting dict", frame.DictionaryID) - } - frame.history.setDict(&dict) + if err = d.setDict(frame); err != nil { + return nil, err } if frame.WindowSize > d.o.maxWindowSize { if debugDecoder { @@ -463,12 +455,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { } if len(next.b) > 0 { - n, err := d.current.crc.Write(next.b) - if err == nil { - if n != len(next.b) { - d.current.err = io.ErrShortWrite - } - } + d.current.crc.Write(next.b) } if next.err == nil && next.d != nil && next.d.hasCRC { got := uint32(d.current.crc.Sum64()) @@ -495,18 +482,12 @@ func (d *Decoder) nextBlockSync() (ok bool) { if !d.syncStream.inFrame { d.frame.history.reset() d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } if d.current.err != nil { return false } - if d.frame.DictionaryID != nil { - dict, ok := d.dicts[*d.frame.DictionaryID] - if !ok { - d.current.err = ErrUnknownDictionary - return false - } else { - d.frame.history.setDict(&dict) - } - } if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { d.current.err = ErrDecoderSizeExceeded return false @@ -865,13 +846,8 @@ decodeStream: if debugDecoder && err != nil { println("Frame decoder returned", err) } - if err 
== nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary - } else { - frame.history.setDict(&dict) - } + if err == nil { + err = d.setDict(frame) } if err == nil && d.frame.WindowSize > d.o.maxWindowSize { if debugDecoder { @@ -953,3 +929,20 @@ decodeStream: hist.reset() d.frame.history.b = frameHistCache } + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index f42448e69..774c5f00f 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -6,6 +6,8 @@ package zstd import ( "errors" + "fmt" + "math/bits" "runtime" ) @@ -18,7 +20,7 @@ type decoderOptions struct { concurrent int maxDecodedSize uint64 maxWindowSize uint64 - dicts []dict + dicts []*dict ignoreChecksum bool limitToCap bool decodeBufsBelow int @@ -85,7 +87,13 @@ func WithDecoderMaxMemory(n uint64) DOption { } // WithDecoderDicts allows to register one or more dictionaries for the decoder. -// If several dictionaries with the same ID is provided the last one will be used. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. 
+// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithDecoderDicts(dicts ...[]byte) DOption { return func(o *decoderOptions) error { for _, b := range dicts { @@ -93,12 +101,24 @@ func WithDecoderDicts(dicts ...[]byte) DOption { if err != nil { return err } - o.dicts = append(o.dicts, *d) + o.dicts = append(o.dicts, d) } return nil } } +// WithDecoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) + return nil + } +} + // WithDecoderMaxWindow allows to set a maximum window size for decodes. // This allows rejecting packets that will cause big memory usage. // The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index b2725f77b..ca0951452 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -21,6 +21,9 @@ type dict struct { const dictMagic = "\x37\xa4\x30\xec" +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 + // ID returns the dictionary id or 0 if d is nil. func (d *dict) ID() uint32 { if d == nil { @@ -29,14 +32,38 @@ func (d *dict) ID() uint32 { return d.id } -// DictContentSize returns the dictionary content size or 0 if d is nil. -func (d *dict) DictContentSize() int { +// ContentSize returns the dictionary content size or 0 if d is nil. 
+func (d *dict) ContentSize() int { if d == nil { return 0 } return len(d.content) } +// Content returns the dictionary content. +func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. +func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + // Load a dictionary as described in // https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format func loadDict(b []byte) (*dict, error) { @@ -61,7 +88,7 @@ func loadDict(b []byte) (*dict, error) { var err error d.litEnc, b, err = huff0.ReadTable(b[8:], nil) if err != nil { - return nil, err + return nil, fmt.Errorf("loading literal table: %w", err) } d.litEnc.Reuse = huff0.ReusePolicyMust @@ -119,3 +146,16 @@ func loadDict(b []byte) (*dict, error) { return &d, nil } + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. 
+func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index bfb2e146c..e008b9929 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -149,7 +149,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) { if singleBlock { e.lowMem = true } - e.ensureHist(d.DictContentSize() + maxCompressedBlockSize) + e.ensureHist(d.ContentSize() + maxCompressedBlockSize) e.lowMem = low } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 830f5ba74..9819d4145 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -32,10 +32,9 @@ type match struct { length int32 rep int32 est int32 - _ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes } -const highScore = 25000 +const highScore = maxMatchLen * 8 // estBits will estimate output bits from predefined tables. func (m *match) estBits(bitsPerByte int32) { @@ -160,7 +159,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { // nextEmit is where in src the next emitLiteral should start from. nextEmit := s - cv := load6432(src, s) // Relative offsets offset1 := int32(blk.recentOffsets[0]) @@ -174,7 +172,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
s.litLen = uint32(until - nextEmit) } - _ = addLiterals if debugEncoder { println("recent offsets:", blk.recentOffsets) @@ -189,53 +186,96 @@ encodeLoop: panic("offset0 was 0") } - bestOf := func(a, b *match) *match { - if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 { - return a - } - return b - } - const goodEnough = 100 + const goodEnough = 250 + + cv := load6432(src, s) nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] - matchAt := func(offset int32, s int32, first uint32, rep int32) match { + // Set m to a match at offset if it looks like that will improve compression. + improve := func(m *match, offset int32, s int32, first uint32, rep int32) { if s-offset >= e.maxMatchOff || load3232(src, offset) != first { - return match{s: s, est: highScore} + return } if debugAsserts { + if offset <= 0 { + panic(offset) + } if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) } } - m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} - m.estBits(bitsPerByte) - return m + // Try to quick reject if we already have a long match. + if m.length > 16 { + left := len(src) - int(m.s+m.length) + // If we are too close to the end, keep as is. + if left <= 0 { + return + } + checkLen := m.length - (s - m.s) - 8 + if left > 2 && checkLen > 4 { + // Check 4 bytes, 4 bytes from the end of the current match. + a := load3232(src, offset+checkLen) + b := load3232(src, s+checkLen) + if a != b { + return + } + } + } + l := 4 + e.matchlen(s+4, offset+4, src) + if rep < 0 { + // Extend candidate match backwards as far as possible. 
+ tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { + s-- + offset-- + l++ + } + } + + cand := match{offset: offset, s: s, length: l, rep: rep} + cand.estBits(bitsPerByte) + if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { + *m = cand + } } - m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) - m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) - m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) - m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1) - best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4)) + best := match{s: s, est: highScore} + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) if canRepeat && best.length < goodEnough { - cv32 := uint32(cv >> 8) - spp := s + 1 - m1 := matchAt(spp-offset1, spp, cv32, 1) - m2 := matchAt(spp-offset2, spp, cv32, 2) - m3 := matchAt(spp-offset3, spp, cv32, 3) - best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) - if best.length > 0 { - cv32 = uint32(cv >> 24) - spp += 2 - m1 := matchAt(spp-offset1, spp, cv32, 1) - m2 := matchAt(spp-offset2, spp, cv32, 2) - m3 := matchAt(spp-offset3, spp, cv32, 3) - best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) + if s == nextEmit { + // Check repeats straight after a match. 
+ improve(&best, s-offset2, s, uint32(cv), 1|4) + improve(&best, s-offset3, s, uint32(cv), 2|4) + if offset1 > 1 { + improve(&best, s-(offset1-1), s, uint32(cv), 3|4) + } + } + + // If either no match or a non-repeat match, check at + 1 + if best.rep <= 0 { + cv32 := uint32(cv >> 8) + spp := s + 1 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + if best.rep < 0 { + cv32 = uint32(cv >> 24) + spp += 2 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + } } } // Load next and check... @@ -250,47 +290,45 @@ encodeLoop: if s >= sLimit { break encodeLoop } - cv = load6432(src, s) continue } - s++ candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] - cv = load6432(src, s) - cv2 := load6432(src, s+1) + cv = load6432(src, s+1) + cv2 := load6432(src, s+2) candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] // Short at s+1 - m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) // Long at s+1, s+2 - m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) - m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) - m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1) - m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1) - best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5)) + improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) + improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) if false { // Short at s+3. // Too often worse... 
- m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1) - best = bestOf(best, &m) + improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) } - // See if we can find a better match by checking where the current best ends. - // Use that offset to see if we can find a better full match. - if sAt := best.s + best.length; sAt < sLimit { - nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) - candidateEnd := e.longTable[nextHashL] - // Start check at a fixed offset to allow for a few mismatches. - // For this compression level 2 yields the best results. - const skipBeginning = 2 - if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 { - m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - bestEnd := bestOf(best, &m) - if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 { - m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - bestEnd = bestOf(bestEnd, &m) + + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + // We cannot do this if we have already indexed this position. + const skipBeginning = 2 + if best.s > s-skipBeginning { + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. 
+ if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + + if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + } } - best = bestEnd } } } @@ -303,51 +341,34 @@ encodeLoop: // We have a match, we can store the forward value if best.rep > 0 { - s = best.s var seq seq seq.matchLen = uint32(best.length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := best.s - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - repIndex := best.offset - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ + if debugAsserts && s <= nextEmit { + panic("s <= nextEmit") } - addLiterals(&seq, start) + addLiterals(&seq, best.s) - // rep 0 - seq.offset = uint32(best.rep) + // Repeat. If bit 4 is set, this is a non-lit repeat. + seq.offset = uint32(best.rep & 3) if debugSequences { println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - // Index match start+1 (long) -> s - 1 - index0 := s + // Index old s + 1 -> s - 1 + index0 := s + 1 s = best.s + best.length nextEmit = s if s >= sLimit { if debugEncoder { println("repeat ended", s, best.length) - } break encodeLoop } // Index skipped... 
off := index0 + e.cur - for index0 < s-1 { + for index0 < s { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) @@ -357,17 +378,19 @@ encodeLoop: index0++ } switch best.rep { - case 2: + case 2, 4 | 1: offset1, offset2 = offset2, offset1 - case 3: + case 3, 4 | 2: offset1, offset2, offset3 = offset3, offset1, offset2 + case 4 | 3: + offset1, offset2, offset3 = offset1-1, offset1, offset2 } - cv = load6432(src, s) continue } // A 4-byte match has been found. Update recent offsets. // We'll later see if more than 4 bytes. + index0 := s + 1 s = best.s t := best.offset offset1, offset2, offset3 = s-t, offset1, offset2 @@ -380,22 +403,9 @@ encodeLoop: panic("invalid offset") } - // Extend the n-byte match as long as possible. - l := best.length - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - // Write our sequence var seq seq + l := best.length seq.litLen = uint32(s - nextEmit) seq.matchLen = uint32(l - zstdMinMatch) if seq.litLen > 0 { @@ -412,10 +422,8 @@ encodeLoop: break encodeLoop } - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - // every entry - for index0 < s-1 { + // Index old s + 1 -> s - 1 + for index0 < s { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) @@ -424,50 +432,6 @@ encodeLoop: e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} index0++ } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - - // We have at least 4 byte match. 
- // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } } if int(nextEmit) < len(src) { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 315b1a8f2..cbc626eec 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -133,8 +133,7 @@ encodeLoop: if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. var seq seq - var length int32 - length = 4 + e.matchlen(s+6, repIndex+4, src) + length := 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. @@ -645,8 +644,7 @@ encodeLoop: if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. 
var seq seq - var length int32 - length = 4 + e.matchlen(s+6, repIndex+4, src) + length := 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 65c6c36dc..4de0aed0d 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -277,23 +277,9 @@ func (e *Encoder) nextBlock(final bool) error { s.eofWritten = true } - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - blk.encodeRaw(src) - // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. - case nil: - default: - s.err = err - return err + s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.err != nil { + return s.err } _, s.err = s.w.Write(blk.output) s.nWritten += int64(len(blk.output)) @@ -343,22 +329,8 @@ func (e *Encoder) nextBlock(final bool) error { } s.wWg.Done() }() - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - blk.encodeRaw(src) - // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. 
- case nil: - default: - s.writeErr = err + s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.writeErr != nil { return } _, s.writeErr = s.w.Write(blk.output) @@ -568,25 +540,15 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { // If we got the exact same number of literals as input, // assume the literals cannot be compressed. - err := errIncompressible oldout := blk.output - if len(blk.literals) != len(src) || len(src) != e.o.blockSize { - // Output directly to dst - blk.output = dst - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } + // Output directly to dst + blk.output = dst - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, src) - case nil: - dst = blk.output - default: + err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { panic(err) } + dst = blk.output blk.output = oldout } else { enc.Reset(e.o.dict, false) @@ -605,25 +567,11 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { if len(src) == 0 { blk.last = true } - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { - err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) - } - - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, todo) - blk.popOffsets() - case nil: - dst = append(dst, blk.output...) - default: + err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { panic(err) } + dst = append(dst, blk.output...) 
blk.reset(nil) } } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 6015f498a..faaf81921 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "math" + "math/bits" "runtime" "strings" ) @@ -38,7 +39,7 @@ func (o *encoderOptions) setDefault() { blockSize: maxCompressedBlockSize, windowSize: 8 << 20, level: SpeedDefault, - allLitEntropy: true, + allLitEntropy: false, lowMem: false, } } @@ -128,7 +129,7 @@ func WithEncoderPadding(n int) EOption { } // No need to waste our time. if n == 1 { - o.pad = 0 + n = 0 } if n > 1<<30 { return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") @@ -237,7 +238,7 @@ func WithEncoderLevel(l EncoderLevel) EOption { } } if !o.customALEntropy { - o.allLitEntropy = l > SpeedFastest + o.allLitEntropy = l > SpeedDefault } return nil @@ -305,7 +306,13 @@ func WithLowerEncoderMem(b bool) EOption { } // WithEncoderDict allows to register a dictionary that will be used for the encode. +// +// The slice dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// // The encoder *may* choose to use no dictionary instead for certain payloads. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithEncoderDict(dict []byte) EOption { return func(o *encoderOptions) error { d, err := loadDict(dict) @@ -316,3 +323,17 @@ func WithEncoderDict(dict []byte) EOption { return nil } } + +// WithEncoderDictRaw registers a dictionary that may be used by the encoder. +// +// The slice content may contain arbitrary data. It will be used as an initial +// history. 
+func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 65984bf07..53e160f7e 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -29,7 +29,7 @@ type frameDec struct { FrameContentSize uint64 - DictionaryID *uint32 + DictionaryID uint32 HasCheckSum bool SingleSegment bool } @@ -73,20 +73,20 @@ func (d *frameDec) reset(br byteBuffer) error { switch err { case io.EOF, io.ErrUnexpectedEOF: return io.EOF - default: - return err case nil: signature[0] = b[0] + default: + return err } // Read the rest, don't allow io.ErrUnexpectedEOF b, err = br.readSmall(3) switch err { case io.EOF: return io.EOF - default: - return err case nil: copy(signature[1:], b) + default: + return err } if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { @@ -155,7 +155,7 @@ func (d *frameDec) reset(br byteBuffer) error { // Read Dictionary_ID // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = nil + d.DictionaryID = 0 if size := fhd & 3; size != 0 { if size == 3 { size = 4 @@ -178,11 +178,7 @@ func (d *frameDec) reset(br byteBuffer) error { if debugDecoder { println("Dict size", size, "ID:", id) } - if id > 0 { - // ID 0 means "sorry, no dictionary anyway". 
- // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format - d.DictionaryID = &id - } + d.DictionaryID = id } // Read Frame_Content_Size @@ -297,13 +293,9 @@ func (d *frameDec) next(block *blockDec) error { return nil } -// checkCRC will check the checksum if the frame has one. +// checkCRC will check the checksum, assuming the frame has one. // Will return ErrCRCMismatch if crc check failed, otherwise nil. func (d *frameDec) checkCRC() error { - if !d.HasCheckSum { - return nil - } - // We can overwrite upper tmp now buf, err := d.rawInput.readSmall(4) if err != nil { @@ -311,10 +303,6 @@ func (d *frameDec) checkCRC() error { return err } - if d.o.ignoreChecksum { - return nil - } - want := binary.LittleEndian.Uint32(buf[:4]) got := uint32(d.crc.Sum64()) @@ -330,17 +318,13 @@ func (d *frameDec) checkCRC() error { return nil } -// consumeCRC reads the checksum data if the frame has one. +// consumeCRC skips over the checksum, assuming the frame has one. func (d *frameDec) consumeCRC() error { - if d.HasCheckSum { - _, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - return err - } + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) } - - return nil + return err } // runDecoder will run the decoder for the remainder of the frame. 
@@ -419,15 +403,8 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { if d.o.ignoreChecksum { err = d.consumeCRC() } else { - var n int - n, err = d.crc.Write(dst[crcStart:]) - if err == nil { - if n != len(dst)-crcStart { - err = io.ErrShortWrite - } else { - err = d.checkCRC() - } - } + d.crc.Write(dst[crcStart:]) + err = d.checkCRC() } } } diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go new file mode 100644 index 000000000..f41932b7a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s new file mode 100644 index 000000000..9a7655c0f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -0,0 +1,68 @@ +// Copied from S2 implementation. 
+ +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + TESTQ BX, BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go new file mode 100644 index 000000000..57b9c31c0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. 
+func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index f833d1541..9405fcf10 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -236,9 +236,12 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { maxBlockSize = s.windowSize } + if debugDecoder { + println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") + } for i := seqs - 1; i >= 0; i-- { if br.overread() { - printf("reading sequence %d, exceeded available data\n", seqs-i) + printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) return io.ErrUnexpectedEOF } var ll, mo, ml int @@ -314,9 +317,6 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { } size := ll + ml + len(out) if size-startSize > maxBlockSize { - if size-startSize == 424242 { - panic("here") - } return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } if size > cap(out) { @@ -427,8 +427,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { } } - // Check if space for literals - if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 191384adf..8adabd828 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ 
b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -5,6 +5,7 @@ package zstd import ( "fmt" + "io" "github.com/klauspost/compress/internal/cpuinfo" ) @@ -134,6 +135,9 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ctx.ll, ctx.litRemain+ctx.ll) + case errorOverread: + return true, io.ErrUnexpectedEOF + case errorNotEnoughSpace: size := ctx.outPosition + ctx.ll + ctx.ml if debugDecoder { @@ -148,7 +152,6 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { s.seqSize += ctx.litRemain if s.seqSize > maxBlockSize { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } err := br.close() if err != nil { @@ -203,6 +206,9 @@ const errorNotEnoughLiterals = 4 // error reported when capacity of `out` is too small const errorNotEnoughSpace = 5 +// error reported when bits are overread. +const errorOverread = 6 + // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. // // Please refer to seqdec_generic.go for the reference implementation. 
@@ -248,6 +254,10 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { litRemain: len(s.literals), } + if debugDecoder { + println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") + } + s.seqSize = 0 lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 var errCode int @@ -278,6 +288,8 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { case errorNotEnoughLiterals: ll := ctx.seqs[i].ll return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + case errorOverread: + return io.ErrUnexpectedEOF } return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) @@ -292,6 +304,9 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { if s.seqSize > maxBlockSize { return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } + if debugDecoder { + println("decode: ", br.remain(), "bits remain on stream. 
code:", errCode) + } err := br.close() if err != nil { printf("Closing sequences: %v, %+v\n", err, *br) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index b94993a07..b6f4ba6fc 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -38,7 +38,7 @@ sequenceDecs_decode_amd64_main_loop: sequenceDecs_decode_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_end + JLE sequenceDecs_decode_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decode_amd64_fill_end SHLQ $0x08, DX @@ -49,6 +49,10 @@ sequenceDecs_decode_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decode_amd64_fill_byte_by_byte +sequenceDecs_decode_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decode_amd64_fill_end: // Update offset MOVQ R9, AX @@ -105,7 +109,7 @@ sequenceDecs_decode_amd64_ml_update_zero: sequenceDecs_decode_amd64_fill_2_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_2_end + JLE sequenceDecs_decode_amd64_fill_2_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decode_amd64_fill_2_end SHLQ $0x08, DX @@ -116,6 +120,10 @@ sequenceDecs_decode_amd64_fill_2_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte +sequenceDecs_decode_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decode_amd64_fill_2_end: // Update literal length MOVQ DI, AX @@ -320,6 +328,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: CMOV TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 @@ -356,7 +369,7 @@ sequenceDecs_decode_56_amd64_main_loop: sequenceDecs_decode_56_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE 
sequenceDecs_decode_56_amd64_fill_end + JLE sequenceDecs_decode_56_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decode_56_amd64_fill_end SHLQ $0x08, DX @@ -367,6 +380,10 @@ sequenceDecs_decode_56_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte +sequenceDecs_decode_56_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decode_56_amd64_fill_end: // Update offset MOVQ R9, AX @@ -613,6 +630,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 @@ -649,7 +671,7 @@ sequenceDecs_decode_bmi2_main_loop: sequenceDecs_decode_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_end + JLE sequenceDecs_decode_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decode_bmi2_fill_end SHLQ $0x08, AX @@ -660,6 +682,10 @@ sequenceDecs_decode_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decode_bmi2_fill_byte_by_byte +sequenceDecs_decode_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decode_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -700,7 +726,7 @@ sequenceDecs_decode_bmi2_fill_end: sequenceDecs_decode_bmi2_fill_2_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_2_end + JLE sequenceDecs_decode_bmi2_fill_2_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decode_bmi2_fill_2_end SHLQ $0x08, AX @@ -711,6 +737,10 @@ sequenceDecs_decode_bmi2_fill_2_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte +sequenceDecs_decode_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decode_bmi2_fill_2_end: // Update literal length MOVQ $0x00000808, CX @@ -889,6 +919,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) 
RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 @@ -925,7 +960,7 @@ sequenceDecs_decode_56_bmi2_main_loop: sequenceDecs_decode_56_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decode_56_bmi2_fill_end + JLE sequenceDecs_decode_56_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decode_56_bmi2_fill_end SHLQ $0x08, AX @@ -936,6 +971,10 @@ sequenceDecs_decode_56_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte +sequenceDecs_decode_56_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decode_56_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -1140,6 +1179,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool // Requires: SSE TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 @@ -1804,7 +1848,7 @@ sequenceDecs_decodeSync_amd64_main_loop: sequenceDecs_decodeSync_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_end + JLE sequenceDecs_decodeSync_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_amd64_fill_end SHLQ $0x08, DX @@ -1815,6 +1859,10 @@ sequenceDecs_decodeSync_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte +sequenceDecs_decodeSync_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_amd64_fill_end: // Update offset MOVQ R9, AX @@ -1871,7 +1919,7 @@ sequenceDecs_decodeSync_amd64_ml_update_zero: sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_2_end + JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread CMPQ BX, 
$0x07 JLE sequenceDecs_decodeSync_amd64_fill_2_end SHLQ $0x08, DX @@ -1882,6 +1930,10 @@ sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte +sequenceDecs_decodeSync_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_amd64_fill_2_end: // Update literal length MOVQ DI, AX @@ -2291,6 +2343,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX @@ -2356,7 +2413,7 @@ sequenceDecs_decodeSync_bmi2_main_loop: sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_end + JLE sequenceDecs_decodeSync_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_bmi2_fill_end SHLQ $0x08, AX @@ -2367,6 +2424,10 @@ sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte +sequenceDecs_decodeSync_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -2407,7 +2468,7 @@ sequenceDecs_decodeSync_bmi2_fill_end: sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_2_end + JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_bmi2_fill_2_end SHLQ $0x08, AX @@ -2418,6 +2479,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte +sequenceDecs_decodeSync_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_bmi2_fill_2_end: // Update literal length MOVQ $0x00000808, CX @@ -2801,6 +2866,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ 
$0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX @@ -2866,7 +2936,7 @@ sequenceDecs_decodeSync_safe_amd64_main_loop: sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_end + JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_safe_amd64_fill_end SHLQ $0x08, DX @@ -2877,6 +2947,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte +sequenceDecs_decodeSync_safe_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_amd64_fill_end: // Update offset MOVQ R9, AX @@ -2933,7 +3007,7 @@ sequenceDecs_decodeSync_safe_amd64_ml_update_zero: sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end SHLQ $0x08, DX @@ -2944,6 +3018,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte +sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_amd64_fill_2_end: // Update literal length MOVQ DI, AX @@ -3455,6 +3533,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX @@ -3520,7 +3603,7 @@ sequenceDecs_decodeSync_safe_bmi2_main_loop: sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_safe_bmi2_fill_end 
SHLQ $0x08, AX @@ -3531,6 +3614,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte +sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -3571,7 +3658,7 @@ sequenceDecs_decodeSync_safe_bmi2_fill_end: sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end SHLQ $0x08, AX @@ -3582,6 +3669,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte +sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_bmi2_fill_2_end: // Update literal length MOVQ $0x00000808, CX @@ -4067,6 +4158,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index b1886f7c7..4be7cc736 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -9,7 +9,6 @@ import ( "errors" "log" "math" - "math/bits" ) // enable debug printing @@ -72,7 +71,6 @@ var ( ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") // ErrUnknownDictionary is returned if the dictionary ID is unknown. - // For the time being dictionaries are not supported. ErrUnknownDictionary = errors.New("unknown dictionary") // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. 
@@ -107,33 +105,12 @@ func printf(format string, a ...interface{}) { } } -// matchLen returns the maximum common prefix length of a and b. -// a must be the shortest of the two. -func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) - if diff != 0 { - return n + bits.TrailingZeros64(diff)>>3 - } - n += 8 - } - - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n - -} - func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[i:]) + return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) } func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[i:]) + return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) } type byter interface { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index 62de4dc59..4ce84e7a8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -20,6 +20,7 @@ import ( "time" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/types/known/timestamppb" ) // Counter is a Metric that represents a single numerical value that only ever @@ -66,7 +67,7 @@ type CounterVecOpts struct { CounterOpts // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Contraint + // of labels. Each label value will be constrained with the optional Constraint // function, if provided. 
VariableLabels ConstrainableLabels } @@ -90,8 +91,12 @@ func NewCounter(opts CounterOpts) Counter { nil, opts.ConstLabels, ) - result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} + if opts.now == nil { + opts.now = time.Now + } + result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now} result.init(result) // Init self-collection. + result.createdTs = timestamppb.New(opts.now()) return result } @@ -106,10 +111,12 @@ type counter struct { selfCollector desc *Desc + createdTs *timestamppb.Timestamp labelPairs []*dto.LabelPair exemplar atomic.Value // Containing nil or a *dto.Exemplar. - now func() time.Time // To mock out time.Now() for testing. + // now is for testing purposes, by default it's time.Now. + now func() time.Time } func (c *counter) Desc() *Desc { @@ -159,8 +166,7 @@ func (c *counter) Write(out *dto.Metric) error { exemplar = e.(*dto.Exemplar) } val := c.get() - - return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) + return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs) } func (c *counter) updateExemplar(v float64, l Labels) { @@ -200,13 +206,17 @@ func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec { opts.VariableLabels, opts.ConstLabels, ) + if opts.now == nil { + opts.now = time.Now + } return &CounterVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs)) + if len(lvs) != len(desc.variableLabels.names) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs)) } - result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} + result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now} result.init(result) // Init self-collection. 
+ result.createdTs = timestamppb.New(opts.now()) return result }), } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index deedc2dfb..68ffe3c24 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -52,7 +52,7 @@ type Desc struct { constLabelPairs []*dto.LabelPair // variableLabels contains names of labels and normalization function for // which the metric maintains variable values. - variableLabels ConstrainedLabels + variableLabels *compiledLabels // id is a hash of the values of the ConstLabels and fqName. This // must be unique among all registered descriptors and can therefore be // used as an identifier of the descriptor. @@ -93,7 +93,7 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const d := &Desc{ fqName: fqName, help: help, - variableLabels: variableLabels.constrainedLabels(), + variableLabels: variableLabels.compile(), } if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) @@ -103,7 +103,7 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const // their sorted label names) plus the fqName (at position 0). labelValues := make([]string, 1, len(constLabels)+1) labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels)) + labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names)) labelNameSet := map[string]struct{}{} // First add only the const label names and sort them... for labelName := range constLabels { @@ -128,13 +128,13 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. 
That prevents matching the label // dimension with a different mix between preset and variable labels. - for _, label := range d.variableLabels { - if !checkLabelName(label.Name) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName) + for _, label := range d.variableLabels.names { + if !checkLabelName(label) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName) return d } - labelNames = append(labelNames, "$"+label.Name) - labelNameSet[label.Name] = struct{}{} + labelNames = append(labelNames, "$"+label) + labelNameSet[label] = struct{}{} } if len(labelNames) != len(labelNameSet) { d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName) @@ -189,11 +189,19 @@ func (d *Desc) String() string { fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), ) } + vlStrings := make([]string, 0, len(d.variableLabels.names)) + for _, vl := range d.variableLabels.names { + if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { + vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) + } else { + vlStrings = append(vlStrings, vl) + } + } return fmt.Sprintf( - "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}", d.fqName, d.help, strings.Join(lpStrings, ","), - d.variableLabels, + strings.Join(vlStrings, ","), ) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go index c41ab37f3..de5a85629 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -48,7 +48,7 @@ func (e *expvarCollector) Collect(ch chan<- Metric) { continue } var v interface{} - labels := make([]string, len(desc.variableLabels)) + labels := make([]string, len(desc.variableLabels.names)) if err 
:= json.Unmarshal([]byte(expVar.String()), &v); err != nil { ch <- NewInvalidMetric(desc, err) continue diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index f1ea6c76f..dd2eac940 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -62,7 +62,7 @@ type GaugeVecOpts struct { GaugeOpts // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Contraint + // of labels. Each label value will be constrained with the optional Constraint // function, if provided. VariableLabels ConstrainableLabels } @@ -135,7 +135,7 @@ func (g *gauge) Sub(val float64) { func (g *gauge) Write(out *dto.Metric) error { val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) - return populateMetric(GaugeValue, val, g.labelPairs, nil, out) + return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil) } // GaugeVec is a Collector that bundles a set of Gauges that all share the same @@ -166,8 +166,8 @@ func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec { ) return &GaugeVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs)) + if len(lvs) != len(desc.variableLabels.names) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs)) } result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. 
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 8d818afe9..1feba62c6 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -25,6 +25,7 @@ import ( dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // nativeHistogramBounds for the frac of observed values. Only relevant for @@ -391,7 +392,7 @@ type HistogramOpts struct { // zero, it is replaced by default buckets. The default buckets are // DefBuckets if no buckets for a native histogram (see below) are used, // otherwise the default is no buckets. (In other words, if you want to - // use both reguler buckets and buckets for a native histogram, you have + // use both regular buckets and buckets for a native histogram, you have // to define the regular buckets here explicitly.) Buckets []float64 @@ -413,8 +414,8 @@ type HistogramOpts struct { // and 2, same as between 2 and 4, and 4 and 8, etc.). // // Details about the actually used factor: The factor is calculated as - // 2^(2^n), where n is an integer number between (and including) -8 and - // 4. n is chosen so that the resulting factor is the largest that is + // 2^(2^-n), where n is an integer number between (and including) -4 and + // 8. n is chosen so that the resulting factor is the largest that is // still smaller or equal to NativeHistogramBucketFactor. Note that the // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) // ). If NativeHistogramBucketFactor is greater than 1 but smaller than @@ -428,12 +429,12 @@ type HistogramOpts struct { // a major version bump. NativeHistogramBucketFactor float64 // All observations with an absolute value of less or equal - // NativeHistogramZeroThreshold are accumulated into a “zero” - // bucket. 
For best results, this should be close to a bucket - // boundary. This is usually the case if picking a power of two. If + // NativeHistogramZeroThreshold are accumulated into a “zero” bucket. + // For best results, this should be close to a bucket boundary. This is + // usually the case if picking a power of two. If // NativeHistogramZeroThreshold is left at zero, - // DefNativeHistogramZeroThreshold is used as the threshold. To configure - // a zero bucket with an actual threshold of zero (i.e. only + // DefNativeHistogramZeroThreshold is used as the threshold. To + // configure a zero bucket with an actual threshold of zero (i.e. only // observations of precisely zero will go into the zero bucket), set // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero // constant (or any negative float value). @@ -446,26 +447,34 @@ type HistogramOpts struct { // Histogram are sufficiently wide-spread. In particular, this could be // used as a DoS attack vector. Where the observed values depend on // external inputs, it is highly recommended to set a - // NativeHistogramMaxBucketNumber.) Once the set + // NativeHistogramMaxBucketNumber.) Once the set // NativeHistogramMaxBucketNumber is exceeded, the following strategy is - // enacted: First, if the last reset (or the creation) of the histogram - // is at least NativeHistogramMinResetDuration ago, then the whole - // histogram is reset to its initial state (including regular - // buckets). If less time has passed, or if - // NativeHistogramMinResetDuration is zero, no reset is - // performed. Instead, the zero threshold is increased sufficiently to - // reduce the number of buckets to or below - // NativeHistogramMaxBucketNumber, but not to more than - // NativeHistogramMaxZeroThreshold. Thus, if - // NativeHistogramMaxZeroThreshold is already at or below the current - // zero threshold, nothing happens at this step. 
After that, if the - // number of buckets still exceeds NativeHistogramMaxBucketNumber, the - // resolution of the histogram is reduced by doubling the width of the - // sparse buckets (up to a growth factor between one bucket to the next - // of 2^(2^4) = 65536, see above). + // enacted: + // - First, if the last reset (or the creation) of the histogram is at + // least NativeHistogramMinResetDuration ago, then the whole + // histogram is reset to its initial state (including regular + // buckets). + // - If less time has passed, or if NativeHistogramMinResetDuration is + // zero, no reset is performed. Instead, the zero threshold is + // increased sufficiently to reduce the number of buckets to or below + // NativeHistogramMaxBucketNumber, but not to more than + // NativeHistogramMaxZeroThreshold. Thus, if + // NativeHistogramMaxZeroThreshold is already at or below the current + // zero threshold, nothing happens at this step. + // - After that, if the number of buckets still exceeds + // NativeHistogramMaxBucketNumber, the resolution of the histogram is + // reduced by doubling the width of the sparse buckets (up to a + // growth factor between one bucket to the next of 2^(2^4) = 65536, + // see above). + // - Any increased zero threshold or reduced resolution is reset back + // to their original values once NativeHistogramMinResetDuration has + // passed (since the last reset or the creation of the histogram). NativeHistogramMaxBucketNumber uint32 NativeHistogramMinResetDuration time.Duration NativeHistogramMaxZeroThreshold float64 + + // now is for testing purposes, by default it's time.Now. + now func() time.Time } // HistogramVecOpts bundles the options to create a HistogramVec metric. @@ -475,7 +484,7 @@ type HistogramVecOpts struct { HistogramOpts // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Contraint + // of labels. 
Each label value will be constrained with the optional Constraint // function, if provided. VariableLabels ConstrainableLabels } @@ -499,12 +508,12 @@ func NewHistogram(opts HistogramOpts) Histogram { } func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues)) + if len(desc.variableLabels.names) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues)) } - for _, n := range desc.variableLabels { - if n.Name == bucketLabel { + for _, n := range desc.variableLabels.names { + if n == bucketLabel { panic(errBucketLabelNotAllowed) } } @@ -514,6 +523,10 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } + if opts.now == nil { + opts.now = time.Now + } + h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -521,8 +534,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, - lastResetTime: time.Now(), - now: time.Now, + lastResetTime: opts.now(), + now: opts.now, } if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { h.upperBounds = DefBuckets @@ -701,9 +714,11 @@ type histogram struct { nativeHistogramMaxZeroThreshold float64 nativeHistogramMaxBuckets uint32 nativeHistogramMinResetDuration time.Duration - lastResetTime time.Time // Protected by mtx. + // lastResetTime is protected by mtx. It is also used as created timestamp. + lastResetTime time.Time - now func() time.Time // To mock out time.Now() for testing. + // now is for testing purposes, by default it's time.Now. 
+ now func() time.Time } func (h *histogram) Desc() *Desc { @@ -742,9 +757,10 @@ func (h *histogram) Write(out *dto.Metric) error { waitForCooldown(count, coldCounts) his := &dto.Histogram{ - Bucket: make([]*dto.Bucket, len(h.upperBounds)), - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + CreatedTimestamp: timestamppb.New(h.lastResetTime), } out.Histogram = his out.Label = h.labelPairs @@ -782,6 +798,16 @@ func (h *histogram) Write(out *dto.Metric) error { his.ZeroCount = proto.Uint64(zeroBucket) his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative) his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive) + + // Add a no-op span to a histogram without observations and with + // a zero threshold of zero. Otherwise, a native histogram would + // look like a classic histogram to scrapers. + if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 { + his.PositiveSpan = []*dto.BucketSpan{{ + Offset: proto.Int32(0), + Length: proto.Uint32(0), + }} + } } addAndResetCounts(hotCounts, coldCounts) return nil @@ -854,20 +880,23 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket h.doubleBucketWidth(hotCounts, coldCounts) } -// maybeReset resests the whole histogram if at least h.nativeHistogramMinResetDuration +// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration // has been passed. It returns true if the histogram has been reset. The caller // must have locked h.mtx. 
-func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool { +func (h *histogram) maybeReset( + hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int, +) bool { // We are using the possibly mocked h.now() rather than // time.Since(h.lastResetTime) to enable testing. - if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { + if h.nativeHistogramMinResetDuration == 0 || + h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { return false } // Completely reset coldCounts. h.resetCounts(cold) // Repeat the latest observation to not lose it completely. cold.observe(value, bucket, true) - // Make coldCounts the new hot counts while ressetting countAndHotIdx. + // Make coldCounts the new hot counts while resetting countAndHotIdx. n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1) count := n & ((1 << 63) - 1) waitForCooldown(count, hot) @@ -1176,6 +1205,7 @@ type constHistogram struct { sum float64 buckets map[float64]uint64 labelPairs []*dto.LabelPair + createdTs *timestamppb.Timestamp } func (h *constHistogram) Desc() *Desc { @@ -1183,7 +1213,9 @@ func (h *constHistogram) Desc() *Desc { } func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} + his := &dto.Histogram{ + CreatedTimestamp: h.createdTs, + } buckets := make([]*dto.Bucket, 0, len(h.buckets)) @@ -1230,7 +1262,7 @@ func NewConstHistogram( if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } return &constHistogram{ @@ -1324,7 +1356,7 @@ func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { // Multiple spans with only small gaps in between are probably // encoded more efficiently as one larger span with a few empty // buckets. 
Needs some research to find the sweet spot. For now, - // we assume that gaps of one ore two buckets should not create + // we assume that gaps of one or two buckets should not create // a new span. iDelta := int32(i - nextI) if n == 0 || iDelta > 2 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index fd0750f2c..a595a2036 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -14,7 +14,7 @@ // It provides tools to compare sequences of strings and generate textual diffs. // // Maintaining `GetUnifiedDiffString` here because original repository -// (https://github.com/pmezard/go-difflib) is no loger maintained. +// (https://github.com/pmezard/go-difflib) is no longer maintained. package internal import ( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index 63ff8683c..b3c4eca2b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -32,19 +32,15 @@ import ( // create a Desc. type Labels map[string]string +// LabelConstraint normalizes label values. +type LabelConstraint func(string) string + // ConstrainedLabels represents a label name and its constrain function // to normalize label values. This type is commonly used when constructing // metric vector Collectors. 
type ConstrainedLabel struct { Name string - Constraint func(string) string -} - -func (cl ConstrainedLabel) Constrain(v string) string { - if cl.Constraint == nil { - return v - } - return cl.Constraint(v) + Constraint LabelConstraint } // ConstrainableLabels is an interface that allows creating of labels that can @@ -58,7 +54,7 @@ func (cl ConstrainedLabel) Constrain(v string) string { // }, // }) type ConstrainableLabels interface { - constrainedLabels() ConstrainedLabels + compile() *compiledLabels labelNames() []string } @@ -67,8 +63,20 @@ type ConstrainableLabels interface { // metric vector Collectors. type ConstrainedLabels []ConstrainedLabel -func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels { - return cls +func (cls ConstrainedLabels) compile() *compiledLabels { + compiled := &compiledLabels{ + names: make([]string, len(cls)), + labelConstraints: map[string]LabelConstraint{}, + } + + for i, label := range cls { + compiled.names[i] = label.Name + if label.Constraint != nil { + compiled.labelConstraints[label.Name] = label.Constraint + } + } + + return compiled } func (cls ConstrainedLabels) labelNames() []string { @@ -92,18 +100,36 @@ func (cls ConstrainedLabels) labelNames() []string { // } type UnconstrainedLabels []string -func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels { - constrainedLabels := make([]ConstrainedLabel, len(uls)) - for i, l := range uls { - constrainedLabels[i] = ConstrainedLabel{Name: l} +func (uls UnconstrainedLabels) compile() *compiledLabels { + return &compiledLabels{ + names: uls, } - return constrainedLabels } func (uls UnconstrainedLabels) labelNames() []string { return uls } +type compiledLabels struct { + names []string + labelConstraints map[string]LabelConstraint +} + +func (cls *compiledLabels) compile() *compiledLabels { + return cls +} + +func (cls *compiledLabels) labelNames() []string { + return cls.names +} + +func (cls *compiledLabels) constrain(labelName, value string) string { + 
if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil { + return fn(value) + } + return value +} + // reservedLabelPrefix is a prefix which is not legal in user-supplied // label names. const reservedLabelPrefix = "__" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 07bbc9d76..f018e5723 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -92,6 +92,9 @@ type Opts struct { // machine_role metric). See also // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels ConstLabels Labels + + // now is for testing purposes, by default it's time.Now. + now func() time.Time } // BuildFQName joins the given three name components by "_". Empty name diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index 3793036ad..356edb786 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -389,15 +389,12 @@ func isLabelCurried(c prometheus.Collector, label string) bool { return true } -// emptyLabels is a one-time allocation for non-partitioned metrics to avoid -// unnecessary allocations on each request. 
-var emptyLabels = prometheus.Labels{} - func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { + labels := prometheus.Labels{} + if !(code || method) { - return emptyLabels + return labels } - labels := prometheus.Labels{} if code { labels["code"] = sanitizeCode(status) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 44da9433b..5e2ced25a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -548,7 +548,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { goroutineBudget-- runtime.Gosched() } - // Once both checkedMetricChan and uncheckdMetricChan are closed + // Once both checkedMetricChan and uncheckedMetricChan are closed // and drained, the contraption above will nil out cmc and umc, // and then we can leave the collect loop here. if cmc == nil && umc == nil { @@ -963,9 +963,9 @@ func checkDescConsistency( // Is the desc consistent with the content of the metric? 
lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) copy(lpsFromDesc, desc.constLabelPairs) - for _, l := range desc.variableLabels { + for _, l := range desc.variableLabels.names { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l.Name), + Name: proto.String(l), }) } if len(lpsFromDesc) != len(dtoMetric.Label) { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index dd359264e..146270444 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -26,6 +26,7 @@ import ( "github.com/beorn7/perks/quantile" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // quantileLabel is used for the label that defines the quantile in a @@ -145,6 +146,9 @@ type SummaryOpts struct { // is the internal buffer size of the underlying package // "github.com/bmizerany/perks/quantile"). BufCap uint32 + + // now is for testing purposes, by default it's time.Now. + now func() time.Time } // SummaryVecOpts bundles the options to create a SummaryVec metric. @@ -154,7 +158,7 @@ type SummaryVecOpts struct { SummaryOpts // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Contraint + // of labels. Each label value will be constrained with the optional Constraint // function, if provided. 
VariableLabels ConstrainableLabels } @@ -188,12 +192,12 @@ func NewSummary(opts SummaryOpts) Summary { } func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues)) + if len(desc.variableLabels.names) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues)) } - for _, n := range desc.variableLabels { - if n.Name == quantileLabel { + for _, n := range desc.variableLabels.names { + if n == quantileLabel { panic(errQuantileLabelNotAllowed) } } @@ -222,6 +226,9 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { opts.BufCap = DefBufCap } + if opts.now == nil { + opts.now = time.Now + } if len(opts.Objectives) == 0 { // Use the lock-free implementation of a Summary without objectives. s := &noObjectivesSummary{ @@ -230,6 +237,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { counts: [2]*summaryCounts{{}, {}}, } s.init(s) // Init self-collection. + s.createdTs = timestamppb.New(opts.now()) return s } @@ -245,7 +253,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { coldBuf: make([]float64, 0, opts.BufCap), streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), } - s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.headStreamExpTime = opts.now().Add(s.streamDuration) s.hotBufExpTime = s.headStreamExpTime for i := uint32(0); i < opts.AgeBuckets; i++ { @@ -259,6 +267,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { sort.Float64s(s.sortedObjectives) s.init(s) // Init self-collection. 
+ s.createdTs = timestamppb.New(opts.now()) return s } @@ -286,6 +295,8 @@ type summary struct { headStream *quantile.Stream headStreamIdx int headStreamExpTime, hotBufExpTime time.Time + + createdTs *timestamppb.Timestamp } func (s *summary) Desc() *Desc { @@ -307,7 +318,9 @@ func (s *summary) Observe(v float64) { } func (s *summary) Write(out *dto.Metric) error { - sum := &dto.Summary{} + sum := &dto.Summary{ + CreatedTimestamp: s.createdTs, + } qs := make([]*dto.Quantile, 0, len(s.objectives)) s.bufMtx.Lock() @@ -440,6 +453,8 @@ type noObjectivesSummary struct { counts [2]*summaryCounts labelPairs []*dto.LabelPair + + createdTs *timestamppb.Timestamp } func (s *noObjectivesSummary) Desc() *Desc { @@ -490,8 +505,9 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error { } sum := &dto.Summary{ - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + CreatedTimestamp: s.createdTs, } out.Summary = sum @@ -681,6 +697,7 @@ type constSummary struct { sum float64 quantiles map[float64]float64 labelPairs []*dto.LabelPair + createdTs *timestamppb.Timestamp } func (s *constSummary) Desc() *Desc { @@ -688,7 +705,9 @@ func (s *constSummary) Desc() *Desc { } func (s *constSummary) Write(out *dto.Metric) error { - sum := &dto.Summary{} + sum := &dto.Summary{ + CreatedTimestamp: s.createdTs, + } qs := make([]*dto.Quantile, 0, len(s.quantiles)) sum.SampleCount = proto.Uint64(s.count) @@ -737,7 +756,7 @@ func NewConstSummary( if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } return &constSummary{ diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go 
b/vendor/github.com/prometheus/client_golang/prometheus/value.go index 5f6bb8001..cc23011fa 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -14,6 +14,7 @@ package prometheus import ( + "errors" "fmt" "sort" "time" @@ -91,7 +92,7 @@ func (v *valueFunc) Desc() *Desc { } func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) + return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil) } // NewConstMetric returns a metric with one fixed value that cannot be @@ -105,12 +106,12 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } metric := &dto.Metric{} - if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil { + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil { return nil, err } @@ -130,6 +131,43 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal return m } +// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters +// with created timestamp set and returns an error for other metric types. 
+func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + switch valueType { + case CounterValue: + break + default: + return nil, errors.New("created timestamps are only supported for counters") + } + + metric := &dto.Metric{} + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil { + return nil, err + } + + return &constMetric{ + desc: desc, + metric: metric, + }, nil +} + +// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where +// NewConstMetricWithCreatedTimestamp would have returned an error. +func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric { + m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} + type constMetric struct { desc *Desc metric *dto.Metric @@ -153,11 +191,12 @@ func populateMetric( labelPairs []*dto.LabelPair, e *dto.Exemplar, m *dto.Metric, + ct *timestamppb.Timestamp, ) error { m.Label = labelPairs switch t { case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} + m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct} case GaugeValue: m.Gauge = &dto.Gauge{Value: proto.Float64(v)} case UntypedValue: @@ -176,19 +215,19 @@ func populateMetric( // This function is only needed for custom Metric implementations. See MetricVec // example. 
func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { - totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs) if totalLen == 0 { // Super fast path. return nil } - if len(desc.variableLabels) == 0 { + if len(desc.variableLabels.names) == 0 { // Moderately fast path. return desc.constLabelPairs } labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, l := range desc.variableLabels { + for i, l := range desc.variableLabels.names { labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(l.Name), + Name: proto.String(l), Value: proto.String(labelValues[i]), }) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index f0d0015a0..955cfd59f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -20,24 +20,6 @@ import ( "github.com/prometheus/common/model" ) -var labelsPool = &sync.Pool{ - New: func() interface{} { - return make(Labels) - }, -} - -func getLabelsFromPool() Labels { - return labelsPool.Get().(Labels) -} - -func putLabelsToPool(labels Labels) { - for k := range labels { - delete(labels, k) - } - - labelsPool.Put(labels) -} - // MetricVec is a Collector to bundle metrics of the same name that differ in // their label values. MetricVec is not used directly but as a building block // for implementations of vectors of a given metric type, like GaugeVec, @@ -91,6 +73,7 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { // See also the CounterVec example. 
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { lvs = constrainLabelValues(m.desc, lvs, m.curry) + h, err := m.hashLabelValues(lvs) if err != nil { return false @@ -110,8 +93,8 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { // This method is used for the same purpose as DeleteLabelValues(...string). See // there for pros and cons of the two methods. func (m *MetricVec) Delete(labels Labels) bool { - labels = constrainLabels(m.desc, labels) - defer putLabelsToPool(labels) + labels, closer := constrainLabels(m.desc, labels) + defer closer() h, err := m.hashLabels(labels) if err != nil { @@ -128,8 +111,8 @@ func (m *MetricVec) Delete(labels Labels) bool { // Note that curried labels will never be matched if deleting from the curried vector. // To match curried labels with DeletePartialMatch, it must be called on the base vector. func (m *MetricVec) DeletePartialMatch(labels Labels) int { - labels = constrainLabels(m.desc, labels) - defer putLabelsToPool(labels) + labels, closer := constrainLabels(m.desc, labels) + defer closer() return m.metricMap.deleteByLabels(labels, m.curry) } @@ -169,11 +152,11 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { oldCurry = m.curry iCurry int ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label.Name] + for i, labelName := range m.desc.variableLabels.names { + val, ok := labels[labelName] if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { if ok { - return nil, fmt.Errorf("label name %q is already curried", label.Name) + return nil, fmt.Errorf("label name %q is already curried", labelName) } newCurry = append(newCurry, oldCurry[iCurry]) iCurry++ @@ -181,7 +164,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { if !ok { continue // Label stays uncurried. 
} - newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)}) + newCurry = append(newCurry, curriedLabelValue{ + i, + m.desc.variableLabels.constrain(labelName, val), + }) } } if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { @@ -250,8 +236,8 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { // around MetricVec, implementing a vector for a specific Metric implementation, // for example GaugeVec. func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { - labels = constrainLabels(m.desc, labels) - defer putLabelsToPool(labels) + labels, closer := constrainLabels(m.desc, labels) + defer closer() h, err := m.hashLabels(labels) if err != nil { @@ -262,7 +248,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { } func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { - if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil { return 0, err } @@ -271,7 +257,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { curry = m.curry iVals, iCurry int ) - for i := 0; i < len(m.desc.variableLabels); i++ { + for i := 0; i < len(m.desc.variableLabels.names); i++ { if iCurry < len(curry) && curry[iCurry].index == i { h = m.hashAdd(h, curry[iCurry].value) iCurry++ @@ -285,7 +271,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { } func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { - if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil { return 0, err } @@ -294,17 +280,17 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { curry = m.curry iCurry int ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label.Name] + for i, 
labelName := range m.desc.variableLabels.names { + val, ok := labels[labelName] if iCurry < len(curry) && curry[iCurry].index == i { if ok { - return 0, fmt.Errorf("label name %q is already curried", label.Name) + return 0, fmt.Errorf("label name %q is already curried", labelName) } h = m.hashAdd(h, curry[iCurry].value) iCurry++ } else { if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label.Name) + return 0, fmt.Errorf("label name %q missing in label map", labelName) } h = m.hashAdd(h, val) } @@ -482,7 +468,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values [] func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { for l, v := range labels { // Check if the target label exists in our metrics and get the index. - varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames()) + varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names) if validLabel { // Check the value of that label against the target value. // We don't consider curried values in partial matches. 
@@ -626,7 +612,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe return false } iCurry := 0 - for i, k := range desc.variableLabels { + for i, k := range desc.variableLabels.names { if iCurry < len(curry) && curry[iCurry].index == i { if values[i] != curry[iCurry].value { return false @@ -634,7 +620,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe iCurry++ continue } - if values[i] != labels[k.Name] { + if values[i] != labels[k] { return false } } @@ -644,13 +630,13 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { labelValues := make([]string, len(labels)+len(curry)) iCurry := 0 - for i, k := range desc.variableLabels { + for i, k := range desc.variableLabels.names { if iCurry < len(curry) && curry[iCurry].index == i { labelValues[i] = curry[iCurry].value iCurry++ continue } - labelValues[i] = labels[k.Name] + labelValues[i] = labels[k] } return labelValues } @@ -670,20 +656,37 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { return labelValues } -func constrainLabels(desc *Desc, labels Labels) Labels { - constrainedLabels := getLabelsFromPool() - for l, v := range labels { - if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok { - v = desc.variableLabels[i].Constrain(v) - } +var labelsPool = &sync.Pool{ + New: func() interface{} { + return make(Labels) + }, +} - constrainedLabels[l] = v +func constrainLabels(desc *Desc, labels Labels) (Labels, func()) { + if len(desc.variableLabels.labelConstraints) == 0 { + // Fast path when there's no constraints + return labels, func() {} } - return constrainedLabels + constrainedLabels := labelsPool.Get().(Labels) + for l, v := range labels { + constrainedLabels[l] = desc.variableLabels.constrain(l, v) + } + + return constrainedLabels, func() { + for k := range constrainedLabels { + 
delete(constrainedLabels, k) + } + labelsPool.Put(constrainedLabels) + } } func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string { + if len(desc.variableLabels.labelConstraints) == 0 { + // Fast path when there's no constraints + return lvs + } + constrainedValues := make([]string, len(lvs)) var iCurry, iLVs int for i := 0; i < len(lvs)+len(curry); i++ { @@ -692,8 +695,11 @@ func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) [ continue } - if i < len(desc.variableLabels) { - constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs]) + if i < len(desc.variableLabels.names) { + constrainedValues[iLVs] = desc.variableLabels.constrain( + desc.variableLabels.names[i], + lvs[iLVs], + ) } else { constrainedValues[iLVs] = lvs[iLVs] } diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 2b5bca4b9..cee360db7 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -215,8 +215,9 @@ type Counter struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` + CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` } func (x *Counter) Reset() { @@ -265,6 +266,13 @@ func (x *Counter) GetExemplar() *Exemplar { return nil } +func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.CreatedTimestamp + } + return nil +} + type Quantile struct { state protoimpl.MessageState sizeCache 
protoimpl.SizeCache @@ -325,9 +333,10 @@ type Summary struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` } func (x *Summary) Reset() { @@ -383,6 +392,13 @@ func (x *Summary) GetQuantile() []*Quantile { return nil } +func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.CreatedTimestamp + } + return nil +} + type Untyped struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -439,7 +455,8 @@ type Histogram struct { SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0. SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` // Buckets for the conventional histogram. - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. 
+ CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. @@ -457,6 +474,9 @@ type Histogram struct { NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket. // Positive buckets for the native histogram. + // Use a no-op span (offset 0, length 0) for a native histogram without any + // observations yet and with a zero_threshold of 0. Otherwise, it would be + // indistinguishable from a classic histogram. 
PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float @@ -525,6 +545,13 @@ func (x *Histogram) GetBucket() []*Bucket { return nil } +func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.CreatedTimestamp + } + return nil +} + func (x *Histogram) GetSchema() int32 { if x != nil && x.Schema != nil { return *x.Schema @@ -972,137 +999,151 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x22, 0x5b, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, - 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, - 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, - 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, - 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x87, 0x01, 0x0a, - 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 
0x70, + 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, + 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, + 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, + 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x18, 0x03, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x05, 0x0a, 0x09, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, - 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, - 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x01, 0x52, 0x09, 0x73, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47, + 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, + 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, + 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 
0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, + 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, + 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20, + 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, + 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, + 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 
0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, + 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, + 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, + 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, - 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, - 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x04, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 
0x6d, - 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x61, 0x6d, 0x70, - 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, - 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, - 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, - 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, - 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, - 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, - 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, - 0x6f, 0x61, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x73, 0x70, 0x61, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x12, 
0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, + 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, + 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, - 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, - 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 
0x65, 0x67, 0x61, 0x74, - 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, - 0x6e, 0x52, 0x0c, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, - 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, - 0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, - 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, - 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, - 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, - 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, - 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, - 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x74, 0x2e, 
0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, + 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, - 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, - 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, - 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 
0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, - 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, - 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, - 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, - 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, - 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, - 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, - 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, - 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, - 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 
0x6f, - 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, - 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, - 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, - 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, - 0x62, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, - 0x07, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, - 0x55, 0x47, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, - 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, - 0x0d, 0x0a, 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, - 0x0a, 0x0f, 0x47, 0x41, 0x55, 
0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, - 0x4d, 0x10, 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, - 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, - 0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, - 0x67, 0x6f, 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, + 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, + 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, + 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, + 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 
0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, 0x62, 0x0a, 0x0a, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x55, + 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10, + 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0b, + 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x48, + 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x41, + 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x05, 0x42, + 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, 0x3b, 0x69, + 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, } var ( @@ -1137,26 +1178,29 @@ var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{ } var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar - 4, // 1: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile - 8, // 2: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket - 9, // 3: io.prometheus.client.Histogram.negative_span:type_name -> 
io.prometheus.client.BucketSpan - 9, // 4: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan - 10, // 5: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar - 1, // 6: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair - 13, // 7: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp - 1, // 8: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair - 2, // 9: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge - 3, // 10: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter - 5, // 11: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary - 6, // 12: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped - 7, // 13: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram - 0, // 14: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType - 11, // 15: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric - 16, // [16:16] is the sub-list for method output_type - 16, // [16:16] is the sub-list for method input_type - 16, // [16:16] is the sub-list for extension type_name - 16, // [16:16] is the sub-list for extension extendee - 0, // [0:16] is the sub-list for field type_name + 13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp + 4, // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile + 13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp + 8, // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket + 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp + 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan + 9, // 
7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan + 10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 10: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 12: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary + 6, // 15: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 17: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 19, // [19:19] is the sub-list for method output_type + 19, // [19:19] is the sub-list for method input_type + 19, // [19:19] is the sub-list for extension type_name + 19, // [19:19] is the sub-list for extension extendee + 0, // [0:19] is the sub-list for field type_name } func init() { file_io_prometheus_client_metrics_proto_init() } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index f4fc88455..906397815 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -132,7 +132,10 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error { } // Pick off one MetricFamily per Decode until there's nothing left. 
for key, fam := range d.fams { - *v = *fam + v.Name = fam.Name + v.Help = fam.Help + v.Type = fam.Type + v.Metric = fam.Metric delete(d.fams, key) return nil } diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 64dc0eb40..7f611ffaa 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,9 +18,9 @@ import ( "io" "net/http" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/matttproud/golang_protobuf_extensions/pbutil" "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "google.golang.org/protobuf/encoding/prototext" dto "github.com/prometheus/client_model/go" ) @@ -99,8 +99,11 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { return FmtText } - if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { - return FmtOpenMetrics + if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { + if ver == OpenMetricsVersion_1_0_0 { + return FmtOpenMetrics_1_0_0 + } + return FmtOpenMetrics_0_0_1 } } return FmtText @@ -133,7 +136,7 @@ func NewEncoder(w io.Writer, format Format) Encoder { case FmtProtoText: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + _, err := fmt.Fprintln(w, prototext.Format(v)) return err }, close: func() error { return nil }, @@ -146,7 +149,7 @@ func NewEncoder(w io.Writer, format Format) Encoder { }, close: func() error { return nil }, } - case FmtOpenMetrics: + case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0: return encoderCloser{ encode: func(v *dto.MetricFamily) error { _, err := MetricFamilyToOpenMetrics(w, v) diff --git 
a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 0f176fa64..c4cb20f0d 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -19,20 +19,22 @@ type Format string // Constants to assemble the Content-Type values for the different wire protocols. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` - OpenMetricsVersion = "0.0.1" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + OpenMetricsVersion_0_0_1 = "0.0.1" + OpenMetricsVersion_1_0_0 = "1.0.0" // The Content-Type values for the different wire protocols. 
- FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` - FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index ac2482782..35db1cc9d 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -24,8 +24,8 @@ import ( dto "github.com/prometheus/client_model/go" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" ) // A stateFn is a function that represents a state in a state machine. 
By diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index a197699a1..c24864a92 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -2,6 +2,7 @@ linters: enable: - godot + - misspell - revive linter-settings: @@ -10,3 +11,5 @@ linter-settings: exclude: # Ignore "See: URL" - 'See:' + misspell: + locale: US diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index b111d2562..0ce7ea461 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -49,19 +49,19 @@ endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) -ifneq ($(shell which gotestsum),) +ifneq ($(shell command -v gotestsum > /dev/null),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif endif -PROMU_VERSION ?= 0.14.0 +PROMU_VERSION ?= 0.15.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.51.2 +GOLANGCI_LINT_VERSION ?= v1.53.3 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -178,7 +178,7 @@ endif .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell which yamllint)) +ifeq (, $(shell command -v yamllint > /dev/null)) @echo "yamllint not installed so skipping" else yamllint . 
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md index 43c37735a..1224816c2 100644 --- a/vendor/github.com/prometheus/procfs/README.md +++ b/vendor/github.com/prometheus/procfs/README.md @@ -51,11 +51,11 @@ ensure the `fixtures` directory is up to date by removing the existing directory extracting the ttar file using `make fixtures/.unpacked` or just `make test`. ```bash -rm -rf fixtures +rm -rf testdata/fixtures make test ``` Next, make the required changes to the extracted files in the `fixtures` directory. When the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file based on the updated `fixtures` directory. And finally, verify the changes using -`git diff fixtures.ttar`. +`git diff testdata/fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index 68f36e888..28783e2dd 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -55,7 +55,7 @@ type ARPEntry struct { func (fs FS) GatherARPEntries() ([]ARPEntry, error) { data, err := os.ReadFile(fs.proc.Path("net/arp")) if err != nil { - return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) + return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) } return parseARPEntries(data) @@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { } else if width == expectedDataWidth { entry, err := parseARPEntry(columns) if err != nil { - return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err) + return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) } entries = append(entries, entry) } else { - return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) + return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", 
ErrFileParse, width, expectedDataWidth, err) } } diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index f5b7939b2..4a173636c 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -55,7 +55,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { parts := strings.Fields(line) if len(parts) < 4 { - return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts) } node := strings.TrimRight(parts[1], ",") @@ -66,7 +66,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { bucketCount = arraySize } else { if bucketCount != arraySize { - return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize) } } @@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { for i := 0; i < arraySize; i++ { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { - return nil, fmt.Errorf("invalid value in buddyinfo: %w", err) + return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) } } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index 06968ca2e..f4f5501c6 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -79,7 +79,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, 
firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) @@ -192,9 +192,10 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(info)) firstLine := firstNonEmptyLine(scanner) - match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) + match, err := regexp.MatchString("^[Pp]rocessor", firstLine) if !match || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) + } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -258,7 +259,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -283,7 +284,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { if strings.HasPrefix(line, "processor") { match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) if len(match) < 2 { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } cpu := commonCPUInfo v, err := strconv.ParseUint(match[1], 0, 32) @@ -343,7 +344,7 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -421,7 +422,7 @@ func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { firstLine := 
firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) @@ -466,7 +467,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go index 5048ad1f2..9a73e2639 100644 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -55,12 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) { path := fs.proc.Path("crypto") b, err := util.ReadFileNoStat(path) if err != nil { - return nil, fmt.Errorf("error reading crypto %q: %w", path, err) + return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err) + } crypto, err := parseCrypto(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("error parsing crypto %q: %w", path, err) + return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err) } return crypto, nil @@ -83,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) { kv := strings.Split(text, ":") if len(kv) != 2 { - return nil, fmt.Errorf("malformed crypto line: %q", text) + return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text) } k := strings.TrimSpace(kv[0]) diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index 60c551e02..4980c875b 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ 
b/vendor/github.com/prometheus/procfs/fs.go @@ -20,8 +20,8 @@ import ( // FS represents the pseudo-filesystem sys, which provides an interface to // kernel data structures. type FS struct { - proc fs.FS - real bool + proc fs.FS + isReal bool } // DefaultMountPoint is the common mount point of the proc filesystem. @@ -41,10 +41,10 @@ func NewFS(mountPoint string) (FS, error) { return FS{}, err } - real, err := isRealProc(mountPoint) + isReal, err := isRealProc(mountPoint) if err != nil { return FS{}, err } - return FS{fs, real}, nil + return FS{fs, isReal}, nil } diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 800576968..13d74e395 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build netbsd || openbsd || solaris || windows -// +build netbsd openbsd solaris windows +//go:build netbsd || openbsd || solaris || windows || nostatfs +// +build netbsd openbsd solaris windows nostatfs package procfs diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go index 6233217ad..bee151445 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_type.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build !netbsd && !openbsd && !solaris && !windows -// +build !netbsd,!openbsd,!solaris,!windows +//go:build !netbsd && !openbsd && !solaris && !windows && !nostatfs +// +build !netbsd,!openbsd,!solaris,!windows,!nostatfs package procfs diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index f8070e6e2..f560a8db3 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { m, err := parseFscacheinfo(bytes.NewReader(b)) if err != nil { - return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err) + return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err) } return *m, nil @@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { func setFSCacheFields(fields []string, setFields ...*uint64) error { var err error if len(fields) < len(setFields) { - return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) + return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err) } for i := range setFields { @@ -263,7 +263,7 @@ func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { for s.Scan() { fields := strings.Fields(s.Text()) if len(fields) < 2 { - return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) + return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text()) } switch fields[0] { diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index 391c07957..5a145bbfe 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -221,15 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) { case 46: ip = net.ParseIP(s[1:40]) if ip == nil { - return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + return nil, 0, 
fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) } default: - return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err) } portString := s[len(s)-4:] if len(portString) != 4 { - return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + return nil, 0, + fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err) } port, err := strconv.ParseUint(portString, 16, 16) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go index 0096cafbd..59465c5bb 100644 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { loads := make([]float64, 3) parts := strings.Fields(string(loadavgBytes)) if len(parts) < 3 { - return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes)) + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes)) } var err error for i, load := range parts[0:3] { loads[i], err = strconv.ParseFloat(load, 64) if err != nil { - return nil, fmt.Errorf("could not parse load %q: %w", load, err) + return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) } } return &LoadAvg{ diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index a95c889cb..fdd4b9544 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -70,7 +70,7 @@ func (fs FS) MDStat() ([]MDStat, error) { } mdstat, err := parseMDStat(data) if err != nil { - return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err) + return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) } return mdstat, nil } @@ -90,13 
+90,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { deviceFields := strings.Fields(line) if len(deviceFields) < 3 { - return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) + return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line) } mdName := deviceFields[0] // mdx state := deviceFields[2] // active or inactive if len(lines) <= i+3 { - return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName) + return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName) } // Failed disks have the suffix (F) & Spare disks have the suffix (S). @@ -105,7 +105,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) if err != nil { - return nil, fmt.Errorf("error parsing md device lines: %w", err) + return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) } syncLineIdx := i + 2 @@ -140,7 +140,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { } else { syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) if err != nil { - return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) + return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) } } } @@ -168,13 +168,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { statusFields := strings.Fields(statusLine) if len(statusFields) < 1 { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } sizeStr := statusFields[0] size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected 
statusline %q: %w", ErrFileParse, statusLine, err) } if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { @@ -189,17 +189,17 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in matches := statusLineRE.FindStringSubmatch(statusLine) if len(matches) != 5 { - return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) + return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err) } down = int64(strings.Count(matches[4], "_")) @@ -209,42 +209,42 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) { matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err) } syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err) + return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err) } // Get percentage complete matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, 0, 0, 0, 
fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine) + return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) } pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) if err != nil { - return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) } // Get time expected left to complete matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine) + return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine) } finish, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) } // Get recovery speed matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine) + return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) } speed, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) } return syncedBlocks, pct, finish, speed, nil diff --git a/vendor/github.com/prometheus/procfs/meminfo.go 
b/vendor/github.com/prometheus/procfs/meminfo.go index f65e174e5..eaf00e224 100644 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -152,7 +152,7 @@ func (fs FS) Meminfo() (Meminfo, error) { m, err := parseMemInfo(bytes.NewReader(b)) if err != nil { - return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err) + return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err) } return *m, nil @@ -165,7 +165,7 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { // Each line has at least a name and value; we ignore the unit. fields := strings.Fields(s.Text()) if len(fields) < 2 { - return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text()) } v, err := strconv.ParseUint(fields[1], 0, 64) diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go index 59f4d5055..388ebf396 100644 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -78,11 +78,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { mountInfo := strings.Split(mountString, " ") mountInfoLength := len(mountInfo) if mountInfoLength < 10 { - return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) + return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString) } if mountInfo[mountInfoLength-4] != "-" { - return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4]) + return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4]) } mount := &MountInfo{ @@ -98,18 +98,18 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { mount.MountID, err = strconv.Atoi(mountInfo[0]) if err != nil { - return nil, fmt.Errorf("failed to parse mount ID") + return nil, fmt.Errorf("%w: mount 
ID: %q", ErrFileParse, mount.MountID) } mount.ParentID, err = strconv.Atoi(mountInfo[1]) if err != nil { - return nil, fmt.Errorf("failed to parse parent ID") + return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID) } // Has optional fields, which is a space separated list of values. // Example: shared:2 master:7 if mountInfo[6] != "" { mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) if err != nil { - return nil, err + return nil, fmt.Errorf("%s: %w", ErrFileParse, err) } } return mount, nil diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 7f68890cf..852c8c4a0 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -266,7 +266,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { if len(ss) > deviceEntryLen { // Only NFSv3 and v4 are supported for parsing statistics if m.Type != nfs3Type && m.Type != nfs4Type { - return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type) } statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) @@ -290,7 +290,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { // device [device] mounted on [mount] with fstype [type] func parseMount(ss []string) (*Mount, error) { if len(ss) < deviceEntryLen { - return nil, fmt.Errorf("invalid device entry: %v", ss) + return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) } // Check for specific words appearing at specific indices to ensure @@ -308,7 +308,7 @@ func parseMount(ss []string) (*Mount, error) { for _, f := range format { if ss[f.i] != f.s { - return nil, fmt.Errorf("invalid device entry: %v", ss) + return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) } } @@ -345,7 +345,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e 
switch ss[0] { case fieldOpts: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } if stats.Opts == nil { stats.Opts = map[string]string{} @@ -360,7 +360,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e } case fieldAge: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } // Age integer is in seconds d, err := time.ParseDuration(ss[1] + "s") @@ -371,7 +371,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Age = d case fieldBytes: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } bstats, err := parseNFSBytesStats(ss[1:]) if err != nil { @@ -381,7 +381,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Bytes = *bstats case fieldEvents: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss) } estats, err := parseNFSEventsStats(ss[1:]) if err != nil { @@ -391,7 +391,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Events = *estats case fieldTransport: if len(ss) < 3 { - return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss) } tstats, err := parseNFSTransportStats(ss[1:], statVersion) @@ -430,7 +430,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e // integer fields. 
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { if len(ss) != fieldBytesLen { - return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss) } ns := make([]uint64, 0, fieldBytesLen) @@ -459,7 +459,7 @@ func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { // integer fields. func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { if len(ss) != fieldEventsLen { - return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss) } ns := make([]uint64, 0, fieldEventsLen) @@ -523,7 +523,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { } if len(ss) < minFields { - return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss) } // Skip string operation name for integers @@ -576,10 +576,10 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } else if protocol == "udp" { expectedLength = fieldTransport10UDPLen } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) + return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) } if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) + return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss) } case statVersion11: var expectedLength int @@ -588,13 +588,13 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } else if protocol == "udp" { expectedLength = fieldTransport11UDPLen } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) + return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", 
ErrFileParse, protocol, ss) } if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) + return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v", ErrFileParse, ss) } default: - return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) + return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q", ErrFileParse, statVersion) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index 64a0e9460..fdfa45611 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -58,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) { stat, err := parseConntrackStat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err) + return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err) } return stat, nil @@ -86,11 +86,12 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { entries, err := util.ParseHexUint64s(fields) if err != nil { - return nil, fmt.Errorf("invalid conntrackstat entry, couldn't parse fields: %s", err) + return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err) } numEntries := len(entries) if numEntries < 16 || numEntries > 17 { - return nil, fmt.Errorf("invalid conntrackstat entry, invalid number of fields: %d", numEntries) + return nil, + fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries) } stats := &ConntrackStatEntry{ diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index 
7fd57d7f4..4da81ea57 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -130,7 +130,7 @@ func parseIP(hexIP string) (net.IP, error) { var byteIP []byte byteIP, err := hex.DecodeString(hexIP) if err != nil { - return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP) + return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) } switch len(byteIP) { case 4: @@ -144,7 +144,7 @@ func parseIP(hexIP string) (net.IP, error) { } return i, nil default: - return nil, fmt.Errorf("Unable to parse IP %s", hexIP) + return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil) } } @@ -153,7 +153,8 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { line := &netIPSocketLine{} if len(fields) < 10 { return nil, fmt.Errorf( - "cannot parse net socket line as it has less then 10 columns %q", + "%w: Less than 10 columns found %q", + ErrFileParse, strings.Join(fields, " "), ) } @@ -162,64 +163,65 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { // sl s := strings.Split(fields[0], ":") if len(s) != 2 { - return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0]) + return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0]) } if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err) + return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) } // local_address l := strings.Split(fields[1], ":") if len(l) != 2 { - return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1]) + return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1]) } if line.LocalAddr, err = parseIP(l[0]); err != nil { return nil, err } if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != 
nil { - return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err) + return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) } // remote_address r := strings.Split(fields[2], ":") if len(r) != 2 { - return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1]) + return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1]) } if line.RemAddr, err = parseIP(r[0]); err != nil { return nil, err } if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) } // st if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse st value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) } // tx_queue and rx_queue q := strings.Split(fields[4], ":") if len(q) != 2 { return nil, fmt.Errorf( - "cannot parse tx/rx queues in socket line as it has a missing colon %q", + "%w: Missing colon for tx/rx queues in socket line %q", + ErrFileParse, fields[4], ) } if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) } if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) } // uid if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err) + return nil, 
fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) } // inode if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err) + return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) } return line, nil diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go index 374b6f73f..b6c77b709 100644 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -131,7 +131,7 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro } else if fields[6] == disabled { line.Slab = false } else { - return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name) + return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) } line.ModuleName = fields[7] @@ -173,7 +173,7 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro } else if capabilities[i] == "n" { *capabilityFields[i] = false } else { - return fmt.Errorf("unable to parse capability block for protocol: position %d", i) + return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) } } return nil diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go new file mode 100644 index 000000000..deb7029fe --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_route.go @@ -0,0 +1,143 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +const ( + blackholeRepresentation string = "*" + blackholeIfaceName string = "blackhole" + routeLineColumns int = 11 +) + +// A NetRouteLine represents one line from net/route. +type NetRouteLine struct { + Iface string + Destination uint32 + Gateway uint32 + Flags uint32 + RefCnt uint32 + Use uint32 + Metric uint32 + Mask uint32 + MTU uint32 + Window uint32 + IRTT uint32 +} + +func (fs FS) NetRoute() ([]NetRouteLine, error) { + return readNetRoute(fs.proc.Path("net", "route")) +} + +func readNetRoute(path string) ([]NetRouteLine, error) { + b, err := util.ReadFileNoStat(path) + if err != nil { + return nil, err + } + + routelines, err := parseNetRoute(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read net route from %s: %w", path, err) + } + return routelines, nil +} + +func parseNetRoute(r io.Reader) ([]NetRouteLine, error) { + var routelines []NetRouteLine + + scanner := bufio.NewScanner(r) + scanner.Scan() + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + routeline, err := parseNetRouteLine(fields) + if err != nil { + return nil, err + } + routelines = append(routelines, *routeline) + } + return routelines, nil +} + +func parseNetRouteLine(fields []string) (*NetRouteLine, error) { + if len(fields) != routeLineColumns { + return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields)) + } + iface := fields[0] + if iface == blackholeRepresentation 
{ + iface = blackholeIfaceName + } + destination, err := strconv.ParseUint(fields[1], 16, 32) + if err != nil { + return nil, err + } + gateway, err := strconv.ParseUint(fields[2], 16, 32) + if err != nil { + return nil, err + } + flags, err := strconv.ParseUint(fields[3], 10, 32) + if err != nil { + return nil, err + } + refcnt, err := strconv.ParseUint(fields[4], 10, 32) + if err != nil { + return nil, err + } + use, err := strconv.ParseUint(fields[5], 10, 32) + if err != nil { + return nil, err + } + metric, err := strconv.ParseUint(fields[6], 10, 32) + if err != nil { + return nil, err + } + mask, err := strconv.ParseUint(fields[7], 16, 32) + if err != nil { + return nil, err + } + mtu, err := strconv.ParseUint(fields[8], 10, 32) + if err != nil { + return nil, err + } + window, err := strconv.ParseUint(fields[9], 10, 32) + if err != nil { + return nil, err + } + irtt, err := strconv.ParseUint(fields[10], 10, 32) + if err != nil { + return nil, err + } + routeline := &NetRouteLine{ + Iface: iface, + Destination: uint32(destination), + Gateway: uint32(gateway), + Flags: uint32(flags), + RefCnt: uint32(refcnt), + Use: uint32(use), + Metric: uint32(metric), + Mask: uint32(mask), + MTU: uint32(mtu), + Window: uint32(window), + IRTT: uint32(irtt), + } + return routeline, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go index e36f4872d..360e36af7 100644 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -16,7 +16,6 @@ package procfs import ( "bufio" "bytes" - "errors" "fmt" "io" "strings" @@ -70,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) { stat, err := parseSockstat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err) + return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err) } return stat, nil @@ -84,13 +83,13 @@ func 
parseSockstat(r io.Reader) (*NetSockstat, error) { // Expect a minimum of a protocol and one key/value pair. fields := strings.Split(s.Text(), " ") if len(fields) < 3 { - return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) + return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text()) } // The remaining fields are key/value pairs. kvs, err := parseSockstatKVs(fields[1:]) if err != nil { - return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err) + return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) } // The first field is the protocol. We must trim its colon suffix. @@ -119,7 +118,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) { // parseSockstatKVs parses a string slice into a map of key/value pairs. func parseSockstatKVs(kvs []string) (map[string]int, error) { if len(kvs)%2 != 0 { - return nil, errors.New("odd number of fields in key/value pairs") + return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs) } // Iterate two values at a time to gather key/value pairs. 
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index 540cea52c..c77085291 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { entries, err := parseSoftnet(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err) + return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err) } return entries, nil @@ -83,7 +83,7 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { softnetStat := SoftnetStat{} if width < minColumns { - return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) + return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns) } // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index 98aa8e1c3..acbbc57ea 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { line := s.Text() item, err := nu.parseLine(line, hasInode, minFields) if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err) + return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) } nu.Rows = append(nu.Rows, item) } if err := s.Err(); err != nil { - return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err) + return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err) } return &nu, nil @@ -126,7 +126,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, l := len(fields) if l < min { - return nil, 
fmt.Errorf("expected at least %d fields but got %d", min, l) + return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l) } // Field offsets are as follows: @@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, users, err := u.parseUsers(fields[1]) if err != nil { - return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err) + return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err) } flags, err := u.parseFlags(fields[3]) if err != nil { - return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err) + return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) } typ, err := u.parseType(fields[4]) if err != nil { - return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err) + return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err) } state, err := u.parseState(fields[5]) if err != nil { - return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err) + return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err) } var inode uint64 if hasInode { inode, err = u.parseInode(fields[6]) if err != nil { - return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err) + return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err) } } diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go index c80fb1542..7443edca9 100644 --- a/vendor/github.com/prometheus/procfs/net_wireless.go +++ b/vendor/github.com/prometheus/procfs/net_wireless.go @@ -68,7 +68,7 @@ func (fs FS) Wireless() ([]*Wireless, error) { m, err := parseWireless(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to parse wireless: %w", err) + return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err) } return m, nil @@ -97,64 +97,64 @@ func parseWireless(r io.Reader) 
([]*Wireless, error) { parts := strings.Split(line, ":") if len(parts) != 2 { - return nil, fmt.Errorf("expected 2 parts after splitting line by ':', got %d for line %q", len(parts), line) + return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line) } name := strings.TrimSpace(parts[0]) stats := strings.Fields(parts[1]) if len(stats) < 10 { - return nil, fmt.Errorf("invalid number of fields in line %d, expected at least 10, got %d: %q", n, len(stats), line) + return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line) } status, err := strconv.ParseUint(stats[0], 16, 16) if err != nil { - return nil, fmt.Errorf("invalid status in line %d: %q", n, line) + return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line) } qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], ".")) if err != nil { - return nil, fmt.Errorf("failed to parse Quality:link as integer %q: %w", qlink, err) + return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) } qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], ".")) if err != nil { - return nil, fmt.Errorf("failed to parse Quality:level as integer %q: %w", qlevel, err) + return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) } qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], ".")) if err != nil { - return nil, fmt.Errorf("failed to parse Quality:noise as integer %q: %w", qnoise, err) + return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) } dnwid, err := strconv.Atoi(stats[4]) if err != nil { - return nil, fmt.Errorf("failed to parse Discarded:nwid as integer %q: %w", dnwid, err) + return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) } dcrypt, err := strconv.Atoi(stats[5]) if err != nil { - return nil, fmt.Errorf("failed to parse 
Discarded:crypt as integer %q: %w", dcrypt, err) + return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) } dfrag, err := strconv.Atoi(stats[6]) if err != nil { - return nil, fmt.Errorf("failed to parse Discarded:frag as integer %q: %w", dfrag, err) + return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) } dretry, err := strconv.Atoi(stats[7]) if err != nil { - return nil, fmt.Errorf("failed to parse Discarded:retry as integer %q: %w", dretry, err) + return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) } dmisc, err := strconv.Atoi(stats[8]) if err != nil { - return nil, fmt.Errorf("failed to parse Discarded:misc as integer %q: %w", dmisc, err) + return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) } mbeacon, err := strconv.Atoi(stats[9]) if err != nil { - return nil, fmt.Errorf("failed to parse Missed:beacon as integer %q: %w", mbeacon, err) + return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) } w := &Wireless{ @@ -175,7 +175,7 @@ func parseWireless(r io.Reader) ([]*Wireless, error) { } if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("failed to scan /proc/net/wireless: %w", err) + return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) } return interfaces, nil diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go index f9d9d243d..932ef2046 100644 --- a/vendor/github.com/prometheus/procfs/net_xfrm.go +++ b/vendor/github.com/prometheus/procfs/net_xfrm.go @@ -115,7 +115,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) { fields := strings.Fields(s.Text()) if len(fields) != 2 { - return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text()) + return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) } name := fields[0] diff --git 
a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 48f39dafd..d1f71caa5 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -15,6 +15,7 @@ package procfs import ( "bytes" + "errors" "fmt" "io" "os" @@ -35,6 +36,12 @@ type Proc struct { // Procs represents a list of Proc structs. type Procs []Proc +var ( + ErrFileParse = errors.New("Error Parsing File") + ErrFileRead = errors.New("Error Reading File") + ErrMountPoint = errors.New("Error Accessing Mount point") +) + func (p Procs) Len() int { return len(p) } func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } @@ -42,7 +49,7 @@ func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } // Self returns a process for the current process read via /proc/self. func Self() (Proc, error) { fs, err := NewFS(DefaultMountPoint) - if err != nil { + if err != nil || errors.Unwrap(err) == ErrMountPoint { return Proc{}, err } return fs.Self() @@ -104,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) } p := Procs{} @@ -205,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) { for i, n := range names { fd, err := strconv.ParseInt(n, 10, 32) if err != nil { - return nil, fmt.Errorf("could not parse fd %q: %w", n, err) + return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err) } fds[i] = uintptr(fd) } @@ -237,7 +244,7 @@ func (p Proc) FileDescriptorTargets() ([]string, error) { // a process. 
func (p Proc) FileDescriptorsLen() (int, error) { // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901 - if p.fs.real { + if p.fs.isReal { stat, err := os.Stat(p.path("fd")) if err != nil { return 0, err @@ -290,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("could not read %q: %w", d.Name(), err) + return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) } return names, nil diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index ea83a75ff..daeed7f57 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -51,7 +51,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) { fields := strings.SplitN(cgroupStr, ":", 3) if len(fields) < 3 { - return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) + return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr) } cgroup := &Cgroup{ @@ -60,7 +60,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) { } cgroup.HierarchyID, err = strconv.Atoi(fields[0]) if err != nil { - return nil, fmt.Errorf("failed to parse hierarchy ID") + return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID) } if fields[1] != "" { ssNames := strings.Split(fields[1], ",") diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go index 24d4dce9c..5dd493899 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroups.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go @@ -46,7 +46,7 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { fields := strings.Fields(CgroupSummaryStr) // require at least 4 fields 
if len(fields) < 4 { - return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr) + return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr) } CgroupSummary := &CgroupSummary{ @@ -54,15 +54,15 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { } CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1]) if err != nil { - return nil, fmt.Errorf("failed to parse hierarchy ID") + return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1]) } CgroupSummary.Cgroups, err = strconv.Atoi(fields[2]) if err != nil { - return nil, fmt.Errorf("failed to parse Cgroup Num") + return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2]) } CgroupSummary.Enabled, err = strconv.Atoi(fields[3]) if err != nil { - return nil, fmt.Errorf("failed to parse Enabled") + return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3]) } return CgroupSummary, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index 1bbdd4a8e..4b7933e4f 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -111,7 +111,7 @@ func parseInotifyInfo(line string) (*InotifyInfo, error) { } return i, nil } - return nil, fmt.Errorf("invalid inode entry: %q", line) + return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line) } // ProcFDInfos represents a list of ProcFDInfo structs. 
diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go index 9df79c237..86b4b4524 100644 --- a/vendor/github.com/prometheus/procfs/proc_interrupts.go +++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -66,7 +66,7 @@ func parseInterrupts(r io.Reader) (Interrupts, error) { continue } if len(parts) < 2 { - return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts) + return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts) } intName := parts[0][:len(parts[0])-1] // remove trailing : diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index 7a1388185..c86d815d7 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -103,7 +103,7 @@ func (p Proc) Limits() (ProcLimits, error) { //fields := limitsMatch.Split(s.Text(), limitsFields) fields := limitsMatch.FindStringSubmatch(s.Text()) if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text()) + return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text()) } switch fields[1] { @@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) { } i, err := strconv.ParseUint(s, 10, 64) if err != nil { - return 0, fmt.Errorf("couldn't parse value %q: %w", s, err) + return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err) } return i, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go index f1bcbf32b..727549a13 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -65,7 +65,7 @@ type ProcMap struct { func parseDevice(s string) (uint64, error) { 
toks := strings.Split(s, ":") if len(toks) < 2 { - return 0, fmt.Errorf("unexpected number of fields") + return 0, fmt.Errorf("%w: unexpected number of fields, expected: 2, got: %q", ErrFileParse, len(toks)) } major, err := strconv.ParseUint(toks[0], 16, 0) @@ -95,7 +95,7 @@ func parseAddress(s string) (uintptr, error) { func parseAddresses(s string) (uintptr, uintptr, error) { toks := strings.Split(s, "-") if len(toks) < 2 { - return 0, 0, fmt.Errorf("invalid address") + return 0, 0, fmt.Errorf("%w: invalid address", ErrFileParse) } saddr, err := parseAddress(toks[0]) @@ -114,7 +114,7 @@ func parseAddresses(s string) (uintptr, uintptr, error) { // parsePermissions parses a token and returns any that are set. func parsePermissions(s string) (*ProcMapPermissions, error) { if len(s) < 4 { - return nil, fmt.Errorf("invalid permissions token") + return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse) } perms := ProcMapPermissions{} @@ -141,7 +141,7 @@ func parsePermissions(s string) (*ProcMapPermissions, error) { func parseProcMap(text string) (*ProcMap, error) { fields := strings.Fields(text) if len(fields) < 5 { - return nil, fmt.Errorf("truncated procmap entry") + return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse) } saddr, eaddr, err := parseAddresses(fields[0]) diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 6a43bb245..8e3ff4d79 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -195,8 +195,8 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { // Remove trailing :. 
protocol := strings.TrimSuffix(nameParts[0], ":") if len(nameParts) != len(valueParts) { - return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s", - fileName, protocol) + return procNetstat, fmt.Errorf("%w: mismatch field count mismatch in %s: %s", + ErrFileParse, fileName, protocol) } for i := 1; i < len(nameParts); i++ { value, err := strconv.ParseFloat(valueParts[i], 64) diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index 391b4cbd1..c22666750 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("failed to read contents of ns dir: %w", err) + return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err) } ns := make(Namespaces, len(names)) @@ -52,13 +52,13 @@ func (p Proc) Namespaces() (Namespaces, error) { fields := strings.SplitN(target, ":", 2) if len(fields) != 2 { - return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target) + return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target) } typ := fields[0] inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) if err != nil { - return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err) + return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err) } ns[name] = Namespace{typ, uint32(inode)} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index a68fe1529..fe9dbb425 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -61,14 +61,14 @@ type PSIStats struct { func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", 
"pressure", resource))) if err != nil { - return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err) + return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) } - return parsePSIStats(resource, bytes.NewReader(data)) + return parsePSIStats(bytes.NewReader(data)) } // parsePSIStats parses the specified file for pressure stall information. -func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { +func parsePSIStats(r io.Reader) (PSIStats, error) { psiStats := PSIStats{} scanner := bufio.NewScanner(r) diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index 0e97d9957..ad8785a40 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -135,12 +135,12 @@ func (s *ProcSMapsRollup) parseLine(line string) error { } vBytes := vKBytes * 1024 - s.addValue(k, v, vKBytes, vBytes) + s.addValue(k, vBytes) return nil } -func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) { +func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) { switch k { case "Rss": s.Rss += vUintBytes diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index 6c46b7188..b9d2cf642 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -159,8 +159,8 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { // Remove trailing :. 
protocol := strings.TrimSuffix(nameParts[0], ":") if len(nameParts) != len(valueParts) { - return procSnmp, fmt.Errorf("mismatch field count mismatch in %s: %s", - fileName, protocol) + return procSnmp, fmt.Errorf("%w: mismatch field count mismatch in %s: %s", + ErrFileParse, fileName, protocol) } for i := 1; i < len(nameParts); i++ { value, err := strconv.ParseFloat(valueParts[i], 64) diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 14b249f4f..923e55005 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -138,7 +138,7 @@ func (p Proc) Stat() (ProcStat, error) { ) if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data) + return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data) } s.Comm = string(data[l+1 : r]) diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go index d46533ebf..12c5bf05b 100644 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) { vp := util.NewValueParser(f) values[i] = vp.Int() if err := vp.Err(); err != nil { - return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err) + return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) } } return values, nil diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go index bc9aaf5c2..8611c9017 100644 --- a/vendor/github.com/prometheus/procfs/slab.go +++ b/vendor/github.com/prometheus/procfs/slab.go @@ -68,7 +68,7 @@ func parseV21SlabEntry(line string) (*Slab, error) { l := slabSpace.ReplaceAllString(line, " ") s := strings.Split(l, " ") if len(s) != 16 { - return nil, 
fmt.Errorf("unable to parse: %q", line) + return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line) } var err error i := &Slab{Name: s[0]} diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go index 559129cbc..b8fad677d 100644 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -57,7 +57,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { ) if !scanner.Scan() { - return Softirqs{}, fmt.Errorf("softirqs empty") + return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead) } for scanner.Scan() { @@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TIMER:": @@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Timer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_TX:": @@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_RX:": @@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) 
{ softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "BLOCK:": @@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "IRQ_POLL:": @@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TASKLET:": @@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "SCHED:": @@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, 
err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "HRTIMER:": @@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "RCU:": @@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) } } } } if err := scanner.Err(); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err) + return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err) } return softirqs, scanner.Err() diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 586af48af..34fc3ee21 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -93,10 +93,10 @@ func parseCPUStat(line string) (CPUStat, int64, error) { &cpuStat.Guest, &cpuStat.GuestNice) if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err) + return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err) } if count == 0 { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line) } cpuStat.User /= 
userHZ @@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) if err != nil { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err) + return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) } return cpuStat, cpuID, nil @@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { &softIRQStat.Hrtimer, &softIRQStat.Rcu) if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err) + return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err) } return softIRQStat, total, nil @@ -187,6 +187,10 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { err error ) + // Increase default scanner buffer to handle very long `intr` lines. + buf := make([]byte, 0, 8*1024) + scanner.Buffer(buf, 1024*1024) + for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) @@ -197,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { switch { case parts[0] == "btime": if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) } case parts[0] == "intr": if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) } numberedIRQs := parts[2:] stat.IRQ = make([]uint64, len(numberedIRQs)) for i, count := range numberedIRQs { if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", 
ErrFileParse, count, i, err) } } case parts[0] == "ctxt": if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) } case parts[0] == "processes": if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_running": if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_blocked": if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) } case parts[0] == "softirq": softIRQStats, total, err := parseSoftIRQStat(line) @@ -247,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { } if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err) + return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err) } return stat, nil diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go index 15edc2212..fa00f555d 100644 --- a/vendor/github.com/prometheus/procfs/swaps.go +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -64,7 +64,7 @@ func parseSwapString(swapString string) (*Swap, error) { swapFields := strings.Fields(swapString) swapLength := len(swapFields) if swapLength < 5 { - return 
nil, fmt.Errorf("too few fields in swap string: %s", swapString) + return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString) } swap := &Swap{ @@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) { swap.Size, err = strconv.Atoi(swapFields[2]) if err != nil { - return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) + return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) } swap.Used, err = strconv.Atoi(swapFields[3]) if err != nil { - return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) + return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) } swap.Priority, err = strconv.Atoi(swapFields[4]) if err != nil { - return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) + return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) } return swap, nil diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go index 490c14708..df2215ece 100644 --- a/vendor/github.com/prometheus/procfs/thread.go +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err) } t := Procs{} @@ -55,7 +55,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) { continue } - t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.real}}) + t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}}) } return t, nil @@ -67,12 +67,12 @@ func (fs FS) Thread(pid, tid int) (Proc, error) { if _, err := os.Stat(taskPath); err != nil { return Proc{}, err } - return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.real}}, nil + return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil } // Thread returns 
a process for a given TID of Proc. func (proc Proc) Thread(tid int) (Proc, error) { - tfs := FS{fsi.FS(proc.path("task")), proc.fs.real} + tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal} if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil { return Proc{}, err } diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index cdedcae99..51c49d89e 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -86,7 +86,7 @@ func (fs FS) VM() (*VM, error) { return nil, err } if !file.Mode().IsDir() { - return nil, fmt.Errorf("%s is not a directory", path) + return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path) } files, err := os.ReadDir(path) diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index c745a4c04..ce5fefa5b 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) func (fs FS) Zoneinfo() ([]Zoneinfo, error) { data, err := os.ReadFile(fs.proc.Path("zoneinfo")) if err != nil { - return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) } zoneinfo, err := parseZoneinfo(data) if err != nil { - return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) } return zoneinfo, nil } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go index 4e24f9eed..6ca8d9ad6 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go +++ 
b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go @@ -14,17 +14,23 @@ import ( ) // ArrayCodec is the Codec used for bsoncore.Array values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// ArrayCodec registered. type ArrayCodec struct{} var defaultArrayCodec = NewArrayCodec() // NewArrayCodec returns an ArrayCodec. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// ArrayCodec registered. func NewArrayCodec() *ArrayCodec { return &ArrayCodec{} } // EncodeValue is the ValueEncoder for bsoncore.Array values. -func (ac *ArrayCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (ac *ArrayCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tCoreArray { return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val} } @@ -34,7 +40,7 @@ func (ac *ArrayCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val r } // DecodeValue is the ValueDecoder for bsoncore.Array values. -func (ac *ArrayCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (ac *ArrayCodec) DecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tCoreArray { return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go index 098ed69f9..0693bd432 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go @@ -23,6 +23,8 @@ var ( // Marshaler is an interface implemented by types that can marshal themselves // into a BSON document represented as bytes. 
The bytes returned must be a valid // BSON document if the error is nil. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Marshaler] instead. type Marshaler interface { MarshalBSON() ([]byte, error) } @@ -31,6 +33,8 @@ type Marshaler interface { // themselves into a BSON value as bytes. The type must be the valid type for // the bytes returned. The bytes and byte type together must be valid if the // error is nil. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueMarshaler] instead. type ValueMarshaler interface { MarshalBSONValue() (bsontype.Type, []byte, error) } @@ -39,6 +43,8 @@ type ValueMarshaler interface { // document representation of themselves. The BSON bytes can be assumed to be // valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data // after returning. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Unmarshaler] instead. type Unmarshaler interface { UnmarshalBSON([]byte) error } @@ -47,6 +53,8 @@ type Unmarshaler interface { // BSON value representation of themselves. The BSON bytes and type can be // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueUnmarshaler] instead. type ValueUnmarshaler interface { UnmarshalBSONValue(bsontype.Type, []byte) error } @@ -111,13 +119,93 @@ func (vde ValueDecoderError) Error() string { // value. type EncodeContext struct { *Registry + + // MinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, + // uint, uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) + // that can represent the integer value. + // + // Deprecated: Use bson.Encoder.IntMinSize instead. 
MinSize bool + + errorOnInlineDuplicates bool + stringifyMapKeysWithFmt bool + nilMapAsEmpty bool + nilSliceAsEmpty bool + nilByteSliceAsEmpty bool + omitZeroStruct bool + useJSONStructTags bool +} + +// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in +// the marshaled BSON when the "inline" struct tag option is set. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. +func (ec *EncodeContext) ErrorOnInlineDuplicates() { + ec.errorOnInlineDuplicates = true +} + +// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name +// strings using fmt.Sprintf() instead of the default string conversion logic. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. +func (ec *EncodeContext) StringifyMapKeysWithFmt() { + ec.stringifyMapKeysWithFmt = true +} + +// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON +// null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. +func (ec *EncodeContext) NilMapAsEmpty() { + ec.nilMapAsEmpty = true +} + +// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON +// null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. +func (ec *EncodeContext) NilSliceAsEmpty() { + ec.nilSliceAsEmpty = true +} + +// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values +// instead of BSON null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. +func (ec *EncodeContext) NilByteSliceAsEmpty() { + ec.nilByteSliceAsEmpty = true +} + +// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{}) +// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set. 
+// +// Note that the Encoder only examines exported struct fields when determining if a struct is the +// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. +func (ec *EncodeContext) OmitZeroStruct() { + ec.omitZeroStruct = true +} + +// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] instead. +func (ec *EncodeContext) UseJSONStructTags() { + ec.useJSONStructTags = true } // DecodeContext is the contextual information required for a Codec to decode a // value. type DecodeContext struct { *Registry + + // Truncate, if true, instructs decoders to to truncate the fractional part of BSON "double" + // values when attempting to unmarshal them into a Go integer (int, int8, int16, int32, int64, + // uint, uint8, uint16, uint32, or uint64) struct field. The truncation logic does not apply to + // BSON "decimal128" values. + // + // Deprecated: Use bson.Decoder.AllowTruncatingDoubles instead. Truncate bool // Ancestor is the type of a containing document. This is mainly used to determine what type @@ -125,7 +213,7 @@ type DecodeContext struct { // Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface // will be decoded into a bson.M. // - // Deprecated: Use DefaultDocumentM or DefaultDocumentD instead. + // Deprecated: Use bson.Decoder.DefaultDocumentM or bson.Decoder.DefaultDocumentD instead. Ancestor reflect.Type // defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the @@ -133,22 +221,74 @@ type DecodeContext struct { // set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an // error. DocumentType overrides the Ancestor field. 
defaultDocumentType reflect.Type + + binaryAsSlice bool + useJSONStructTags bool + useLocalTimeZone bool + zeroMaps bool + zeroStructs bool } -// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". +// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or +// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead. +func (dc *DecodeContext) BinaryAsSlice() { + dc.binaryAsSlice = true +} + +// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead. +func (dc *DecodeContext) UseJSONStructTags() { + dc.useJSONStructTags = true +} + +// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead +// of the UTC timezone. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. +func (dc *DecodeContext) UseLocalTimeZone() { + dc.useLocalTimeZone = true +} + +// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value +// passed to Decode before unmarshaling BSON documents into them. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. +func (dc *DecodeContext) ZeroMaps() { + dc.zeroMaps = true +} + +// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination +// value passed to Decode before unmarshaling BSON documents into them. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead. 
+func (dc *DecodeContext) ZeroStructs() { + dc.zeroStructs = true +} + +// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This +// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentM] instead. func (dc *DecodeContext) DefaultDocumentM() { dc.defaultDocumentType = reflect.TypeOf(primitive.M{}) } -// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". +// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This +// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentD] instead. func (dc *DecodeContext) DefaultDocumentD() { dc.defaultDocumentType = reflect.TypeOf(primitive.D{}) } -// ValueCodec is the interface that groups the methods to encode and decode +// ValueCodec is an interface for encoding and decoding a reflect.Value. // values. +// +// Deprecated: Use [ValueEncoder] and [ValueDecoder] instead. type ValueCodec interface { ValueEncoder ValueDecoder @@ -233,6 +373,10 @@ func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext // CodecZeroer is the interface implemented by Codecs that can also determine if // a value of the type that would be encoded is zero. +// +// Deprecated: Defining custom rules for the zero/empty value will not be supported in Go Driver +// 2.0. Users who want to omit empty complex values should use a pointer field and set the value to +// nil instead. 
type CodecZeroer interface { IsTypeZero(interface{}) bool } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go index 5a916cc15..dde3e7681 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go @@ -16,18 +16,30 @@ import ( ) // ByteSliceCodec is the Codec used for []byte values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// ByteSliceCodec registered. type ByteSliceCodec struct { + // EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values + // instead of BSON null. + // + // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty instead. EncodeNilAsEmpty bool } var ( defaultByteSliceCodec = NewByteSliceCodec() - _ ValueCodec = defaultByteSliceCodec + // Assert that defaultByteSliceCodec satisfies the typeDecoder interface, which allows it to be + // used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. _ typeDecoder = defaultByteSliceCodec ) -// NewByteSliceCodec returns a StringCodec with options opts. +// NewByteSliceCodec returns a ByteSliceCodec with options opts. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// ByteSliceCodec registered. func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec { byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...) 
codec := ByteSliceCodec{} @@ -42,13 +54,13 @@ func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, if !val.IsValid() || val.Type() != tByteSlice { return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} } - if val.IsNil() && !bsc.EncodeNilAsEmpty { + if val.IsNil() && !bsc.EncodeNilAsEmpty && !ec.nilByteSliceAsEmpty { return vw.WriteNull() } return vw.WriteBinary(val.Interface().([]byte)) } -func (bsc *ByteSliceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (bsc *ByteSliceCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tByteSlice { return emptyValue, ValueDecoderError{ Name: "ByteSliceDecodeValue", diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go new file mode 100644 index 000000000..844b50299 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go @@ -0,0 +1,166 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + "sync" + "sync/atomic" +) + +// Runtime check that the kind encoder and decoder caches can store any valid +// reflect.Kind constant. 
+func init() { + if s := reflect.Kind(len(kindEncoderCache{}.entries)).String(); s != "kind27" { + panic("The capacity of kindEncoderCache is too small.\n" + + "This is due to a new type being added to reflect.Kind.") + } +} + +// statically assert array size +var _ = (kindEncoderCache{}).entries[reflect.UnsafePointer] +var _ = (kindDecoderCache{}).entries[reflect.UnsafePointer] + +type typeEncoderCache struct { + cache sync.Map // map[reflect.Type]ValueEncoder +} + +func (c *typeEncoderCache) Store(rt reflect.Type, enc ValueEncoder) { + c.cache.Store(rt, enc) +} + +func (c *typeEncoderCache) Load(rt reflect.Type) (ValueEncoder, bool) { + if v, _ := c.cache.Load(rt); v != nil { + return v.(ValueEncoder), true + } + return nil, false +} + +func (c *typeEncoderCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder { + if v, loaded := c.cache.LoadOrStore(rt, enc); loaded { + enc = v.(ValueEncoder) + } + return enc +} + +func (c *typeEncoderCache) Clone() *typeEncoderCache { + cc := new(typeEncoderCache) + c.cache.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + cc.cache.Store(k, v) + } + return true + }) + return cc +} + +type typeDecoderCache struct { + cache sync.Map // map[reflect.Type]ValueDecoder +} + +func (c *typeDecoderCache) Store(rt reflect.Type, dec ValueDecoder) { + c.cache.Store(rt, dec) +} + +func (c *typeDecoderCache) Load(rt reflect.Type) (ValueDecoder, bool) { + if v, _ := c.cache.Load(rt); v != nil { + return v.(ValueDecoder), true + } + return nil, false +} + +func (c *typeDecoderCache) LoadOrStore(rt reflect.Type, dec ValueDecoder) ValueDecoder { + if v, loaded := c.cache.LoadOrStore(rt, dec); loaded { + dec = v.(ValueDecoder) + } + return dec +} + +func (c *typeDecoderCache) Clone() *typeDecoderCache { + cc := new(typeDecoderCache) + c.cache.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + cc.cache.Store(k, v) + } + return true + }) + return cc +} + +// atomic.Value requires that all calls to Store() 
have the same concrete type +// so we wrap the ValueEncoder with a kindEncoderCacheEntry to ensure the type +// is always the same (since different concrete types may implement the +// ValueEncoder interface). +type kindEncoderCacheEntry struct { + enc ValueEncoder +} + +type kindEncoderCache struct { + entries [reflect.UnsafePointer + 1]atomic.Value // *kindEncoderCacheEntry +} + +func (c *kindEncoderCache) Store(rt reflect.Kind, enc ValueEncoder) { + if enc != nil && rt < reflect.Kind(len(c.entries)) { + c.entries[rt].Store(&kindEncoderCacheEntry{enc: enc}) + } +} + +func (c *kindEncoderCache) Load(rt reflect.Kind) (ValueEncoder, bool) { + if rt < reflect.Kind(len(c.entries)) { + if ent, ok := c.entries[rt].Load().(*kindEncoderCacheEntry); ok { + return ent.enc, ent.enc != nil + } + } + return nil, false +} + +func (c *kindEncoderCache) Clone() *kindEncoderCache { + cc := new(kindEncoderCache) + for i, v := range c.entries { + if val := v.Load(); val != nil { + cc.entries[i].Store(val) + } + } + return cc +} + +// atomic.Value requires that all calls to Store() have the same concrete type +// so we wrap the ValueDecoder with a kindDecoderCacheEntry to ensure the type +// is always the same (since different concrete types may implement the +// ValueDecoder interface). 
+type kindDecoderCacheEntry struct { + dec ValueDecoder +} + +type kindDecoderCache struct { + entries [reflect.UnsafePointer + 1]atomic.Value // *kindDecoderCacheEntry +} + +func (c *kindDecoderCache) Store(rt reflect.Kind, dec ValueDecoder) { + if rt < reflect.Kind(len(c.entries)) { + c.entries[rt].Store(&kindDecoderCacheEntry{dec: dec}) + } +} + +func (c *kindDecoderCache) Load(rt reflect.Kind) (ValueDecoder, bool) { + if rt < reflect.Kind(len(c.entries)) { + if ent, ok := c.entries[rt].Load().(*kindDecoderCacheEntry); ok { + return ent.dec, ent.dec != nil + } + } + return nil, false +} + +func (c *kindDecoderCache) Clone() *kindDecoderCache { + cc := new(kindDecoderCache) + for i, v := range c.entries { + if val := v.Load(); val != nil { + cc.entries[i].Store(val) + } + } + return cc +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index e95cab585..2ce119731 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -24,7 +24,7 @@ import ( var ( defaultValueDecoders DefaultValueDecoders - errCannotTruncate = errors.New("float64 can only be truncated to an integer type when truncation is enabled") + errCannotTruncate = errors.New("float64 can only be truncated to a lower precision type when truncation is enabled") ) type decodeBinaryError struct { @@ -48,6 +48,9 @@ func newDefaultStructCodec() *StructCodec { // DefaultValueDecoders is a namespace type for the default ValueDecoders used // when creating a registry. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
type DefaultValueDecoders struct{} // RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with @@ -56,6 +59,9 @@ type DefaultValueDecoders struct{} // There is no support for decoding map[string]interface{} because there is no decoder for // interface{}, so users must either register this decoder themselves or use the // EmptyInterfaceDecoder available in the bson package. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { if rb == nil { panic(errors.New("argument to RegisterDefaultDecoders must not be nil")) @@ -132,6 +138,9 @@ func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { } // DDecodeValue is the ValueDecoderFunc for primitive.D instances. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || !val.CanSet() || val.Type() != tD { return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} @@ -188,7 +197,7 @@ func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueRe return nil } -func (dvd DefaultValueDecoders) booleanDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) booleanDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t.Kind() != reflect.Bool { return emptyValue, ValueDecoderError{ Name: "BooleanDecodeValue", @@ -235,6 +244,9 @@ func (dvd DefaultValueDecoders) booleanDecodeType(dctx DecodeContext, vr bsonrw. } // BooleanDecodeValue is the ValueDecoderFunc for bool types. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool { return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} @@ -333,6 +345,9 @@ func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReade } // IntDecodeValue is the ValueDecoderFunc for int types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() { return ValueDecoderError{ @@ -434,7 +449,7 @@ func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.Valu return nil } -func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) floatDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { var f float64 var err error switch vrType := vr.Type(); vrType { @@ -477,7 +492,7 @@ func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.Valu switch t.Kind() { case reflect.Float32: - if !ec.Truncate && float64(float32(f)) != f { + if !dc.Truncate && float64(float32(f)) != f { return emptyValue, errCannotTruncate } @@ -494,6 +509,9 @@ func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.Valu } // FloatDecodeValue is the ValueDecoderFunc for float types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() { return ValueDecoderError{ @@ -515,7 +533,7 @@ func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.Val // StringDecodeValue is the ValueDecoderFunc for string types. // // Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (dvd DefaultValueDecoders) StringDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { var str string var err error switch vr.Type() { @@ -536,7 +554,7 @@ func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw. return nil } -func (DefaultValueDecoders) javaScriptDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) javaScriptDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tJavaScript { return emptyValue, ValueDecoderError{ Name: "JavaScriptDecodeValue", @@ -565,6 +583,9 @@ func (DefaultValueDecoders) javaScriptDecodeType(dctx DecodeContext, vr bsonrw.V } // JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tJavaScript { return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val} @@ -579,7 +600,7 @@ func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bso return nil } -func (DefaultValueDecoders) symbolDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) symbolDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tSymbol { return emptyValue, ValueDecoderError{ Name: "SymbolDecodeValue", @@ -620,6 +641,9 @@ func (DefaultValueDecoders) symbolDecodeType(dctx DecodeContext, vr bsonrw.Value } // SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tSymbol { return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val} @@ -634,7 +658,7 @@ func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw. return nil } -func (DefaultValueDecoders) binaryDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) binaryDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tBinary { return emptyValue, ValueDecoderError{ Name: "BinaryDecodeValue", @@ -664,6 +688,9 @@ func (DefaultValueDecoders) binaryDecodeType(dc DecodeContext, vr bsonrw.ValueRe } // BinaryDecodeValue is the ValueDecoderFunc for Binary. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tBinary { return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val} @@ -678,7 +705,7 @@ func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.Va return nil } -func (DefaultValueDecoders) undefinedDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) undefinedDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tUndefined { return emptyValue, ValueDecoderError{ Name: "UndefinedDecodeValue", @@ -704,6 +731,9 @@ func (DefaultValueDecoders) undefinedDecodeType(dc DecodeContext, vr bsonrw.Valu } // UndefinedDecodeValue is the ValueDecoderFunc for Undefined. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tUndefined { return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val} @@ -719,7 +749,7 @@ func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw } // Accept both 12-byte string and pretty-printed 24-byte hex string formats. 
-func (dvd DefaultValueDecoders) objectIDDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) objectIDDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tOID { return emptyValue, ValueDecoderError{ Name: "ObjectIDDecodeValue", @@ -765,6 +795,9 @@ func (dvd DefaultValueDecoders) objectIDDecodeType(dc DecodeContext, vr bsonrw.V } // ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tOID { return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val} @@ -779,7 +812,7 @@ func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw. return nil } -func (DefaultValueDecoders) dateTimeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) dateTimeDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tDateTime { return emptyValue, ValueDecoderError{ Name: "DateTimeDecodeValue", @@ -808,6 +841,9 @@ func (DefaultValueDecoders) dateTimeDecodeType(dc DecodeContext, vr bsonrw.Value } // DateTimeDecodeValue is the ValueDecoderFunc for DateTime. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tDateTime { return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val} @@ -822,7 +858,7 @@ func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw. return nil } -func (DefaultValueDecoders) nullDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) nullDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tNull { return emptyValue, ValueDecoderError{ Name: "NullDecodeValue", @@ -848,6 +884,9 @@ func (DefaultValueDecoders) nullDecodeType(dc DecodeContext, vr bsonrw.ValueRead } // NullDecodeValue is the ValueDecoderFunc for Null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tNull { return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val} @@ -862,7 +901,7 @@ func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.Valu return nil } -func (DefaultValueDecoders) regexDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) regexDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tRegex { return emptyValue, ValueDecoderError{ Name: "RegexDecodeValue", @@ -891,6 +930,9 @@ func (DefaultValueDecoders) regexDecodeType(dc DecodeContext, vr bsonrw.ValueRea } // RegexDecodeValue is the ValueDecoderFunc for Regex. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tRegex { return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val} @@ -905,7 +947,7 @@ func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.Val return nil } -func (DefaultValueDecoders) dBPointerDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) dBPointerDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tDBPointer { return emptyValue, ValueDecoderError{ Name: "DBPointerDecodeValue", @@ -935,6 +977,9 @@ func (DefaultValueDecoders) dBPointerDecodeType(dc DecodeContext, vr bsonrw.Valu } // DBPointerDecodeValue is the ValueDecoderFunc for DBPointer. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tDBPointer { return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val} @@ -949,7 +994,7 @@ func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw return nil } -func (DefaultValueDecoders) timestampDecodeType(dc DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) timestampDecodeType(_ DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { if reflectType != tTimestamp { return emptyValue, ValueDecoderError{ Name: "TimestampDecodeValue", @@ -978,6 +1023,9 @@ func (DefaultValueDecoders) timestampDecodeType(dc DecodeContext, vr bsonrw.Valu } // TimestampDecodeValue is the ValueDecoderFunc for Timestamp. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tTimestamp { return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val} @@ -992,7 +1040,7 @@ func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw return nil } -func (DefaultValueDecoders) minKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) minKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tMinKey { return emptyValue, ValueDecoderError{ Name: "MinKeyDecodeValue", @@ -1020,6 +1068,9 @@ func (DefaultValueDecoders) minKeyDecodeType(dc DecodeContext, vr bsonrw.ValueRe } // MinKeyDecodeValue is the ValueDecoderFunc for MinKey. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tMinKey { return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val} @@ -1034,7 +1085,7 @@ func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.Va return nil } -func (DefaultValueDecoders) maxKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) maxKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tMaxKey { return emptyValue, ValueDecoderError{ Name: "MaxKeyDecodeValue", @@ -1062,6 +1113,9 @@ func (DefaultValueDecoders) maxKeyDecodeType(dc DecodeContext, vr bsonrw.ValueRe } // MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tMaxKey { return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val} @@ -1076,7 +1130,7 @@ func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.Va return nil } -func (dvd DefaultValueDecoders) decimal128DecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) decimal128DecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tDecimal { return emptyValue, ValueDecoderError{ Name: "Decimal128DecodeValue", @@ -1105,6 +1159,9 @@ func (dvd DefaultValueDecoders) decimal128DecodeType(dctx DecodeContext, vr bson } // Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tDecimal { return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val} @@ -1119,7 +1176,7 @@ func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bso return nil } -func (dvd DefaultValueDecoders) jsonNumberDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) jsonNumberDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tJSONNumber { return emptyValue, ValueDecoderError{ Name: "JSONNumberDecodeValue", @@ -1164,6 +1221,9 @@ func (dvd DefaultValueDecoders) jsonNumberDecodeType(dc DecodeContext, vr bsonrw } // JSONNumberDecodeValue is the ValueDecoderFunc for json.Number. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tJSONNumber { return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} @@ -1178,7 +1238,7 @@ func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonr return nil } -func (dvd DefaultValueDecoders) urlDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) urlDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tURL { return emptyValue, ValueDecoderError{ Name: "URLDecodeValue", @@ -1213,6 +1273,9 @@ func (dvd DefaultValueDecoders) urlDecodeType(dc DecodeContext, vr bsonrw.ValueR } // URLDecodeValue is the ValueDecoderFunc for url.URL. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tURL { return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val} @@ -1230,7 +1293,7 @@ func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.Value // TimeDecodeValue is the ValueDecoderFunc for time.Time. // // Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead. 
-func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (dvd DefaultValueDecoders) TimeDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if vr.Type() != bsontype.DateTime { return fmt.Errorf("cannot decode %v into a time.Time", vr.Type()) } @@ -1251,7 +1314,7 @@ func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.Valu // ByteSliceDecodeValue is the ValueDecoderFunc for []byte. // // Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) ByteSliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (dvd DefaultValueDecoders) ByteSliceDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null { return fmt.Errorf("cannot decode %v into a []byte", vr.Type()) } @@ -1336,6 +1399,9 @@ func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.Value } // ArrayDecodeValue is the ValueDecoderFunc for array types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Array { return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} @@ -1447,7 +1513,10 @@ func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.Val } // ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations. 
-func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) { return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} } @@ -1471,16 +1540,19 @@ func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr return err } - fn := val.Convert(tValueUnmarshaler).MethodByName("UnmarshalBSONValue") - errVal := fn.Call([]reflect.Value{reflect.ValueOf(t), reflect.ValueOf(src)})[0] - if !errVal.IsNil() { - return errVal.Interface().(error) + m, ok := val.Interface().(ValueUnmarshaler) + if !ok { + // NB: this error should be unreachable due to the above checks + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} } - return nil + return m.UnmarshalBSONValue(t, src) } // UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. -func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
+func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) { return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} } @@ -1516,12 +1588,12 @@ func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bson val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. } - fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON") - errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0] - if !errVal.IsNil() { - return errVal.Interface().(error) + m, ok := val.Interface().(Unmarshaler) + if !ok { + // NB: this error should be unreachable due to the above checks + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} } - return nil + return m.UnmarshalBSON(src) } // EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. @@ -1565,7 +1637,10 @@ func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr b } // CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. -func (DefaultValueDecoders) CoreDocumentDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (DefaultValueDecoders) CoreDocumentDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tCoreDocument { return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} } @@ -1671,6 +1746,9 @@ func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bso } // CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tCodeWithScope { return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go index 6bdb43cb4..4ab14a668 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -58,10 +58,16 @@ func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) er // DefaultValueEncoders is a namespace type for the default ValueEncoders used // when creating a registry. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. type DefaultValueEncoders struct{} // RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with // the provided RegistryBuilder. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { if rb == nil { panic(errors.New("argument to RegisterDefaultEncoders must not be nil")) @@ -113,7 +119,10 @@ func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { } // BooleanEncodeValue is the ValueEncoderFunc for bool types. 
-func (dve DefaultValueEncoders) BooleanEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) BooleanEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Bool { return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} } @@ -125,6 +134,9 @@ func fitsIn32Bits(i int64) bool { } // IntEncodeValue is the ValueEncoderFunc for int types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { switch val.Kind() { case reflect.Int8, reflect.Int16, reflect.Int32: @@ -176,7 +188,10 @@ func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.Valu } // FloatEncodeValue is the ValueEncoderFunc for float types. -func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) FloatEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { switch val.Kind() { case reflect.Float32, reflect.Float64: return vw.WriteDouble(val.Float()) @@ -188,7 +203,7 @@ func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.Val // StringEncodeValue is the ValueEncoderFunc for string types. // // Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead. 
-func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (dve DefaultValueEncoders) StringEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if val.Kind() != reflect.String { return ValueEncoderError{ Name: "StringEncodeValue", @@ -201,7 +216,10 @@ func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw. } // ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID. -func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) ObjectIDEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tOID { return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val} } @@ -209,7 +227,10 @@ func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw. } // Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128. -func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) Decimal128EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tDecimal { return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val} } @@ -217,6 +238,9 @@ func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonr } // JSONNumberEncodeValue is the ValueEncoderFunc for json.Number. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tJSONNumber { return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} @@ -237,7 +261,10 @@ func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonr } // URLEncodeValue is the ValueEncoderFunc for url.URL. -func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) URLEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tURL { return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val} } @@ -248,7 +275,7 @@ func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.Value // TimeEncodeValue is the ValueEncoderFunc for time.TIme. // // Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead. -func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (dve DefaultValueEncoders) TimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tTime { return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} } @@ -260,7 +287,7 @@ func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.Valu // ByteSliceEncodeValue is the ValueEncoderFunc for []byte. // // Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead. 
-func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (dve DefaultValueEncoders) ByteSliceEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tByteSlice { return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} } @@ -343,6 +370,9 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum } // ArrayEncodeValue is the ValueEncoderFunc for array types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Array { return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} @@ -515,7 +545,10 @@ func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw b } // ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations. -func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { // Either val or a pointer to val must implement ValueMarshaler switch { case !val.IsValid(): @@ -531,17 +564,22 @@ func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw b return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} } - fn := val.Convert(tValueMarshaler).MethodByName("MarshalBSONValue") - returns := fn.Call(nil) - if !returns[2].IsNil() { - return returns[2].Interface().(error) + m, ok := val.Interface().(ValueMarshaler) + if !ok { + return vw.WriteNull() + } + t, data, err := m.MarshalBSONValue() + if err != nil { + return err } - t, data := returns[0].Interface().(bsontype.Type), returns[1].Interface().([]byte) return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data) } // MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations. -func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { // Either val or a pointer to val must implement Marshaler switch { case !val.IsValid(): @@ -557,16 +595,21 @@ func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} } - fn := val.Convert(tMarshaler).MethodByName("MarshalBSON") - returns := fn.Call(nil) - if !returns[1].IsNil() { - return returns[1].Interface().(error) + m, ok := val.Interface().(Marshaler) + if !ok { + return vw.WriteNull() + } + data, err := m.MarshalBSON() + if err != nil { + return err } - data := returns[0].Interface().([]byte) return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data) } // ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { // Either val or a pointer to val must implement Proxy switch { @@ -583,27 +626,38 @@ func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.Val return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} } - fn := val.Convert(tProxy).MethodByName("ProxyBSON") - returns := fn.Call(nil) - if !returns[1].IsNil() { - return returns[1].Interface().(error) + m, ok := val.Interface().(Proxy) + if !ok { + return vw.WriteNull() } - data := returns[0] - var encoder ValueEncoder - var err error - if data.Elem().IsValid() { - encoder, err = ec.LookupEncoder(data.Elem().Type()) - } else { - encoder, err = ec.LookupEncoder(nil) + v, err := m.ProxyBSON() + if err != nil { + return err } + if v == nil { + encoder, err := ec.LookupEncoder(nil) + if err != nil { + return err + } + return encoder.EncodeValue(ec, vw, reflect.ValueOf(nil)) + } + vv := reflect.ValueOf(v) + switch vv.Kind() { + case reflect.Ptr, reflect.Interface: + vv = vv.Elem() + } + encoder, err := ec.LookupEncoder(vv.Type()) if err != nil { return err } - return encoder.EncodeValue(ec, vw, data.Elem()) + return encoder.EncodeValue(ec, vw, vv) } // JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. -func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (DefaultValueEncoders) JavaScriptEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tJavaScript { return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val} } @@ -612,7 +666,10 @@ func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw. } // SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type. -func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) SymbolEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tSymbol { return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val} } @@ -621,7 +678,10 @@ func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.Valu } // BinaryEncodeValue is the ValueEncoderFunc for Binary. -func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) BinaryEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tBinary { return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val} } @@ -631,7 +691,10 @@ func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueW } // UndefinedEncodeValue is the ValueEncoderFunc for Undefined. 
-func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) UndefinedEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tUndefined { return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val} } @@ -640,7 +703,10 @@ func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.Val } // DateTimeEncodeValue is the ValueEncoderFunc for DateTime. -func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) DateTimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tDateTime { return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val} } @@ -649,7 +715,10 @@ func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.Valu } // NullEncodeValue is the ValueEncoderFunc for Null. -func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (DefaultValueEncoders) NullEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tNull { return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val} } @@ -658,7 +727,10 @@ func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWri } // RegexEncodeValue is the ValueEncoderFunc for Regex. -func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) RegexEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tRegex { return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val} } @@ -669,7 +741,10 @@ func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWr } // DBPointerEncodeValue is the ValueEncoderFunc for DBPointer. -func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) DBPointerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tDBPointer { return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val} } @@ -680,7 +755,10 @@ func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.Val } // TimestampEncodeValue is the ValueEncoderFunc for Timestamp. 
-func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) TimestampEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tTimestamp { return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val} } @@ -691,7 +769,10 @@ func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.Val } // MinKeyEncodeValue is the ValueEncoderFunc for MinKey. -func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) MinKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tMinKey { return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val} } @@ -700,7 +781,10 @@ func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueW } // MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey. -func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (DefaultValueEncoders) MaxKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tMaxKey { return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val} } @@ -709,7 +793,10 @@ func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueW } // CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document. -func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) CoreDocumentEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tCoreDocument { return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} } @@ -720,6 +807,9 @@ func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw. } // CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tCodeWithScope { return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go index 5f903ebea..4613e5a1e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go @@ -31,35 +31,39 @@ // allow the use of a function with the correct signature as a ValueDecoder. 
A DecodeContext // instance is provided and serves similar functionality to the EncodeContext. // -// # Registry and RegistryBuilder -// -// A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. See the Registry type -// documentation for examples of registering various custom encoders and decoders. A Registry can be constructed using a -// RegistryBuilder, which handles three main types of codecs: -// -// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and RegisterTypeDecoder methods. -// The registered codec will be invoked when encoding/decoding a value whose type matches the registered type exactly. -// If the registered type is an interface, the codec will be invoked when encoding or decoding values whose type is the -// interface, but not for values with concrete types that implement the interface. -// -// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and RegisterHookDecoder methods. -// These methods only accept interface types and the registered codecs will be invoked when encoding or decoding values -// whose types implement the interface. An example of a hook defined by the driver is bson.Marshaler. The driver will -// call the MarshalBSON method for any value whose type implements bson.Marshaler, regardless of the value's concrete -// type. -// -// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type associations are used when -// decoding into a bson.D/bson.M or a struct field of type interface{}. For example, by default, BSON int32 and int64 -// values decode as Go int32 and int64 instances, respectively, when decoding into a bson.D. The following code would -// change the behavior so these values decode as Go int instances instead: +// # Registry +// +// A Registry is a store for ValueEncoders, ValueDecoders, and a type map. 
See the Registry type +// documentation for examples of registering various custom encoders and decoders. A Registry can +// have three main types of codecs: +// +// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and +// RegisterTypeDecoder methods. The registered codec will be invoked when encoding/decoding a value +// whose type matches the registered type exactly. +// If the registered type is an interface, the codec will be invoked when encoding or decoding +// values whose type is the interface, but not for values with concrete types that implement the +// interface. +// +// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and +// RegisterHookDecoder methods. These methods only accept interface types and the registered codecs +// will be invoked when encoding or decoding values whose types implement the interface. An example +// of a hook defined by the driver is bson.Marshaler. The driver will call the MarshalBSON method +// for any value whose type implements bson.Marshaler, regardless of the value's concrete type. +// +// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type +// associations are used when decoding into a bson.D/bson.M or a struct field of type interface{}. +// For example, by default, BSON int32 and int64 values decode as Go int32 and int64 instances, +// respectively, when decoding into a bson.D. The following code would change the behavior so these +// values decode as Go int instances instead: // // intType := reflect.TypeOf(int(0)) -// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) +// registry.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) // -// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and RegisterDefaultDecoder -// methods. 
The registered codec will be invoked when encoding or decoding values whose reflect.Kind matches the -// registered reflect.Kind as long as the value's type doesn't match a registered type or hook encoder/decoder first. -// These methods should be used to change the behavior for all values for a specific kind. +// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and +// RegisterDefaultDecoder methods. The registered codec will be invoked when encoding or decoding +// values whose reflect.Kind matches the registered reflect.Kind as long as the value's type doesn't +// match a registered type or hook encoder/decoder first. These methods should be used to change the +// behavior for all values for a specific kind. // // # Registry Lookup Procedure // @@ -67,17 +71,18 @@ // // 1. A type encoder registered for the exact type of the value. // -// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to the value. If the -// value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and bsoncodec.ValueMarshaler), the first -// one registered will be selected. Note that registries constructed using bson.NewRegistryBuilder have driver-defined -// hooks registered for the bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those -// will take precedence over any new hooks. +// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to +// the value. If the value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and +// bsoncodec.ValueMarshaler), the first one registered will be selected. Note that registries +// constructed using bson.NewRegistry have driver-defined hooks registered for the +// bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those will take +// precedence over any new hooks. // // 3. A kind encoder registered for the value's kind. 
// -// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The same precedence -// rules apply for decoders, with the exception that an error of type ErrNoDecoder will be returned if no decoder is -// found. +// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The +// same precedence rules apply for decoders, with the exception that an error of type ErrNoDecoder +// will be returned if no decoder is found. // // # DefaultValueEncoders and DefaultValueDecoders // diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go index eda417cff..94f7dcf1e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go @@ -16,18 +16,30 @@ import ( ) // EmptyInterfaceCodec is the Codec used for interface{} values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// EmptyInterfaceCodec registered. type EmptyInterfaceCodec struct { + // DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the + // "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. + // + // Deprecated: Use bson.Decoder.BinaryAsSlice instead. DecodeBinaryAsSlice bool } var ( defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec() - _ ValueCodec = defaultEmptyInterfaceCodec + // Assert that defaultEmptyInterfaceCodec satisfies the typeDecoder interface, which allows it + // to be used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. _ typeDecoder = defaultEmptyInterfaceCodec ) // NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// EmptyInterfaceCodec registered. func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec { interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...) @@ -121,7 +133,7 @@ func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReade return emptyValue, err } - if eic.DecodeBinaryAsSlice && rtype == tBinary { + if (eic.DecodeBinaryAsSlice || dc.binaryAsSlice) && rtype == tBinary { binElem := elem.Interface().(primitive.Binary) if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld { elem = reflect.ValueOf(binElem.Data) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go index e1fbef9c6..325c1738a 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -20,14 +20,29 @@ import ( var defaultMapCodec = NewMapCodec() // MapCodec is the Codec used for map values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// MapCodec registered. type MapCodec struct { - DecodeZerosMap bool - EncodeNilAsEmpty bool + // DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination + // value passed to Decode before unmarshaling BSON documents into them. + // + // Deprecated: Use bson.Decoder.ZeroMaps instead. + DecodeZerosMap bool + + // EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of + // BSON null. + // + // Deprecated: Use bson.Encoder.NilMapAsEmpty instead. + EncodeNilAsEmpty bool + + // EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name + // strings using fmt.Sprintf() instead of the default string conversion logic. 
+ // + // Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt instead. EncodeKeysWithStringer bool } -var _ ValueCodec = &MapCodec{} - // KeyMarshaler is the interface implemented by an object that can marshal itself into a string key. // This applies to types used as map keys and is similar to encoding.TextMarshaler. type KeyMarshaler interface { @@ -45,6 +60,9 @@ type KeyUnmarshaler interface { } // NewMapCodec returns a MapCodec with options opts. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// MapCodec registered. func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { mapOpt := bsonoptions.MergeMapCodecOptions(opts...) @@ -67,7 +85,7 @@ func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val ref return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} } - if val.IsNil() && !mc.EncodeNilAsEmpty { + if val.IsNil() && !mc.EncodeNilAsEmpty && !ec.nilMapAsEmpty { // If we have a nil map but we can't WriteNull, that means we're probably trying to encode // to a TopLevel document. 
We can't currently tell if this is what actually happened, but if // there's a deeper underlying problem, the error will also be returned from WriteDocument, @@ -100,7 +118,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v keys := val.MapKeys() for _, key := range keys { - keyStr, err := mc.encodeKey(key) + keyStr, err := mc.encodeKey(key, ec.stringifyMapKeysWithFmt) if err != nil { return err } @@ -163,7 +181,7 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref val.Set(reflect.MakeMap(val.Type())) } - if val.Len() > 0 && mc.DecodeZerosMap { + if val.Len() > 0 && (mc.DecodeZerosMap || dc.zeroMaps) { clearMap(val) } @@ -211,8 +229,8 @@ func clearMap(m reflect.Value) { } } -func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) { - if mc.EncodeKeysWithStringer { +func (mc *MapCodec) encodeKey(val reflect.Value, encodeKeysWithStringer bool) (string, error) { + if mc.EncodeKeysWithStringer || encodeKeysWithStringer { return fmt.Sprint(val), nil } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go index 616a3e701..e5923230b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go @@ -8,7 +8,6 @@ package bsoncodec import ( "reflect" - "sync" "go.mongodb.org/mongo-driver/bson/bsonrw" "go.mongodb.org/mongo-driver/bson/bsontype" @@ -18,18 +17,20 @@ var _ ValueEncoder = &PointerCodec{} var _ ValueDecoder = &PointerCodec{} // PointerCodec is the Codec used for pointers. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// PointerCodec registered. 
type PointerCodec struct { - ecache map[reflect.Type]ValueEncoder - dcache map[reflect.Type]ValueDecoder - l sync.RWMutex + ecache typeEncoderCache + dcache typeDecoderCache } // NewPointerCodec returns a PointerCodec that has been initialized. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// PointerCodec registered. func NewPointerCodec() *PointerCodec { - return &PointerCodec{ - ecache: make(map[reflect.Type]ValueEncoder), - dcache: make(map[reflect.Type]ValueDecoder), - } + return &PointerCodec{} } // EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil @@ -46,24 +47,19 @@ func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val return vw.WriteNull() } - pc.l.RLock() - enc, ok := pc.ecache[val.Type()] - pc.l.RUnlock() - if ok { - if enc == nil { - return ErrNoEncoder{Type: val.Type()} + typ := val.Type() + if v, ok := pc.ecache.Load(typ); ok { + if v == nil { + return ErrNoEncoder{Type: typ} } - return enc.EncodeValue(ec, vw, val.Elem()) + return v.EncodeValue(ec, vw, val.Elem()) } - - enc, err := ec.LookupEncoder(val.Type().Elem()) - pc.l.Lock() - pc.ecache[val.Type()] = enc - pc.l.Unlock() + // TODO(charlie): handle concurrent requests for the same type + enc, err := ec.LookupEncoder(typ.Elem()) + enc = pc.ecache.LoadOrStore(typ, enc) if err != nil { return err } - return enc.EncodeValue(ec, vw, val.Elem()) } @@ -74,36 +70,31 @@ func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} } + typ := val.Type() if vr.Type() == bsontype.Null { - val.Set(reflect.Zero(val.Type())) + val.Set(reflect.Zero(typ)) return vr.ReadNull() } if vr.Type() == bsontype.Undefined { - val.Set(reflect.Zero(val.Type())) + val.Set(reflect.Zero(typ)) return vr.ReadUndefined() } if val.IsNil() { - val.Set(reflect.New(val.Type().Elem())) + 
val.Set(reflect.New(typ.Elem())) } - pc.l.RLock() - dec, ok := pc.dcache[val.Type()] - pc.l.RUnlock() - if ok { - if dec == nil { - return ErrNoDecoder{Type: val.Type()} + if v, ok := pc.dcache.Load(typ); ok { + if v == nil { + return ErrNoDecoder{Type: typ} } - return dec.DecodeValue(dc, vr, val.Elem()) + return v.DecodeValue(dc, vr, val.Elem()) } - - dec, err := dc.LookupDecoder(val.Type().Elem()) - pc.l.Lock() - pc.dcache[val.Type()] = dec - pc.l.Unlock() + // TODO(charlie): handle concurrent requests for the same type + dec, err := dc.LookupDecoder(typ.Elem()) + dec = pc.dcache.LoadOrStore(typ, dec) if err != nil { return err } - return dec.DecodeValue(dc, vr, val.Elem()) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go index 80644023c..f309ee2b3 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -16,12 +16,18 @@ import ( ) // ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder. +// +// Deprecated: ErrNilType will not be supported in Go Driver 2.0. var ErrNilType = errors.New("cannot perform a decoder lookup on ") // ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder. +// +// Deprecated: ErrNotPointer will not be supported in Go Driver 2.0. var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder") // ErrNoEncoder is returned when there wasn't an encoder available for a type. +// +// Deprecated: ErrNoEncoder will not be supported in Go Driver 2.0. type ErrNoEncoder struct { Type reflect.Type } @@ -34,6 +40,8 @@ func (ene ErrNoEncoder) Error() string { } // ErrNoDecoder is returned when there wasn't a decoder available for a type. +// +// Deprecated: ErrNoDecoder will not be supported in Go Driver 2.0. 
type ErrNoDecoder struct { Type reflect.Type } @@ -43,6 +51,8 @@ func (end ErrNoDecoder) Error() string { } // ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type. +// +// Deprecated: ErrNoTypeMapEntry will not be supported in Go Driver 2.0. type ErrNoTypeMapEntry struct { Type bsontype.Type } @@ -52,63 +62,30 @@ func (entme ErrNoTypeMapEntry) Error() string { } // ErrNotInterface is returned when the provided type is not an interface. +// +// Deprecated: ErrNotInterface will not be supported in Go Driver 2.0. var ErrNotInterface = errors.New("The provided type is not an interface") // A RegistryBuilder is used to build a Registry. This type is not goroutine // safe. +// +// Deprecated: Use Registry instead. type RegistryBuilder struct { - typeEncoders map[reflect.Type]ValueEncoder - interfaceEncoders []interfaceValueEncoder - kindEncoders map[reflect.Kind]ValueEncoder - - typeDecoders map[reflect.Type]ValueDecoder - interfaceDecoders []interfaceValueDecoder - kindDecoders map[reflect.Kind]ValueDecoder - - typeMap map[bsontype.Type]reflect.Type -} - -// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main -// typed passed around and Encoders and Decoders are constructed from it. -type Registry struct { - typeEncoders map[reflect.Type]ValueEncoder - typeDecoders map[reflect.Type]ValueDecoder - - interfaceEncoders []interfaceValueEncoder - interfaceDecoders []interfaceValueDecoder - - kindEncoders map[reflect.Kind]ValueEncoder - kindDecoders map[reflect.Kind]ValueDecoder - - typeMap map[bsontype.Type]reflect.Type - - mu sync.RWMutex + registry *Registry } // NewRegistryBuilder creates a new empty RegistryBuilder. +// +// Deprecated: Use NewRegistry instead. 
func NewRegistryBuilder() *RegistryBuilder { return &RegistryBuilder{ - typeEncoders: make(map[reflect.Type]ValueEncoder), - typeDecoders: make(map[reflect.Type]ValueDecoder), - - interfaceEncoders: make([]interfaceValueEncoder, 0), - interfaceDecoders: make([]interfaceValueDecoder, 0), - - kindEncoders: make(map[reflect.Kind]ValueEncoder), - kindDecoders: make(map[reflect.Kind]ValueDecoder), - - typeMap: make(map[bsontype.Type]reflect.Type), + registry: NewRegistry(), } } -func buildDefaultRegistry() *Registry { - rb := NewRegistryBuilder() - defaultValueEncoders.RegisterDefaultEncoders(rb) - defaultValueDecoders.RegisterDefaultDecoders(rb) - return rb.Build() -} - // RegisterCodec will register the provided ValueCodec for the provided type. +// +// Deprecated: Use Registry.RegisterTypeEncoder and Registry.RegisterTypeDecoder instead. func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder { rb.RegisterTypeEncoder(t, codec) rb.RegisterTypeDecoder(t, codec) @@ -120,31 +97,22 @@ func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *Regi // The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered // for a pointer to that type. // -// If the given type is an interface, the encoder will be called when marshalling a type that is that interface. It -// will not be called when marshalling a non-interface type that implements the interface. +// If the given type is an interface, the encoder will be called when marshaling a type that is that interface. It +// will not be called when marshaling a non-interface type that implements the interface. +// +// Deprecated: Use Registry.RegisterTypeEncoder instead. 
func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - rb.typeEncoders[t] = enc + rb.registry.RegisterTypeEncoder(t, enc) return rb } // RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when -// marshalling a type if the type implements t or a pointer to the type implements t. If the provided type is not +// marshaling a type if the type implements t or a pointer to the type implements t. If the provided type is not // an interface (i.e. t.Kind() != reflect.Interface), this method will panic. +// +// Deprecated: Use Registry.RegisterInterfaceEncoder instead. func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - if t.Kind() != reflect.Interface { - panicStr := fmt.Sprintf("RegisterHookEncoder expects a type with kind reflect.Interface, "+ - "got type %s with kind %s", t, t.Kind()) - panic(panicStr) - } - - for idx, encoder := range rb.interfaceEncoders { - if encoder.i == t { - rb.interfaceEncoders[idx].ve = enc - return rb - } - } - - rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc}) + rb.registry.RegisterInterfaceEncoder(t, enc) return rb } @@ -153,97 +121,78 @@ func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) // The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered // for a pointer to that type. // -// If the given type is an interface, the decoder will be called when unmarshalling into a type that is that interface. -// It will not be called when unmarshalling into a non-interface type that implements the interface. +// If the given type is an interface, the decoder will be called when unmarshaling into a type that is that interface. +// It will not be called when unmarshaling into a non-interface type that implements the interface. 
+// +// Deprecated: Use Registry.RegisterTypeDecoder instead. func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - rb.typeDecoders[t] = dec + rb.registry.RegisterTypeDecoder(t, dec) return rb } // RegisterHookDecoder will register an decoder for the provided interface type t. This decoder will be called when -// unmarshalling into a type if the type implements t or a pointer to the type implements t. If the provided type is not +// unmarshaling into a type if the type implements t or a pointer to the type implements t. If the provided type is not // an interface (i.e. t.Kind() != reflect.Interface), this method will panic. +// +// Deprecated: Use Registry.RegisterInterfaceDecoder instead. func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - if t.Kind() != reflect.Interface { - panicStr := fmt.Sprintf("RegisterHookDecoder expects a type with kind reflect.Interface, "+ - "got type %s with kind %s", t, t.Kind()) - panic(panicStr) - } - - for idx, decoder := range rb.interfaceDecoders { - if decoder.i == t { - rb.interfaceDecoders[idx].vd = dec - return rb - } - } - - rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec}) + rb.registry.RegisterInterfaceDecoder(t, dec) return rb } // RegisterEncoder registers the provided type and encoder pair. // -// Deprecated: Use RegisterTypeEncoder or RegisterHookEncoder instead. +// Deprecated: Use Registry.RegisterTypeEncoder or Registry.RegisterInterfaceEncoder instead. 
func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { if t == tEmpty { - rb.typeEncoders[t] = enc + rb.registry.RegisterTypeEncoder(t, enc) return rb } switch t.Kind() { case reflect.Interface: - for idx, ir := range rb.interfaceEncoders { - if ir.i == t { - rb.interfaceEncoders[idx].ve = enc - return rb - } - } - - rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc}) + rb.registry.RegisterInterfaceEncoder(t, enc) default: - rb.typeEncoders[t] = enc + rb.registry.RegisterTypeEncoder(t, enc) } return rb } // RegisterDecoder registers the provided type and decoder pair. // -// Deprecated: Use RegisterTypeDecoder or RegisterHookDecoder instead. +// Deprecated: Use Registry.RegisterTypeDecoder or Registry.RegisterInterfaceDecoder instead. func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { if t == nil { - rb.typeDecoders[nil] = dec + rb.registry.RegisterTypeDecoder(t, dec) return rb } if t == tEmpty { - rb.typeDecoders[t] = dec + rb.registry.RegisterTypeDecoder(t, dec) return rb } switch t.Kind() { case reflect.Interface: - for idx, ir := range rb.interfaceDecoders { - if ir.i == t { - rb.interfaceDecoders[idx].vd = dec - return rb - } - } - - rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec}) + rb.registry.RegisterInterfaceDecoder(t, dec) default: - rb.typeDecoders[t] = dec + rb.registry.RegisterTypeDecoder(t, dec) } return rb } -// RegisterDefaultEncoder will registr the provided ValueEncoder to the provided +// RegisterDefaultEncoder will register the provided ValueEncoder to the provided // kind. +// +// Deprecated: Use Registry.RegisterKindEncoder instead. 
func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder { - rb.kindEncoders[kind] = enc + rb.registry.RegisterKindEncoder(kind, enc) return rb } // RegisterDefaultDecoder will register the provided ValueDecoder to the // provided kind. +// +// Deprecated: Use Registry.RegisterKindDecoder instead. func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder { - rb.kindDecoders[kind] = dec + rb.registry.RegisterKindDecoder(kind, dec) return rb } @@ -256,120 +205,235 @@ func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDe // to decode to bson.Raw, use the following code: // // rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) +// +// Deprecated: Use Registry.RegisterTypeMapEntry instead. func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder { - rb.typeMap[bt] = rt + rb.registry.RegisterTypeMapEntry(bt, rt) return rb } // Build creates a Registry from the current state of this RegistryBuilder. +// +// Deprecated: Use NewRegistry instead. 
func (rb *RegistryBuilder) Build() *Registry { - registry := new(Registry) - - registry.typeEncoders = make(map[reflect.Type]ValueEncoder) - for t, enc := range rb.typeEncoders { - registry.typeEncoders[t] = enc + r := &Registry{ + interfaceEncoders: append([]interfaceValueEncoder(nil), rb.registry.interfaceEncoders...), + interfaceDecoders: append([]interfaceValueDecoder(nil), rb.registry.interfaceDecoders...), + typeEncoders: rb.registry.typeEncoders.Clone(), + typeDecoders: rb.registry.typeDecoders.Clone(), + kindEncoders: rb.registry.kindEncoders.Clone(), + kindDecoders: rb.registry.kindDecoders.Clone(), } + rb.registry.typeMap.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + r.typeMap.Store(k, v) + } + return true + }) + return r +} - registry.typeDecoders = make(map[reflect.Type]ValueDecoder) - for t, dec := range rb.typeDecoders { - registry.typeDecoders[t] = dec +// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main +// typed passed around and Encoders and Decoders are constructed from it. +type Registry struct { + interfaceEncoders []interfaceValueEncoder + interfaceDecoders []interfaceValueDecoder + typeEncoders *typeEncoderCache + typeDecoders *typeDecoderCache + kindEncoders *kindEncoderCache + kindDecoders *kindDecoderCache + typeMap sync.Map // map[bsontype.Type]reflect.Type +} + +// NewRegistry creates a new empty Registry. +func NewRegistry() *Registry { + return &Registry{ + typeEncoders: new(typeEncoderCache), + typeDecoders: new(typeDecoderCache), + kindEncoders: new(kindEncoderCache), + kindDecoders: new(kindDecoderCache), } +} - registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.interfaceEncoders)) - copy(registry.interfaceEncoders, rb.interfaceEncoders) +// RegisterTypeEncoder registers the provided ValueEncoder for the provided type. 
+// +// The type will be used as provided, so an encoder can be registered for a type and a different +// encoder can be registered for a pointer to that type. +// +// If the given type is an interface, the encoder will be called when marshaling a type that is +// that interface. It will not be called when marshaling a non-interface type that implements the +// interface. To get the latter behavior, call RegisterHookEncoder instead. +// +// RegisterTypeEncoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) { + r.typeEncoders.Store(valueType, enc) +} + +// RegisterTypeDecoder registers the provided ValueDecoder for the provided type. +// +// The type will be used as provided, so a decoder can be registered for a type and a different +// decoder can be registered for a pointer to that type. +// +// If the given type is an interface, the decoder will be called when unmarshaling into a type that +// is that interface. It will not be called when unmarshaling into a non-interface type that +// implements the interface. To get the latter behavior, call RegisterHookDecoder instead. +// +// RegisterTypeDecoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) { + r.typeDecoders.Store(valueType, dec) +} + +// RegisterKindEncoder registers the provided ValueEncoder for the provided kind. +// +// Use RegisterKindEncoder to register an encoder for any type with the same underlying kind. For +// example, consider the type MyInt defined as +// +// type MyInt int32 +// +// To define an encoder for MyInt and int32, use RegisterKindEncoder like +// +// reg.RegisterKindEncoder(reflect.Int32, myEncoder) +// +// RegisterKindEncoder should not be called concurrently with any other Registry method. 
+func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) { + r.kindEncoders.Store(kind, enc) +} + +// RegisterKindDecoder registers the provided ValueDecoder for the provided kind. +// +// Use RegisterKindDecoder to register a decoder for any type with the same underlying kind. For +// example, consider the type MyInt defined as +// +// type MyInt int32 +// +// To define an decoder for MyInt and int32, use RegisterKindDecoder like +// +// reg.RegisterKindDecoder(reflect.Int32, myDecoder) +// +// RegisterKindDecoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) { + r.kindDecoders.Store(kind, dec) +} - registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.interfaceDecoders)) - copy(registry.interfaceDecoders, rb.interfaceDecoders) +// RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will +// be called when marshaling a type if the type implements iface or a pointer to the type +// implements iface. If the provided type is not an interface +// (i.e. iface.Kind() != reflect.Interface), this method will panic. +// +// RegisterInterfaceEncoder should not be called concurrently with any other Registry method. 
+func (r *Registry) RegisterInterfaceEncoder(iface reflect.Type, enc ValueEncoder) { + if iface.Kind() != reflect.Interface { + panicStr := fmt.Errorf("RegisterInterfaceEncoder expects a type with kind reflect.Interface, "+ + "got type %s with kind %s", iface, iface.Kind()) + panic(panicStr) + } - registry.kindEncoders = make(map[reflect.Kind]ValueEncoder) - for kind, enc := range rb.kindEncoders { - registry.kindEncoders[kind] = enc + for idx, encoder := range r.interfaceEncoders { + if encoder.i == iface { + r.interfaceEncoders[idx].ve = enc + return + } } - registry.kindDecoders = make(map[reflect.Kind]ValueDecoder) - for kind, dec := range rb.kindDecoders { - registry.kindDecoders[kind] = dec + r.interfaceEncoders = append(r.interfaceEncoders, interfaceValueEncoder{i: iface, ve: enc}) +} + +// RegisterInterfaceDecoder registers an decoder for the provided interface type iface. This decoder will +// be called when unmarshaling into a type if the type implements iface or a pointer to the type +// implements iface. If the provided type is not an interface (i.e. iface.Kind() != reflect.Interface), +// this method will panic. +// +// RegisterInterfaceDecoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder) { + if iface.Kind() != reflect.Interface { + panicStr := fmt.Errorf("RegisterInterfaceDecoder expects a type with kind reflect.Interface, "+ + "got type %s with kind %s", iface, iface.Kind()) + panic(panicStr) } - registry.typeMap = make(map[bsontype.Type]reflect.Type) - for bt, rt := range rb.typeMap { - registry.typeMap[bt] = rt + for idx, decoder := range r.interfaceDecoders { + if decoder.i == iface { + r.interfaceDecoders[idx].vd = dec + return + } } - return registry + r.interfaceDecoders = append(r.interfaceDecoders, interfaceValueDecoder{i: iface, vd: dec}) } -// LookupEncoder inspects the registry for an encoder for the given type. 
The lookup precedence works as follows: +// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this +// mapping is decoding situations where an empty interface is used and a default type needs to be +// created and decoded into. // -// 1. An encoder registered for the exact type. If the given type represents an interface, an encoder registered using -// RegisterTypeEncoder for the interface will be selected. +// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON +// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents +// to decode to bson.Raw, use the following code: +// +// reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) +func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) { + r.typeMap.Store(bt, rt) +} + +// LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup +// order: // -// 2. An encoder registered using RegisterHookEncoder for an interface implemented by the type or by a pointer to the -// type. +// 1. An encoder registered for the exact type. If the given type is an interface, an encoder +// registered using RegisterTypeEncoder for that interface will be selected. // -// 3. An encoder registered for the reflect.Kind of the value. +// 2. An encoder registered using RegisterInterfaceEncoder for an interface implemented by the type +// or by a pointer to the type. // -// If no encoder is found, an error of type ErrNoEncoder is returned. -func (r *Registry) LookupEncoder(t reflect.Type) (ValueEncoder, error) { - encodererr := ErrNoEncoder{Type: t} - r.mu.RLock() - enc, found := r.lookupTypeEncoder(t) - r.mu.RUnlock() +// 3. An encoder registered using RegisterKindEncoder for the kind of value. +// +// If no encoder is found, an error of type ErrNoEncoder is returned. 
LookupEncoder is safe for +// concurrent use by multiple goroutines after all codecs and encoders are registered. +func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { + enc, found := r.lookupTypeEncoder(valueType) if found { if enc == nil { - return nil, ErrNoEncoder{Type: t} + return nil, ErrNoEncoder{Type: valueType} } return enc, nil } - enc, found = r.lookupInterfaceEncoder(t, true) + enc, found = r.lookupInterfaceEncoder(valueType, true) if found { - r.mu.Lock() - r.typeEncoders[t] = enc - r.mu.Unlock() - return enc, nil + return r.typeEncoders.LoadOrStore(valueType, enc), nil } - - if t == nil { - r.mu.Lock() - r.typeEncoders[t] = nil - r.mu.Unlock() - return nil, encodererr + if valueType == nil { + r.storeTypeEncoder(valueType, nil) + return nil, ErrNoEncoder{Type: valueType} } - enc, found = r.kindEncoders[t.Kind()] - if !found { - r.mu.Lock() - r.typeEncoders[t] = nil - r.mu.Unlock() - return nil, encodererr + if v, ok := r.kindEncoders.Load(valueType.Kind()); ok { + return r.storeTypeEncoder(valueType, v), nil } + r.storeTypeEncoder(valueType, nil) + return nil, ErrNoEncoder{Type: valueType} +} - r.mu.Lock() - r.typeEncoders[t] = enc - r.mu.Unlock() - return enc, nil +func (r *Registry) storeTypeEncoder(rt reflect.Type, enc ValueEncoder) ValueEncoder { + return r.typeEncoders.LoadOrStore(rt, enc) } -func (r *Registry) lookupTypeEncoder(t reflect.Type) (ValueEncoder, bool) { - enc, found := r.typeEncoders[t] - return enc, found +func (r *Registry) lookupTypeEncoder(rt reflect.Type) (ValueEncoder, bool) { + return r.typeEncoders.Load(rt) } -func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (ValueEncoder, bool) { - if t == nil { +func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) { + if valueType == nil { return nil, false } for _, ienc := range r.interfaceEncoders { - if t.Implements(ienc.i) { + if valueType.Implements(ienc.i) { return ienc.ve, true } - 
if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(ienc.i) { - // if *t implements an interface, this will catch if t implements an interface further ahead - // in interfaceEncoders - defaultEnc, found := r.lookupInterfaceEncoder(t, false) + if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(ienc.i) { + // if *t implements an interface, this will catch if t implements an interface further + // ahead in interfaceEncoders + defaultEnc, found := r.lookupInterfaceEncoder(valueType, false) if !found { - defaultEnc = r.kindEncoders[t.Kind()] + defaultEnc, _ = r.kindEncoders.Load(valueType.Kind()) } return newCondAddrEncoder(ienc.ve, defaultEnc), true } @@ -377,70 +441,62 @@ func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (Value return nil, false } -// LookupDecoder inspects the registry for an decoder for the given type. The lookup precedence works as follows: +// LookupDecoder returns the first matching decoder in the Registry. It uses the following lookup +// order: // -// 1. A decoder registered for the exact type. If the given type represents an interface, a decoder registered using -// RegisterTypeDecoder for the interface will be selected. +// 1. A decoder registered for the exact type. If the given type is an interface, a decoder +// registered using RegisterTypeDecoder for that interface will be selected. // -// 2. A decoder registered using RegisterHookDecoder for an interface implemented by the type or by a pointer to the -// type. +// 2. A decoder registered using RegisterInterfaceDecoder for an interface implemented by the type or by +// a pointer to the type. // -// 3. A decoder registered for the reflect.Kind of the value. +// 3. A decoder registered using RegisterKindDecoder for the kind of value. // -// If no decoder is found, an error of type ErrNoDecoder is returned. 
-func (r *Registry) LookupDecoder(t reflect.Type) (ValueDecoder, error) { - if t == nil { +// If no decoder is found, an error of type ErrNoDecoder is returned. LookupDecoder is safe for +// concurrent use by multiple goroutines after all codecs and decoders are registered. +func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { + if valueType == nil { return nil, ErrNilType } - decodererr := ErrNoDecoder{Type: t} - r.mu.RLock() - dec, found := r.lookupTypeDecoder(t) - r.mu.RUnlock() + dec, found := r.lookupTypeDecoder(valueType) if found { if dec == nil { - return nil, ErrNoDecoder{Type: t} + return nil, ErrNoDecoder{Type: valueType} } return dec, nil } - dec, found = r.lookupInterfaceDecoder(t, true) + dec, found = r.lookupInterfaceDecoder(valueType, true) if found { - r.mu.Lock() - r.typeDecoders[t] = dec - r.mu.Unlock() - return dec, nil + return r.storeTypeDecoder(valueType, dec), nil } - dec, found = r.kindDecoders[t.Kind()] - if !found { - r.mu.Lock() - r.typeDecoders[t] = nil - r.mu.Unlock() - return nil, decodererr + if v, ok := r.kindDecoders.Load(valueType.Kind()); ok { + return r.storeTypeDecoder(valueType, v), nil } + r.storeTypeDecoder(valueType, nil) + return nil, ErrNoDecoder{Type: valueType} +} - r.mu.Lock() - r.typeDecoders[t] = dec - r.mu.Unlock() - return dec, nil +func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) { + return r.typeDecoders.Load(valueType) } -func (r *Registry) lookupTypeDecoder(t reflect.Type) (ValueDecoder, bool) { - dec, found := r.typeDecoders[t] - return dec, found +func (r *Registry) storeTypeDecoder(typ reflect.Type, dec ValueDecoder) ValueDecoder { + return r.typeDecoders.LoadOrStore(typ, dec) } -func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (ValueDecoder, bool) { +func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) { for _, idec := range r.interfaceDecoders { - if t.Implements(idec.i) { 
+ if valueType.Implements(idec.i) { return idec.vd, true } - if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(idec.i) { - // if *t implements an interface, this will catch if t implements an interface further ahead - // in interfaceDecoders - defaultDec, found := r.lookupInterfaceDecoder(t, false) + if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(idec.i) { + // if *t implements an interface, this will catch if t implements an interface further + // ahead in interfaceDecoders + defaultDec, found := r.lookupInterfaceDecoder(valueType, false) if !found { - defaultDec = r.kindDecoders[t.Kind()] + defaultDec, _ = r.kindDecoders.Load(valueType.Kind()) } return newCondAddrDecoder(idec.vd, defaultDec), true } @@ -450,12 +506,14 @@ func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (Value // LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON // type. If no type is found, ErrNoTypeMapEntry is returned. +// +// LookupTypeMapEntry should not be called concurrently with any other Registry method. func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { - t, ok := r.typeMap[bt] - if !ok || t == nil { + v, ok := r.typeMap.Load(bt) + if v == nil || !ok { return nil, ErrNoTypeMapEntry{Type: bt} } - return t, nil + return v.(reflect.Type), nil } type interfaceValueEncoder struct { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go index 3c1b6b860..a43daf005 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go @@ -19,13 +19,21 @@ import ( var defaultSliceCodec = NewSliceCodec() // SliceCodec is the Codec used for slice values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// SliceCodec registered. 
type SliceCodec struct { + // EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of + // BSON null. + // + // Deprecated: Use bson.Encoder.NilSliceAsEmpty instead. EncodeNilAsEmpty bool } -var _ ValueCodec = &MapCodec{} - // NewSliceCodec returns a MapCodec with options opts. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// SliceCodec registered. func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec { sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...) @@ -42,21 +50,19 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} } - if val.IsNil() && !sc.EncodeNilAsEmpty { + if val.IsNil() && !sc.EncodeNilAsEmpty && !ec.nilSliceAsEmpty { return vw.WriteNull() } // If we have a []byte we want to treat it as a binary instead of as an array. if val.Type().Elem() == tByte { - var byteSlice []byte - for idx := 0; idx < val.Len(); idx++ { - byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) - } + byteSlice := make([]byte, val.Len()) + reflect.Copy(reflect.ValueOf(byteSlice), val) return vw.WriteBinary(byteSlice) } // If we have a []primitive.E we want to treat it as a document instead of as an array. 
- if val.Type().ConvertibleTo(tD) { + if val.Type() == tD || val.Type().ConvertibleTo(tD) { d := val.Convert(tD).Interface().(primitive.D) dw, err := vw.WriteDocument() @@ -145,11 +151,8 @@ func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r if val.IsNil() { val.Set(reflect.MakeSlice(val.Type(), 0, len(data))) } - val.SetLen(0) - for _, elem := range data { - val.Set(reflect.Append(val, reflect.ValueOf(elem))) - } + val.Set(reflect.AppendSlice(val, reflect.ValueOf(data))) return nil case bsontype.String: if sliceType := val.Type().Elem(); sliceType != tByte { @@ -164,11 +167,8 @@ func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r if val.IsNil() { val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr))) } - val.SetLen(0) - for _, elem := range byteStr { - val.Set(reflect.Append(val, reflect.ValueOf(elem))) - } + val.Set(reflect.AppendSlice(val, reflect.ValueOf(byteStr))) return nil default: return fmt.Errorf("cannot decode %v into a slice", vrType) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go index 5332b7c3b..ff931b725 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go @@ -15,26 +15,38 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" ) -// StringCodec is the Codec used for struct values. +// StringCodec is the Codec used for string values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// StringCodec registered. type StringCodec struct { + // DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. + // If false, a string made from the raw object ID bytes will be used. Defaults to true. + // + // Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. 
DecodeObjectIDAsHex bool } var ( defaultStringCodec = NewStringCodec() - _ ValueCodec = defaultStringCodec + // Assert that defaultStringCodec satisfies the typeDecoder interface, which allows it to be + // used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. _ typeDecoder = defaultStringCodec ) // NewStringCodec returns a StringCodec with options opts. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// StringCodec registered. func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { stringOpt := bsonoptions.MergeStringCodecOptions(opts...) return &StringCodec{*stringOpt.DecodeObjectIDAsHex} } // EncodeValue is the ValueEncoder for string types. -func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (sc *StringCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if val.Kind() != reflect.String { return ValueEncoderError{ Name: "StringEncodeValue", @@ -46,7 +58,7 @@ func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, va return vw.WriteString(val.String()) } -func (sc *StringCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (sc *StringCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t.Kind() != reflect.String { return emptyValue, ValueDecoderError{ Name: "StringDecodeValue", @@ -71,6 +83,7 @@ func (sc *StringCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t ref if sc.DecodeObjectIDAsHex { str = oid.Hex() } else { + // TODO(GODRIVER-2796): Return an error here instead of decoding to a garbled string. 
byteArray := [12]byte(oid) str = string(byteArray[:]) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go index da1ae18e0..4cde0a4d6 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go @@ -59,14 +59,43 @@ type Zeroer interface { } // StructCodec is the Codec used for struct values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// StructCodec registered. type StructCodec struct { - cache map[reflect.Type]*structDescription - l sync.RWMutex - parser StructTagParser - DecodeZeroStruct bool - DecodeDeepZeroInline bool - EncodeOmitDefaultStruct bool - AllowUnexportedFields bool + cache sync.Map // map[reflect.Type]*structDescription + parser StructTagParser + + // DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the + // destination value passed to Decode before unmarshaling BSON documents into them. + // + // Deprecated: Use bson.Decoder.ZeroStructs instead. + DecodeZeroStruct bool + + // DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the + // destination value passed to Decode before unmarshaling BSON documents into them. + // + // Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. + DecodeDeepZeroInline bool + + // EncodeOmitDefaultStruct causes the Encoder to consider the zero value for a struct (e.g. + // MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag + // option is set. + // + // Deprecated: Use bson.Encoder.OmitZeroStruct instead. + EncodeOmitDefaultStruct bool + + // AllowUnexportedFields allows encoding and decoding values from un-exported struct fields. 
+ // + // Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be + // supported in Go Driver 2.0. + AllowUnexportedFields bool + + // OverwriteDuplicatedInlinedFields, if false, causes EncodeValue to return an error if there is + // a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The + // default value is true. + // + // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates instead. OverwriteDuplicatedInlinedFields bool } @@ -74,6 +103,9 @@ var _ ValueEncoder = &StructCodec{} var _ ValueDecoder = &StructCodec{} // NewStructCodec returns a StructCodec that uses p for struct tag parsing. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// StructCodec registered. func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { if p == nil { return nil, errors.New("a StructTagParser must be provided to NewStructCodec") @@ -82,7 +114,6 @@ func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) structOpt := bsonoptions.MergeStructCodecOptions(opts...) codec := &StructCodec{ - cache: make(map[reflect.Type]*structDescription), parser: p, } @@ -106,12 +137,12 @@ func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) } // EncodeValue handles encoding generic struct types. 
-func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Struct { return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} } - sd, err := sc.describeStruct(r.Registry, val.Type()) + sd, err := sc.describeStruct(ec.Registry, val.Type(), ec.useJSONStructTags, ec.errorOnInlineDuplicates) if err != nil { return err } @@ -131,7 +162,7 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r } } - desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(r, desc.encoder, rv) + desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv) if err != nil && err != errInvalidValue { return err @@ -158,17 +189,17 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r encoder := desc.encoder - var isZero bool - rvInterface := rv.Interface() + var zero bool if cz, ok := encoder.(CodecZeroer); ok { - isZero = cz.IsTypeZero(rvInterface) + zero = cz.IsTypeZero(rv.Interface()) } else if rv.Kind() == reflect.Interface { - // sc.isZero will not treat an interface rv as an interface, so we need to check for the zero interface separately. - isZero = rv.IsNil() + // isZero will not treat an interface rv as an interface, so we need to check for the + // zero interface separately. 
+ zero = rv.IsNil() } else { - isZero = sc.isZero(rvInterface) + zero = isZero(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) } - if desc.omitEmpty && isZero { + if desc.omitEmpty && zero { continue } @@ -177,7 +208,17 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r return err } - ectx := EncodeContext{Registry: r.Registry, MinSize: desc.minSize} + ectx := EncodeContext{ + Registry: ec.Registry, + MinSize: desc.minSize || ec.MinSize, + errorOnInlineDuplicates: ec.errorOnInlineDuplicates, + stringifyMapKeysWithFmt: ec.stringifyMapKeysWithFmt, + nilMapAsEmpty: ec.nilMapAsEmpty, + nilSliceAsEmpty: ec.nilSliceAsEmpty, + nilByteSliceAsEmpty: ec.nilByteSliceAsEmpty, + omitZeroStruct: ec.omitZeroStruct, + useJSONStructTags: ec.useJSONStructTags, + } err = encoder.EncodeValue(ectx, vw2, rv) if err != nil { return err @@ -191,7 +232,7 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r return exists } - return defaultMapCodec.mapEncodeValue(r, dw, rv, collisionFn) + return defaultMapCodec.mapEncodeValue(ec, dw, rv, collisionFn) } return dw.WriteDocumentEnd() @@ -213,7 +254,7 @@ func newDecodeError(key string, original error) error { // DecodeValue implements the Codec interface. // By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. // For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. 
-func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Kind() != reflect.Struct { return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} } @@ -238,12 +279,12 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) } - sd, err := sc.describeStruct(r.Registry, val.Type()) + sd, err := sc.describeStruct(dc.Registry, val.Type(), dc.useJSONStructTags, false) if err != nil { return err } - if sc.DecodeZeroStruct { + if sc.DecodeZeroStruct || dc.zeroStructs { val.Set(reflect.Zero(val.Type())) } if sc.DecodeDeepZeroInline && sd.inline { @@ -254,7 +295,7 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r var inlineMap reflect.Value if sd.inlineMap >= 0 { inlineMap = val.Field(sd.inlineMap) - decoder, err = r.LookupDecoder(inlineMap.Type().Elem()) + decoder, err = dc.LookupDecoder(inlineMap.Type().Elem()) if err != nil { return err } @@ -298,8 +339,8 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r } elem := reflect.New(inlineMap.Type().Elem()).Elem() - r.Ancestor = inlineMap.Type() - err = decoder.DecodeValue(r, vr, elem) + dc.Ancestor = inlineMap.Type() + err = decoder.DecodeValue(dc, vr, elem) if err != nil { return err } @@ -327,9 +368,14 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r field = field.Addr() dctx := DecodeContext{ - Registry: r.Registry, - Truncate: fd.truncate || r.Truncate, - defaultDocumentType: r.defaultDocumentType, + Registry: dc.Registry, + Truncate: fd.truncate || dc.Truncate, + defaultDocumentType: dc.defaultDocumentType, + binaryAsSlice: dc.binaryAsSlice, + useJSONStructTags: dc.useJSONStructTags, + useLocalTimeZone: 
dc.useLocalTimeZone, + zeroMaps: dc.zeroMaps, + zeroStructs: dc.zeroStructs, } if fd.decoder == nil { @@ -345,51 +391,32 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r return nil } -func (sc *StructCodec) isZero(i interface{}) bool { - v := reflect.ValueOf(i) - - // check the value validity - if !v.IsValid() { - return true +func isZero(v reflect.Value, omitZeroStruct bool) bool { + kind := v.Kind() + if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) { + return v.Interface().(Zeroer).IsZero() } - - if z, ok := v.Interface().(Zeroer); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) { - return z.IsZero() - } - - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Struct: - if sc.EncodeOmitDefaultStruct { - vt := v.Type() - if vt == tTime { - return v.Interface().(time.Time).IsZero() + if kind == reflect.Struct { + if !omitZeroStruct { + return false + } + vt := v.Type() + if vt == tTime { + return v.Interface().(time.Time).IsZero() + } + numField := vt.NumField() + for i := 0; i < numField; i++ { + ff := vt.Field(i) + if ff.PkgPath != "" && !ff.Anonymous { + continue // Private field } - for i := 0; i < v.NumField(); i++ { - if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { - continue // Private field - } - fld := v.Field(i) - if !sc.isZero(fld.Interface()) { - return false - } + if !isZero(v.Field(i), omitZeroStruct) { + return false } - return true } + return true } - - return false + return !v.IsValid() || v.IsZero() } type structDescription struct { @@ 
-440,16 +467,35 @@ func (bi byIndex) Less(i, j int) bool { return len(bi[i].inline) < len(bi[j].inline) } -func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) { +func (sc *StructCodec) describeStruct( + r *Registry, + t reflect.Type, + useJSONStructTags bool, + errorOnDuplicates bool, +) (*structDescription, error) { // We need to analyze the struct, including getting the tags, collecting // information about inlining, and create a map of the field name to the field. - sc.l.RLock() - ds, exists := sc.cache[t] - sc.l.RUnlock() - if exists { - return ds, nil + if v, ok := sc.cache.Load(t); ok { + return v.(*structDescription), nil } + // TODO(charlie): Only describe the struct once when called + // concurrently with the same type. + ds, err := sc.describeStructSlow(r, t, useJSONStructTags, errorOnDuplicates) + if err != nil { + return nil, err + } + if v, loaded := sc.cache.LoadOrStore(t, ds); loaded { + ds = v.(*structDescription) + } + return ds, nil +} +func (sc *StructCodec) describeStructSlow( + r *Registry, + t reflect.Type, + useJSONStructTags bool, + errorOnDuplicates bool, +) (*structDescription, error) { numFields := t.NumField() sd := &structDescription{ fm: make(map[string]fieldDescription, numFields), @@ -482,7 +528,14 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr decoder: decoder, } - stags, err := sc.parser.ParseStructTags(sf) + var stags StructTags + // If the caller requested that we use JSON struct tags, use the JSONFallbackStructTagParser + // instead of the parser defined on the codec. 
+ if useJSONStructTags { + stags, err = JSONFallbackStructTagParser.ParseStructTags(sf) + } else { + stags, err = sc.parser.ParseStructTags(sf) + } if err != nil { return nil, err } @@ -512,7 +565,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr } fallthrough case reflect.Struct: - inlinesf, err := sc.describeStruct(r, sfType) + inlinesf, err := sc.describeStruct(r, sfType, useJSONStructTags, errorOnDuplicates) if err != nil { return nil, err } @@ -564,7 +617,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr continue } dominant, ok := dominantField(fields[i : i+advance]) - if !ok || !sc.OverwriteDuplicatedInlinedFields { + if !ok || !sc.OverwriteDuplicatedInlinedFields || errorOnDuplicates { return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name) } sd.fl = append(sd.fl, dominant) @@ -573,10 +626,6 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr sort.Sort(byIndex(sd.fl)) - sc.l.Lock() - sc.cache[t] = sd - sc.l.Unlock() - return sd, nil } @@ -634,21 +683,21 @@ func getInlineField(val reflect.Value, index []int) (reflect.Value, error) { // DeepZero returns recursive zero object func deepZero(st reflect.Type) (result reflect.Value) { - result = reflect.Indirect(reflect.New(st)) - - if result.Kind() == reflect.Struct { - for i := 0; i < result.NumField(); i++ { - if f := result.Field(i); f.Kind() == reflect.Ptr { - if f.CanInterface() { - if ft := reflect.TypeOf(f.Interface()); ft.Elem().Kind() == reflect.Struct { - result.Field(i).Set(recursivePointerTo(deepZero(ft.Elem()))) - } + if st.Kind() == reflect.Struct { + numField := st.NumField() + for i := 0; i < numField; i++ { + if result == emptyValue { + result = reflect.Indirect(reflect.New(st)) + } + f := result.Field(i) + if f.CanInterface() { + if f.Type().Kind() == reflect.Struct { + result.Field(i).Set(recursivePointerTo(deepZero(f.Type().Elem()))) } } } } - - return + return result } // 
recursivePointerTo calls reflect.New(v.Type) but recursively for its fields inside diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go index 62708c5c7..18d85bfb0 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go @@ -12,12 +12,16 @@ import ( ) // StructTagParser returns the struct tags for a given struct field. +// +// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. type StructTagParser interface { ParseStructTags(reflect.StructField) (StructTags, error) } // StructTagParserFunc is an adapter that allows a generic function to be used // as a StructTagParser. +// +// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. type StructTagParserFunc func(reflect.StructField) (StructTags, error) // ParseStructTags implements the StructTagParser interface. @@ -50,7 +54,7 @@ func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructT // Skip This struct field should be skipped. This is usually denoted by parsing a "-" // for the name. // -// TODO(skriptble): Add tags for undefined as nil and for null as nil. +// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. type StructTags struct { Name string OmitEmpty bool @@ -85,6 +89,8 @@ type StructTags struct { // A struct tag either consisting entirely of '-' or with a bson key with a // value consisting entirely of '-' will return a StructTags with Skip true and // the remaining fields will be their default values. +// +// Deprecated: DefaultStructTagParser will be removed in Go Driver 2.0. 
var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { key := strings.ToLower(sf.Name) tag, ok := sf.Tag.Lookup("bson") @@ -125,6 +131,9 @@ func parseTags(key string, tag string) (StructTags, error) { // JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser // but will also fallback to parsing the json tag instead on a field where the // bson tag isn't available. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] and +// [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead. var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { key := strings.ToLower(sf.Name) tag, ok := sf.Tag.Lookup("bson") diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go index ec7e30f72..7b005a995 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go @@ -22,18 +22,28 @@ const ( ) // TimeCodec is the Codec used for time.Time values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// TimeCodec registered. type TimeCodec struct { + // UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. + // + // Deprecated: Use bson.Decoder.UseLocalTimeZone instead. UseLocalTimeZone bool } var ( defaultTimeCodec = NewTimeCodec() - _ ValueCodec = defaultTimeCodec + // Assert that defaultTimeCodec satisfies the typeDecoder interface, which allows it to be used + // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. _ typeDecoder = defaultTimeCodec ) // NewTimeCodec returns a TimeCodec with options opts. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// TimeCodec registered. 
func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec { timeOpt := bsonoptions.MergeTimeCodecOptions(opts...) @@ -95,7 +105,7 @@ func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t refle return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType) } - if !tc.UseLocalTimeZone { + if !tc.UseLocalTimeZone && !dc.useLocalTimeZone { timeVal = timeVal.UTC() } return reflect.ValueOf(timeVal), nil @@ -117,7 +127,7 @@ func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val re } // EncodeValue is the ValueEncoderFunc for time.TIme. -func (tc *TimeCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (tc *TimeCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tTime { return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go index 07f4b70e6..6ade17b7d 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go @@ -34,6 +34,7 @@ var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem() var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem() +var tZeroer = reflect.TypeOf((*Zeroer)(nil)).Elem() var tBinary = reflect.TypeOf(primitive.Binary{}) var tUndefined = reflect.TypeOf(primitive.Undefined{}) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go index 0b21ce999..7eb106905 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go @@ -17,18 +17,29 @@ 
import ( ) // UIntCodec is the Codec used for uint values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// UIntCodec registered. type UIntCodec struct { + // EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the + // minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value. + // + // Deprecated: Use bson.Encoder.IntMinSize instead. EncodeToMinSize bool } var ( defaultUIntCodec = NewUIntCodec() - _ ValueCodec = defaultUIntCodec + // Assert that defaultUIntCodec satisfies the typeDecoder interface, which allows it to be used + // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. _ typeDecoder = defaultUIntCodec ) // NewUIntCodec returns a UIntCodec with options opts. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// UIntCodec registered. func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go index b1256a4dc..996bd1712 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type ByteSliceCodecOptions struct { EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. 
} // ByteSliceCodec creates a new *ByteSliceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func ByteSliceCodec() *ByteSliceCodecOptions { return &ByteSliceCodecOptions{} } // SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions { bs.EncodeNilAsEmpty = &b return bs } // MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions { bs := ByteSliceCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go index 6caaa000e..f522c7e03 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type EmptyInterfaceCodecOptions struct { DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false. 
} // EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions { return &EmptyInterfaceCodecOptions{} } // SetDecodeBinaryAsSlice specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead. func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions { e.DecodeBinaryAsSlice = &b return e } // MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions { e := EmptyInterfaceCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go index 7a6a880b8..a7a7c1d98 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go @@ -7,6 +7,9 @@ package bsonoptions // MapCodecOptions represents all possible options for map encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type MapCodecOptions struct { DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. 
Defaults to false. @@ -19,17 +22,24 @@ type MapCodecOptions struct { } // MapCodec creates a new *MapCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func MapCodec() *MapCodecOptions { return &MapCodecOptions{} } // SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions { t.DecodeZerosMap = &b return t } // SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { t.EncodeNilAsEmpty = &b return t @@ -40,12 +50,17 @@ func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { // type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with // fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer // will override TextMarshaler/TextUnmarshaler. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions { t.EncodeKeysWithStringer = &b return t } // MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { s := MapCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go index ef965e4b4..3c1e4f35b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // SliceCodecOptions represents all possible options for slice encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type SliceCodecOptions struct { EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false. } // SliceCodec creates a new *SliceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func SliceCodec() *SliceCodecOptions { return &SliceCodecOptions{} } // SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions { s.EncodeNilAsEmpty = &b return s } // MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions { s := SliceCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go index 65964f420..f8b76f996 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go @@ -9,23 +9,34 @@ package bsonoptions var defaultDecodeOIDAsHex = true // StringCodecOptions represents all possible options for string encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type StringCodecOptions struct { DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true. } // StringCodec creates a new *StringCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func StringCodec() *StringCodecOptions { return &StringCodecOptions{} } // SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made // from the raw object ID bytes will be used. Defaults to true. +// +// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions { t.DecodeObjectIDAsHex = &b return t } // MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions { s := &StringCodecOptions{&defaultDecodeOIDAsHex} for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go index 78d1dd866..1cbfa32e8 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go @@ -9,6 +9,9 @@ package bsonoptions var defaultOverwriteDuplicatedInlinedFields = true // StructCodecOptions represents all possible options for struct encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type StructCodecOptions struct { DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false. DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when a inline value is decoded. Defaults to false. @@ -18,17 +21,24 @@ type StructCodecOptions struct { } // StructCodec creates a new *StructCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func StructCodec() *StructCodecOptions { return &StructCodecOptions{} } // SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead. func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions { t.DecodeZeroStruct = &b return t } // SetDecodeDeepZeroInline specifies if structs should be zeroed before decoding into them. Defaults to false. +// +// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. 
func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions { t.DecodeDeepZeroInline = &b return t @@ -36,6 +46,8 @@ func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions // SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all // its values set to their default value. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions { t.EncodeOmitDefaultStruct = &b return t @@ -45,18 +57,26 @@ func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOpti // same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when // encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if // there are duplicate keys after the struct is inlined. Defaults to true. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions { t.OverwriteDuplicatedInlinedFields = &b return t } // SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. +// +// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be +// supported in Go Driver 2.0. func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions { t.AllowUnexportedFields = &b return t } // MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions { s := &StructCodecOptions{ OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields, diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go index 13496d121..3f38433d2 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // TimeCodecOptions represents all possible options for time.Time encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type TimeCodecOptions struct { UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false. } // TimeCodec creates a new *TimeCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func TimeCodec() *TimeCodecOptions { return &TimeCodecOptions{} } // SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions { t.UseLocalTimeZone = &b return t } // MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions { t := TimeCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go index e08b7f192..5091e4d96 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // UIntCodecOptions represents all possible options for uint encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type UIntCodecOptions struct { EncodeToMinSize *bool // Specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. } // UIntCodec creates a new *UIntCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func UIntCodec() *UIntCodecOptions { return &UIntCodecOptions{} } // SetEncodeToMinSize specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.IntMinSize] instead. func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions { u.EncodeToMinSize = &b return u } // MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions { u := UIntCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go index 5cdf6460b..4d279b7fe 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go @@ -17,20 +17,32 @@ import ( // Copier is a type that allows copying between ValueReaders, ValueWriters, and // []byte values. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. type Copier struct{} // NewCopier creates a new copier with the given registry. If a nil registry is provided // a default registry is used. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func NewCopier() Copier { return Copier{} } // CopyDocument handles copying a document from src to dst. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func CopyDocument(dst ValueWriter, src ValueReader) error { return Copier{}.CopyDocument(dst, src) } // CopyDocument handles copying one document from the src to the dst. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { dr, err := src.ReadDocument() if err != nil { @@ -47,6 +59,9 @@ func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { // CopyArrayFromBytes copies the values from a BSON array represented as a // []byte to a ValueWriter. +// +// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. 
func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { aw, err := dst.WriteArray() if err != nil { @@ -63,6 +78,9 @@ func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { // CopyDocumentFromBytes copies the values from a BSON document represented as a // []byte to a ValueWriter. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error { dw, err := dst.WriteDocument() if err != nil { @@ -81,6 +99,9 @@ type writeElementFn func(key string) (ValueWriter, error) // CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an // ArrayWriter. +// +// Deprecated: Copying BSON arrays using the ArrayWriter interface will not be supported in Go +// Driver 2.0. func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { wef := func(_ string) (ValueWriter, error) { return dst.WriteArrayElement() @@ -91,6 +112,9 @@ func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { // CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a // DocumentWriter. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error { wef := func(key string) (ValueWriter, error) { return dst.WriteDocumentElement(key) @@ -100,7 +124,7 @@ func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error } func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { - // TODO(skriptble): Create errors types here. Anything thats a tag should be a property. + // TODO(skriptble): Create errors types here. Anything that is a tag should be a property. 
length, rem, ok := bsoncore.ReadLength(src) if !ok { return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src)) @@ -150,12 +174,18 @@ func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { // CopyDocumentToBytes copies an entire document from the ValueReader and // returns it as bytes. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) { return c.AppendDocumentBytes(nil, src) } // AppendDocumentBytes functions the same as CopyDocumentToBytes, but will // append the result to dst. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) { if br, ok := src.(BytesReader); ok { _, dst, err := br.ReadValueBytes(dst) @@ -163,7 +193,7 @@ func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) } vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) + defer putValueWriter(vw) vw.reset(dst) @@ -173,6 +203,9 @@ func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) } // AppendArrayBytes copies an array from the ValueReader to dst. +// +// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. 
func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { if br, ok := src.(BytesReader); ok { _, dst, err := br.ReadValueBytes(dst) @@ -180,7 +213,7 @@ func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { } vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) + defer putValueWriter(vw) vw.reset(dst) @@ -190,6 +223,8 @@ func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { } // CopyValueFromBytes will write the value represtend by t and src to dst. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.UnmarshalValue] instead. func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error { if wvb, ok := dst.(BytesWriter); ok { return wvb.WriteValueBytes(t, src) @@ -206,19 +241,24 @@ func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) // CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a // []byte. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.MarshalValue] instead. func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) { return c.AppendValueBytes(nil, src) } // AppendValueBytes functions the same as CopyValueToBytes, but will append the // result to dst. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) { if br, ok := src.(BytesReader); ok { return br.ReadValueBytes(dst) } vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) + defer putValueWriter(vw) start := len(dst) @@ -234,6 +274,9 @@ func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, [] } // CopyValue will copy a single value from src to dst. +// +// Deprecated: Copying BSON values using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. 
func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error { var err error switch src.Type() { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go index 35832d73a..2aca37a91 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go @@ -16,11 +16,15 @@ import ( ) // ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. type ExtJSONValueReaderPool struct { pool sync.Pool } // NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { return &ExtJSONValueReaderPool{ pool: sync.Pool{ @@ -32,6 +36,8 @@ func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { } // Get retrieves a ValueReader from the pool and uses src as the underlying ExtJSON. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) { vr := bvrp.pool.Get().(*extJSONValueReader) return vr.reset(r, canonical) @@ -39,6 +45,8 @@ func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReade // Put inserts a ValueReader into the pool. If the ValueReader is not a ExtJSON ValueReader nothing // is inserted into the pool and ok will be false. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. 
func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) { bvr, ok := vr.(*extJSONValueReader) if !ok { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go index 99ed524b7..bb9303167 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go @@ -23,11 +23,15 @@ import ( ) // ExtJSONValueWriterPool is a pool for ExtJSON ValueWriters. +// +// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0. type ExtJSONValueWriterPool struct { pool sync.Pool } // NewExtJSONValueWriterPool creates a new pool for ValueWriter instances that write to ExtJSON. +// +// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0. func NewExtJSONValueWriterPool() *ExtJSONValueWriterPool { return &ExtJSONValueWriterPool{ pool: sync.Pool{ @@ -39,6 +43,8 @@ func NewExtJSONValueWriterPool() *ExtJSONValueWriterPool { } // Get retrieves a ExtJSON ValueWriter from the pool and resets it to use w as the destination. +// +// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0. func (bvwp *ExtJSONValueWriterPool) Get(w io.Writer, canonical, escapeHTML bool) ValueWriter { vw := bvwp.pool.Get().(*extJSONValueWriter) if writer, ok := w.(*SliceWriter); ok { @@ -53,6 +59,8 @@ func (bvwp *ExtJSONValueWriterPool) Get(w io.Writer, canonical, escapeHTML bool) // Put inserts a ValueWriter into the pool. If the ValueWriter is not a ExtJSON ValueWriter, nothing // happens and ok will be false. +// +// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0. 
func (bvwp *ExtJSONValueWriterPool) Put(vw ValueWriter) (ok bool) { bvw, ok := vw.(*extJSONValueWriter) if !ok { @@ -80,6 +88,7 @@ type extJSONValueWriter struct { frame int64 canonical bool escapeHTML bool + newlines bool } // NewExtJSONValueWriter creates a ValueWriter that writes Extended JSON to w. @@ -88,10 +97,13 @@ func NewExtJSONValueWriter(w io.Writer, canonical, escapeHTML bool) (ValueWriter return nil, errNilWriter } - return newExtJSONWriter(w, canonical, escapeHTML), nil + // Enable newlines for all Extended JSON value writers created by NewExtJSONValueWriter. We + // expect these value writers to be used with an Encoder, which should add newlines after + // encoded Extended JSON documents. + return newExtJSONWriter(w, canonical, escapeHTML, true), nil } -func newExtJSONWriter(w io.Writer, canonical, escapeHTML bool) *extJSONValueWriter { +func newExtJSONWriter(w io.Writer, canonical, escapeHTML, newlines bool) *extJSONValueWriter { stack := make([]ejvwState, 1, 5) stack[0] = ejvwState{mode: mTopLevel} @@ -101,6 +113,7 @@ func newExtJSONWriter(w io.Writer, canonical, escapeHTML bool) *extJSONValueWrit stack: stack, canonical: canonical, escapeHTML: escapeHTML, + newlines: newlines, } } @@ -564,6 +577,12 @@ func (ejvw *extJSONValueWriter) WriteDocumentEnd() error { case mDocument: ejvw.buf = append(ejvw.buf, ',') case mTopLevel: + // If the value writer has newlines enabled, end top-level documents with a newline so that + // multiple documents encoded to the same writer are separated by newlines. That matches the + // Go json.Encoder behavior and also works with bsonrw.NewExtJSONValueReader. 
+ if ejvw.newlines { + ejvw.buf = append(ejvw.buf, '\n') + } if ejvw.w != nil { if _, err := ejvw.w.Write(ejvw.buf); err != nil { return err diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go index 0b8fa28d5..324b10b61 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go @@ -58,6 +58,8 @@ type ValueReader interface { // types that implement ValueReader may also implement this interface. // // The bytes of the value will be appended to dst. +// +// Deprecated: BytesReader will not be supported in Go Driver 2.0. type BytesReader interface { ReadValueBytes(dst []byte) (bsontype.Type, []byte, error) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go index ef5d837c2..a242bb57c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go @@ -28,11 +28,15 @@ var vrPool = sync.Pool{ } // BSONValueReaderPool is a pool for ValueReaders that read BSON. +// +// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0. type BSONValueReaderPool struct { pool sync.Pool } // NewBSONValueReaderPool instantiates a new BSONValueReaderPool. +// +// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0. func NewBSONValueReaderPool() *BSONValueReaderPool { return &BSONValueReaderPool{ pool: sync.Pool{ @@ -44,6 +48,8 @@ func NewBSONValueReaderPool() *BSONValueReaderPool { } // Get retrieves a ValueReader from the pool and uses src as the underlying BSON. +// +// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0. 
func (bvrp *BSONValueReaderPool) Get(src []byte) ValueReader { vr := bvrp.pool.Get().(*valueReader) vr.reset(src) @@ -52,6 +58,8 @@ func (bvrp *BSONValueReaderPool) Get(src []byte) ValueReader { // Put inserts a ValueReader into the pool. If the ValueReader is not a BSON ValueReader nothing // is inserted into the pool and ok will be false. +// +// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0. func (bvrp *BSONValueReaderPool) Put(vr ValueReader) (ok bool) { bvr, ok := vr.(*valueReader) if !ok { @@ -731,8 +739,7 @@ func (vr *valueReader) ReadValue() (ValueReader, error) { return nil, ErrEOA } - _, err = vr.readCString() - if err != nil { + if err := vr.skipCString(); err != nil { return nil, err } @@ -786,6 +793,15 @@ func (vr *valueReader) readByte() (byte, error) { return vr.d[vr.offset-1], nil } +func (vr *valueReader) skipCString() error { + idx := bytes.IndexByte(vr.d[vr.offset:], 0x00) + if idx < 0 { + return io.EOF + } + vr.offset += int64(idx) + 1 + return nil +} + func (vr *valueReader) readCString() (string, error) { idx := bytes.IndexByte(vr.d[vr.offset:], 0x00) if idx < 0 { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go index f95a08afd..311518a80 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go @@ -28,12 +28,23 @@ var vwPool = sync.Pool{ }, } +func putValueWriter(vw *valueWriter) { + if vw != nil { + vw.w = nil // don't leak the writer + vwPool.Put(vw) + } +} + // BSONValueWriterPool is a pool for BSON ValueWriters. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. type BSONValueWriterPool struct { pool sync.Pool } // NewBSONValueWriterPool creates a new pool for ValueWriter instances that write to BSON. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. 
func NewBSONValueWriterPool() *BSONValueWriterPool { return &BSONValueWriterPool{ pool: sync.Pool{ @@ -45,6 +56,8 @@ func NewBSONValueWriterPool() *BSONValueWriterPool { } // Get retrieves a BSON ValueWriter from the pool and resets it to use w as the destination. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. func (bvwp *BSONValueWriterPool) Get(w io.Writer) ValueWriter { vw := bvwp.pool.Get().(*valueWriter) @@ -56,6 +69,8 @@ func (bvwp *BSONValueWriterPool) Get(w io.Writer) ValueWriter { } // GetAtModeElement retrieves a ValueWriterFlusher from the pool and resets it to use w as the destination. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. func (bvwp *BSONValueWriterPool) GetAtModeElement(w io.Writer) ValueWriterFlusher { vw := bvwp.Get(w).(*valueWriter) vw.push(mElement) @@ -64,6 +79,8 @@ func (bvwp *BSONValueWriterPool) GetAtModeElement(w io.Writer) ValueWriterFlushe // Put inserts a ValueWriter into the pool. If the ValueWriter is not a BSON ValueWriter, nothing // happens and ok will be false. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. 
func (bvwp *BSONValueWriterPool) Put(vw ValueWriter) (ok bool) { bvw, ok := vw.(*valueWriter) if !ok { @@ -139,32 +156,21 @@ type valueWriter struct { } func (vw *valueWriter) advanceFrame() { - if vw.frame+1 >= int64(len(vw.stack)) { // We need to grow the stack - length := len(vw.stack) - if length+1 >= cap(vw.stack) { - // double it - buf := make([]vwState, 2*cap(vw.stack)+1) - copy(buf, vw.stack) - vw.stack = buf - } - vw.stack = vw.stack[:length+1] - } vw.frame++ + if vw.frame >= int64(len(vw.stack)) { + vw.stack = append(vw.stack, vwState{}) + } } func (vw *valueWriter) push(m mode) { vw.advanceFrame() // Clean the stack - vw.stack[vw.frame].mode = m - vw.stack[vw.frame].key = "" - vw.stack[vw.frame].arrkey = 0 - vw.stack[vw.frame].start = 0 + vw.stack[vw.frame] = vwState{mode: m} - vw.stack[vw.frame].mode = m switch m { case mDocument, mArray, mCodeWithScope: - vw.reserveLength() + vw.reserveLength() // WARN: this is not needed } } @@ -203,6 +209,7 @@ func newValueWriter(w io.Writer) *valueWriter { return vw } +// TODO: only used in tests func newValueWriterFromSlice(buf []byte) *valueWriter { vw := new(valueWriter) stack := make([]vwState, 1, 5) @@ -239,17 +246,16 @@ func (vw *valueWriter) invalidTransitionError(destination mode, name string, mod } func (vw *valueWriter) writeElementHeader(t bsontype.Type, destination mode, callerName string, addmodes ...mode) error { - switch vw.stack[vw.frame].mode { + frame := &vw.stack[vw.frame] + switch frame.mode { case mElement: - key := vw.stack[vw.frame].key + key := frame.key if !isValidCString(key) { return errors.New("BSON element key cannot contain null bytes") } - - vw.buf = bsoncore.AppendHeader(vw.buf, t, key) + vw.appendHeader(t, key) case mValue: - // TODO: Do this with a cache of the first 1000 or so array keys. 
- vw.buf = bsoncore.AppendHeader(vw.buf, t, strconv.Itoa(vw.stack[vw.frame].arrkey)) + vw.appendIntHeader(t, frame.arrkey) default: modes := []mode{mElement, mValue} if addmodes != nil { @@ -591,9 +597,11 @@ func (vw *valueWriter) writeLength() error { if length > maxSize { return errMaxDocumentSizeExceeded{size: int64(len(vw.buf))} } - length = length - int(vw.stack[vw.frame].start) - start := vw.stack[vw.frame].start + frame := &vw.stack[vw.frame] + length = length - int(frame.start) + start := frame.start + _ = vw.buf[start+3] // BCE vw.buf[start+0] = byte(length) vw.buf[start+1] = byte(length >> 8) vw.buf[start+2] = byte(length >> 16) @@ -602,5 +610,31 @@ func (vw *valueWriter) writeLength() error { } func isValidCString(cs string) bool { - return !strings.ContainsRune(cs, '\x00') + // Disallow the zero byte in a cstring because the zero byte is used as the + // terminating character. + // + // It's safe to check bytes instead of runes because all multibyte UTF-8 + // code points start with (binary) 11xxxxxx or 10xxxxxx, so 00000000 (i.e. + // 0) will never be part of a multibyte UTF-8 code point. This logic is the + // same as the "r < utf8.RuneSelf" case in strings.IndexRune but can be + // inlined. + // + // https://cs.opensource.google/go/go/+/refs/tags/go1.21.1:src/strings/strings.go;l=127 + return strings.IndexByte(cs, 0) == -1 +} + +// appendHeader is the same as bsoncore.AppendHeader but does not check if the +// key is a valid C string since the caller has already checked for that. +// +// The caller of this function must check if key is a valid C string. +func (vw *valueWriter) appendHeader(t bsontype.Type, key string) { + vw.buf = bsoncore.AppendType(vw.buf, t) + vw.buf = append(vw.buf, key...) 
+ vw.buf = append(vw.buf, 0x00) +} + +func (vw *valueWriter) appendIntHeader(t bsontype.Type, key int) { + vw.buf = bsoncore.AppendType(vw.buf, t) + vw.buf = strconv.AppendInt(vw.buf, int64(key), 10) + vw.buf = append(vw.buf, 0x00) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go index dff65f87f..628f45293 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go @@ -56,6 +56,8 @@ type ValueWriter interface { } // ValueWriterFlusher is a superset of ValueWriter that exposes functionality to flush to the underlying buffer. +// +// Deprecated: ValueWriterFlusher will not be supported in Go Driver 2.0. type ValueWriterFlusher interface { ValueWriter Flush() error @@ -64,13 +66,20 @@ type ValueWriterFlusher interface { // BytesWriter is the interface used to write BSON bytes to a ValueWriter. // This interface is meant to be a superset of ValueWriter, so that types that // implement ValueWriter may also implement this interface. +// +// Deprecated: BytesWriter will not be supported in Go Driver 2.0. type BytesWriter interface { WriteValueBytes(t bsontype.Type, b []byte) error } // SliceWriter allows a pointer to a slice of bytes to be used as an io.Writer. +// +// Deprecated: SliceWriter will not be supported in Go Driver 2.0. type SliceWriter []byte +// Write writes the bytes to the underlying slice. +// +// Deprecated: SliceWriter will not be supported in Go Driver 2.0. func (sw *SliceWriter) Write(p []byte) (int, error) { written := len(p) *sw = append(*sw, p...) 
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go index 7c91ae518..255d9909e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go @@ -8,7 +8,9 @@ // a stringifier for the Type to enable easier debugging when working with BSON. package bsontype // import "go.mongodb.org/mongo-driver/bson/bsontype" -// These constants uniquely refer to each BSON type. +// BSON element types as described in https://bsonspec.org/spec.html. +// +// Deprecated: Use bson.Type* constants instead. const ( Double Type = 0x01 String Type = 0x02 @@ -31,7 +33,12 @@ const ( Decimal128 Type = 0x13 MinKey Type = 0xFF MaxKey Type = 0x7F +) +// BSON binary element subtypes as described in https://bsonspec.org/spec.html. +// +// Deprecated: Use the bson.TypeBinary* constants instead. +const ( BinaryGeneric byte = 0x00 BinaryFunction byte = 0x01 BinaryBinaryOld byte = 0x02 @@ -40,6 +47,7 @@ const ( BinaryMD5 byte = 0x05 BinaryEncrypted byte = 0x06 BinaryColumn byte = 0x07 + BinarySensitive byte = 0x08 BinaryUserDefined byte = 0x80 ) @@ -95,3 +103,14 @@ func (bt Type) String() string { return "invalid" } } + +// IsValid will return true if the Type is valid. +func (bt Type) IsValid() bool { + switch bt { + case Double, String, EmbeddedDocument, Array, Binary, Undefined, ObjectID, Boolean, DateTime, Null, Regex, + DBPointer, JavaScript, Symbol, CodeWithScope, Int32, Timestamp, Int64, Decimal128, MinKey, MaxKey: + return true + default: + return false + } +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go index 6e189fa58..eac74cd39 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go @@ -38,6 +38,12 @@ type Decoder struct { // (*Decoder).SetContext. 
defaultDocumentM bool defaultDocumentD bool + + binaryAsSlice bool + useJSONStructTags bool + useLocalTimeZone bool + zeroMaps bool + zeroStructs bool } // NewDecoder returns a new decoder that uses the DefaultRegistry to read from vr. @@ -53,6 +59,9 @@ func NewDecoder(vr bsonrw.ValueReader) (*Decoder, error) { } // NewDecoderWithContext returns a new decoder that uses DecodeContext dc to read from vr. +// +// Deprecated: Use [NewDecoder] and use the Decoder configuration methods set the desired unmarshal +// behavior instead. func NewDecoderWithContext(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) (*Decoder, error) { if dc.Registry == nil { dc.Registry = DefaultRegistry @@ -70,8 +79,7 @@ func NewDecoderWithContext(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) (* // Decode reads the next BSON document from the stream and decodes it into the // value pointed to by val. // -// The documentation for Unmarshal contains details about of BSON into a Go -// value. +// See [Unmarshal] for details about BSON unmarshaling behavior. func (d *Decoder) Decode(val interface{}) error { if unmarshaler, ok := val.(Unmarshaler); ok { // TODO(skriptble): Reuse a []byte here and use the AppendDocumentBytes method. @@ -100,42 +108,101 @@ func (d *Decoder) Decode(val interface{}) error { if err != nil { return err } + if d.defaultDocumentM { d.dc.DefaultDocumentM() } if d.defaultDocumentD { d.dc.DefaultDocumentD() } + if d.binaryAsSlice { + d.dc.BinaryAsSlice() + } + if d.useJSONStructTags { + d.dc.UseJSONStructTags() + } + if d.useLocalTimeZone { + d.dc.UseLocalTimeZone() + } + if d.zeroMaps { + d.dc.ZeroMaps() + } + if d.zeroStructs { + d.dc.ZeroStructs() + } + return decoder.DecodeValue(d.dc, d.vr, rval) } // Reset will reset the state of the decoder, using the same *DecodeContext used in // the original construction but using vr for reading. func (d *Decoder) Reset(vr bsonrw.ValueReader) error { + // TODO:(GODRIVER-2719): Remove error return value. 
d.vr = vr return nil } // SetRegistry replaces the current registry of the decoder with r. func (d *Decoder) SetRegistry(r *bsoncodec.Registry) error { + // TODO:(GODRIVER-2719): Remove error return value. d.dc.Registry = r return nil } // SetContext replaces the current registry of the decoder with dc. +// +// Deprecated: Use the Decoder configuration methods to set the desired unmarshal behavior instead. func (d *Decoder) SetContext(dc bsoncodec.DecodeContext) error { + // TODO:(GODRIVER-2719): Remove error return value. d.dc = dc return nil } -// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". +// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This +// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". func (d *Decoder) DefaultDocumentM() { d.defaultDocumentM = true } -// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". +// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This +// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". func (d *Decoder) DefaultDocumentD() { d.defaultDocumentD = true } + +// AllowTruncatingDoubles causes the Decoder to truncate the fractional part of BSON "double" values +// when attempting to unmarshal them into a Go integer (int, int8, int16, int32, or int64) struct +// field. The truncation logic does not apply to BSON "decimal128" values. +func (d *Decoder) AllowTruncatingDoubles() { + d.dc.Truncate = true +} + +// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or +// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. 
+func (d *Decoder) BinaryAsSlice() { + d.binaryAsSlice = true +} + +// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +func (d *Decoder) UseJSONStructTags() { + d.useJSONStructTags = true +} + +// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead +// of the UTC timezone. +func (d *Decoder) UseLocalTimeZone() { + d.useLocalTimeZone = true +} + +// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value +// passed to Decode before unmarshaling BSON documents into them. +func (d *Decoder) ZeroMaps() { + d.zeroMaps = true +} + +// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination +// value passed to Decode before unmarshaling BSON documents into them. +func (d *Decoder) ZeroStructs() { + d.zeroStructs = true +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/doc.go index 0134006d8..048b5eb99 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/doc.go @@ -7,7 +7,8 @@ // Package bson is a library for reading, writing, and manipulating BSON. BSON is a binary serialization format used to // store documents and make remote procedure calls in MongoDB. The BSON specification is located at https://bsonspec.org. // The BSON library handles marshalling and unmarshalling of values through a configurable codec system. For a description -// of the codec system and examples of registering custom codecs, see the bsoncodec package. +// of the codec system and examples of registering custom codecs, see the bsoncodec package. For additional information and +// usage examples, check out the [Work with BSON] page in the Go Driver docs site. 
// // # Raw BSON // @@ -138,4 +139,6 @@ // # Marshalling and Unmarshalling // // Manually marshalling and unmarshalling can be done with the Marshal and Unmarshal family of functions. +// +// [Work with BSON]: https://www.mongodb.com/docs/drivers/go/current/fundamentals/bson/ package bson diff --git a/vendor/go.mongodb.org/mongo-driver/bson/encoder.go b/vendor/go.mongodb.org/mongo-driver/bson/encoder.go index fe5125d08..0be2a97fb 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/encoder.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/encoder.go @@ -29,10 +29,20 @@ var encPool = sync.Pool{ type Encoder struct { ec bsoncodec.EncodeContext vw bsonrw.ValueWriter + + errorOnInlineDuplicates bool + intMinSize bool + stringifyMapKeysWithFmt bool + nilMapAsEmpty bool + nilSliceAsEmpty bool + nilByteSliceAsEmpty bool + omitZeroStruct bool + useJSONStructTags bool } // NewEncoder returns a new encoder that uses the DefaultRegistry to write to vw. func NewEncoder(vw bsonrw.ValueWriter) (*Encoder, error) { + // TODO:(GODRIVER-2719): Remove error return value. if vw == nil { return nil, errors.New("cannot create a new Encoder with a nil ValueWriter") } @@ -44,6 +54,9 @@ func NewEncoder(vw bsonrw.ValueWriter) (*Encoder, error) { } // NewEncoderWithContext returns a new encoder that uses EncodeContext ec to write to vw. +// +// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal +// behavior instead. func NewEncoderWithContext(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter) (*Encoder, error) { if ec.Registry == nil { ec = bsoncodec.EncodeContext{Registry: DefaultRegistry} @@ -60,8 +73,7 @@ func NewEncoderWithContext(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter) (* // Encode writes the BSON encoding of val to the stream. // -// The documentation for Marshal contains details about the conversion of Go -// values to BSON. +// See [Marshal] for details about BSON marshaling behavior. 
func (e *Encoder) Encode(val interface{}) error { if marshaler, ok := val.(Marshaler); ok { // TODO(skriptble): Should we have a MarshalAppender interface so that we can have []byte reuse? @@ -76,24 +88,112 @@ func (e *Encoder) Encode(val interface{}) error { if err != nil { return err } + + // Copy the configurations applied to the Encoder over to the EncodeContext, which actually + // communicates those configurations to the default ValueEncoders. + if e.errorOnInlineDuplicates { + e.ec.ErrorOnInlineDuplicates() + } + if e.intMinSize { + e.ec.MinSize = true + } + if e.stringifyMapKeysWithFmt { + e.ec.StringifyMapKeysWithFmt() + } + if e.nilMapAsEmpty { + e.ec.NilMapAsEmpty() + } + if e.nilSliceAsEmpty { + e.ec.NilSliceAsEmpty() + } + if e.nilByteSliceAsEmpty { + e.ec.NilByteSliceAsEmpty() + } + if e.omitZeroStruct { + e.ec.OmitZeroStruct() + } + if e.useJSONStructTags { + e.ec.UseJSONStructTags() + } + return encoder.EncodeValue(e.ec, e.vw, reflect.ValueOf(val)) } -// Reset will reset the state of the encoder, using the same *EncodeContext used in +// Reset will reset the state of the Encoder, using the same *EncodeContext used in // the original construction but using vw. func (e *Encoder) Reset(vw bsonrw.ValueWriter) error { + // TODO:(GODRIVER-2719): Remove error return value. e.vw = vw return nil } -// SetRegistry replaces the current registry of the encoder with r. +// SetRegistry replaces the current registry of the Encoder with r. func (e *Encoder) SetRegistry(r *bsoncodec.Registry) error { + // TODO:(GODRIVER-2719): Remove error return value. e.ec.Registry = r return nil } -// SetContext replaces the current EncodeContext of the encoder with er. +// SetContext replaces the current EncodeContext of the encoder with ec. +// +// Deprecated: Use the Encoder configuration methods set the desired marshal behavior instead. func (e *Encoder) SetContext(ec bsoncodec.EncodeContext) error { + // TODO:(GODRIVER-2719): Remove error return value. 
e.ec = ec return nil } + +// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in +// the marshaled BSON when the "inline" struct tag option is set. +func (e *Encoder) ErrorOnInlineDuplicates() { + e.errorOnInlineDuplicates = true +} + +// IntMinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, uint, +// uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) that can +// represent the integer value. +func (e *Encoder) IntMinSize() { + e.intMinSize = true +} + +// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name +// strings using fmt.Sprint instead of the default string conversion logic. +func (e *Encoder) StringifyMapKeysWithFmt() { + e.stringifyMapKeysWithFmt = true +} + +// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON +// null. +func (e *Encoder) NilMapAsEmpty() { + e.nilMapAsEmpty = true +} + +// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON +// null. +func (e *Encoder) NilSliceAsEmpty() { + e.nilSliceAsEmpty = true +} + +// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values +// instead of BSON null. +func (e *Encoder) NilByteSliceAsEmpty() { + e.nilByteSliceAsEmpty = true +} + +// TODO(GODRIVER-2820): Update the description to remove the note about only examining exported +// TODO struct fields once the logic is updated to also inspect private struct fields. + +// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{}) +// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set. +// +// Note that the Encoder only examines exported struct fields when determining if a struct is the +// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty. 
+func (e *Encoder) OmitZeroStruct() { + e.omitZeroStruct = true +} + +// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +func (e *Encoder) UseJSONStructTags() { + e.useJSONStructTags = true +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go index db8d8ee92..17ce6697e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go @@ -9,6 +9,7 @@ package bson import ( "bytes" "encoding/json" + "sync" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/bsonrw" @@ -20,17 +21,23 @@ const defaultDstCap = 256 var bvwPool = bsonrw.NewBSONValueWriterPool() var extjPool = bsonrw.NewExtJSONValueWriterPool() -// Marshaler is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. +// Marshaler is the interface implemented by types that can marshal themselves +// into a valid BSON document. +// +// Implementations of Marshaler must return a full BSON document. To create +// custom BSON marshaling behavior for individual values in a BSON document, +// implement the ValueMarshaler interface instead. type Marshaler interface { MarshalBSON() ([]byte, error) } -// ValueMarshaler is an interface implemented by types that can marshal -// themselves into a BSON value as bytes. The type must be the valid type for -// the bytes returned. The bytes and byte type together must be valid if the -// error is nil. +// ValueMarshaler is the interface implemented by types that can marshal +// themselves into a valid BSON value. The format of the returned bytes must +// match the returned type. +// +// Implementations of ValueMarshaler must return an individual BSON value. 
To +// create custom BSON marshaling behavior for an entire BSON document, implement +// the Marshaler interface instead. type ValueMarshaler interface { MarshalBSONValue() (bsontype.Type, []byte, error) } @@ -48,12 +55,42 @@ func Marshal(val interface{}) ([]byte, error) { // MarshalAppend will encode val as a BSON document and append the bytes to dst. If dst is not large enough to hold the // bytes, it will be grown. If val is not a type that can be transformed into a document, MarshalValueAppend should be // used instead. +// +// Deprecated: Use [NewEncoder] and pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewBSONValueWriter]: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// +// See [Encoder] for more examples. func MarshalAppend(dst []byte, val interface{}) ([]byte, error) { return MarshalAppendWithRegistry(DefaultRegistry, dst, val) } // MarshalWithRegistry returns the BSON encoding of val as a BSON document. If val is not a type that can be transformed // into a document, MarshalValueWithRegistry should be used instead. +// +// Deprecated: Use [NewEncoder] and specify the Registry by calling [Encoder.SetRegistry] instead: +// +// buf := new(bytes.Buffer) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.SetRegistry(reg) +// +// See [Encoder] for more examples. func MarshalWithRegistry(r *bsoncodec.Registry, val interface{}) ([]byte, error) { dst := make([]byte, 0) return MarshalAppendWithRegistry(r, dst, val) @@ -61,6 +98,22 @@ func MarshalWithRegistry(r *bsoncodec.Registry, val interface{}) ([]byte, error) // MarshalWithContext returns the BSON encoding of val as a BSON document using EncodeContext ec. 
If val is not a type // that can be transformed into a document, MarshalValueWithContext should be used instead. +// +// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal +// behavior instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.IntMinSize() +// +// See [Encoder] for more examples. func MarshalWithContext(ec bsoncodec.EncodeContext, val interface{}) ([]byte, error) { dst := make([]byte, 0) return MarshalAppendWithContext(ec, dst, val) @@ -69,16 +122,74 @@ func MarshalWithContext(ec bsoncodec.EncodeContext, val interface{}) ([]byte, er // MarshalAppendWithRegistry will encode val as a BSON document using Registry r and append the bytes to dst. If dst is // not large enough to hold the bytes, it will be grown. If val is not a type that can be transformed into a document, // MarshalValueAppendWithRegistry should be used instead. +// +// Deprecated: Use [NewEncoder], and pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewBSONValueWriter], and specify the Registry by calling [Encoder.SetRegistry] instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.SetRegistry(reg) +// +// See [Encoder] for more examples. func MarshalAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}) ([]byte, error) { return MarshalAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val) } +// Pool of buffers for marshalling BSON. +var bufPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + // MarshalAppendWithContext will encode val as a BSON document using Registry r and EncodeContext ec and append the // bytes to dst. 
If dst is not large enough to hold the bytes, it will be grown. If val is not a type that can be // transformed into a document, MarshalValueAppendWithContext should be used instead. +// +// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewBSONValueWriter], and use the Encoder configuration methods to set the desired marshal +// behavior instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.IntMinSize() +// +// See [Encoder] for more examples. func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) ([]byte, error) { - sw := new(bsonrw.SliceWriter) - *sw = dst + sw := bufPool.Get().(*bytes.Buffer) + defer func() { + // Proper usage of a sync.Pool requires each entry to have approximately + // the same memory cost. To obtain this property when the stored type + // contains a variably-sized buffer, we add a hard limit on the maximum + // buffer to place back in the pool. We limit the size to 16MiB because + // that's the maximum wire message size supported by any current MongoDB + // server. + // + // Comment based on + // https://cs.opensource.google/go/go/+/refs/tags/go1.19:src/fmt/print.go;l=147 + // + // Recycle byte slices that are smaller than 16MiB and at least half + // occupied. + if sw.Cap() < 16*1024*1024 && sw.Cap()/2 < sw.Len() { + bufPool.Put(sw) + } + }() + + sw.Reset() vw := bvwPool.Get(sw) defer bvwPool.Put(vw) @@ -99,7 +210,7 @@ func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interf return nil, err } - return *sw, nil + return append(dst, sw.Bytes()...), nil } // MarshalValue returns the BSON encoding of val. @@ -112,17 +223,26 @@ func MarshalValue(val interface{}) (bsontype.Type, []byte, error) { // MarshalValueAppend will append the BSON encoding of val to dst. 
If dst is not large enough to hold the BSON encoding // of val, dst will be grown. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. func MarshalValueAppend(dst []byte, val interface{}) (bsontype.Type, []byte, error) { return MarshalValueAppendWithRegistry(DefaultRegistry, dst, val) } // MarshalValueWithRegistry returns the BSON encoding of val using Registry r. +// +// Deprecated: Using a custom registry to marshal individual BSON values will not be supported in Go +// Driver 2.0. func MarshalValueWithRegistry(r *bsoncodec.Registry, val interface{}) (bsontype.Type, []byte, error) { dst := make([]byte, 0) return MarshalValueAppendWithRegistry(r, dst, val) } // MarshalValueWithContext returns the BSON encoding of val using EncodeContext ec. +// +// Deprecated: Using a custom EncodeContext to marshal individual BSON elements will not be +// supported in Go Driver 2.0. func MarshalValueWithContext(ec bsoncodec.EncodeContext, val interface{}) (bsontype.Type, []byte, error) { dst := make([]byte, 0) return MarshalValueAppendWithContext(ec, dst, val) @@ -130,12 +250,18 @@ func MarshalValueWithContext(ec bsoncodec.EncodeContext, val interface{}) (bsont // MarshalValueAppendWithRegistry will append the BSON encoding of val to dst using Registry r. If dst is not large // enough to hold the BSON encoding of val, dst will be grown. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. func MarshalValueAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}) (bsontype.Type, []byte, error) { return MarshalValueAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val) } // MarshalValueAppendWithContext will append the BSON encoding of val to dst using EncodeContext ec. If dst is not large // enough to hold the BSON encoding of val, dst will be grown. 
+// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. func MarshalValueAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) (bsontype.Type, []byte, error) { // get a ValueWriter configured to write to dst sw := new(bsonrw.SliceWriter) @@ -173,17 +299,63 @@ func MarshalExtJSON(val interface{}, canonical, escapeHTML bool) ([]byte, error) // MarshalExtJSONAppend will append the extended JSON encoding of val to dst. // If dst is not large enough to hold the extended JSON encoding of val, dst // will be grown. +// +// Deprecated: Use [NewEncoder] and pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewExtJSONValueWriter] instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// +// See [Encoder] for more examples. func MarshalExtJSONAppend(dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) { return MarshalExtJSONAppendWithRegistry(DefaultRegistry, dst, val, canonical, escapeHTML) } // MarshalExtJSONWithRegistry returns the extended JSON encoding of val using Registry r. +// +// Deprecated: Use [NewEncoder] and specify the Registry by calling [Encoder.SetRegistry] instead: +// +// buf := new(bytes.Buffer) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.SetRegistry(reg) +// +// See [Encoder] for more examples. 
func MarshalExtJSONWithRegistry(r *bsoncodec.Registry, val interface{}, canonical, escapeHTML bool) ([]byte, error) { dst := make([]byte, 0, defaultDstCap) return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML) } // MarshalExtJSONWithContext returns the extended JSON encoding of val using Registry r. +// +// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal +// behavior instead: +// +// buf := new(bytes.Buffer) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.IntMinSize() +// +// See [Encoder] for more examples. func MarshalExtJSONWithContext(ec bsoncodec.EncodeContext, val interface{}, canonical, escapeHTML bool) ([]byte, error) { dst := make([]byte, 0, defaultDstCap) return MarshalExtJSONAppendWithContext(ec, dst, val, canonical, escapeHTML) @@ -192,6 +364,22 @@ func MarshalExtJSONWithContext(ec bsoncodec.EncodeContext, val interface{}, cano // MarshalExtJSONAppendWithRegistry will append the extended JSON encoding of // val to dst using Registry r. If dst is not large enough to hold the BSON // encoding of val, dst will be grown. +// +// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewExtJSONValueWriter], and specify the Registry by calling [Encoder.SetRegistry] +// instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// +// See [Encoder] for more examples. 
func MarshalExtJSONAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) { return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML) } @@ -199,6 +387,23 @@ func MarshalExtJSONAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val int // MarshalExtJSONAppendWithContext will append the extended JSON encoding of // val to dst using Registry r. If dst is not large enough to hold the BSON // encoding of val, dst will be grown. +// +// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewExtJSONValueWriter], and use the Encoder configuration methods to set the desired marshal +// behavior instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.IntMinSize() +// +// See [Encoder] for more examples. func MarshalExtJSONAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) { sw := new(bsonrw.SliceWriter) *sw = dst diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go index ded367316..9bbaffac2 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go @@ -82,18 +82,18 @@ func ObjectIDFromHex(s string) (ObjectID, error) { return NilObjectID, ErrInvalidHex } - b, err := hex.DecodeString(s) + var oid [12]byte + _, err := hex.Decode(oid[:], []byte(s)) if err != nil { return NilObjectID, err } - var oid [12]byte - copy(oid[:], b) - return oid, nil } // IsValidObjectID returns true if the provided hex string represents a valid ObjectID and false if not. 
+// +// Deprecated: Use ObjectIDFromHex and check the error instead. func IsValidObjectID(s string) bool { _, err := ObjectIDFromHex(s) return err == nil diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go index c72ccc1c4..65f4fbb94 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go @@ -45,7 +45,7 @@ var _ json.Unmarshaler = (*DateTime)(nil) // MarshalJSON marshal to time type. func (d DateTime) MarshalJSON() ([]byte, error) { - return json.Marshal(d.Time()) + return json.Marshal(d.Time().UTC()) } // UnmarshalJSON creates a primitive.DateTime from a JSON string. @@ -141,6 +141,16 @@ type Timestamp struct { I uint32 } +// After reports whether the time instant tp is after tp2. +func (tp Timestamp) After(tp2 Timestamp) bool { + return tp.T > tp2.T || (tp.T == tp2.T && tp.I > tp2.I) +} + +// Before reports whether the time instant tp is before tp2. +func (tp Timestamp) Before(tp2 Timestamp) bool { + return tp.T < tp2.T || (tp.T == tp2.T && tp.I < tp2.I) +} + // Equal compares tp to tp2 and returns true if they are equal. func (tp Timestamp) Equal(tp2 Timestamp) bool { return tp.T == tp2.T && tp.I == tp2.I @@ -151,24 +161,25 @@ func (tp Timestamp) IsZero() bool { return tp.T == 0 && tp.I == 0 } -// CompareTimestamp returns an integer comparing two Timestamps, where T is compared first, followed by I. -// Returns 0 if tp = tp2, 1 if tp > tp2, -1 if tp < tp2. -func CompareTimestamp(tp, tp2 Timestamp) int { - if tp.Equal(tp2) { +// Compare compares the time instant tp with tp2. If tp is before tp2, it returns -1; if tp is after +// tp2, it returns +1; if they're the same, it returns 0. 
+func (tp Timestamp) Compare(tp2 Timestamp) int { + switch { + case tp.Equal(tp2): return 0 - } - - if tp.T > tp2.T { - return 1 - } - if tp.T < tp2.T { + case tp.Before(tp2): return -1 + default: + return +1 } - // Compare I values because T values are equal - if tp.I > tp2.I { - return 1 - } - return -1 +} + +// CompareTimestamp compares the time instant tp with tp2. If tp is before tp2, it returns -1; if tp is after +// tp2, it returns +1; if they're the same, it returns 0. +// +// Deprecated: Use Timestamp.Compare instead. +func CompareTimestamp(tp, tp2 Timestamp) int { + return tp.Compare(tp2) } // MinKey represents the BSON minkey value. @@ -186,6 +197,9 @@ type MaxKey struct{} type D []E // Map creates a map from the elements of the D. +// +// Deprecated: Converting directly from a D to an M will not be supported in Go Driver 2.0. Instead, +// users should marshal the D to BSON using bson.Marshal and unmarshal it to M using bson.Unmarshal. func (d D) Map() M { m := make(M, len(d)) for _, e := range d { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go index 1cbe3884d..ff32a87a7 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go @@ -8,6 +8,7 @@ package bson import ( "errors" + "fmt" "reflect" "go.mongodb.org/mongo-driver/bson/bsoncodec" @@ -21,10 +22,16 @@ var primitiveCodecs PrimitiveCodecs // PrimitiveCodecs is a namespace for all of the default bsoncodec.Codecs for the primitive types // defined in this package. +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. type PrimitiveCodecs struct{} // RegisterPrimitiveCodecs will register the encode and decode methods attached to PrimitiveCodecs // with the provided RegistryBuilder. if rb is nil, a new empty RegistryBuilder will be created. 
+// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) { if rb == nil { panic(errors.New("argument to RegisterPrimitiveCodecs must not be nil")) @@ -38,18 +45,35 @@ func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) } // RawValueEncodeValue is the ValueEncoderFunc for RawValue. -func (PrimitiveCodecs) RawValueEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// If the RawValue's Type is "invalid" and the RawValue's Value is not empty or +// nil, then this method will return an error. +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive +// encoders and decoders registered. +func (PrimitiveCodecs) RawValueEncodeValue(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tRawValue { - return bsoncodec.ValueEncoderError{Name: "RawValueEncodeValue", Types: []reflect.Type{tRawValue}, Received: val} + return bsoncodec.ValueEncoderError{ + Name: "RawValueEncodeValue", + Types: []reflect.Type{tRawValue}, + Received: val, + } } rawvalue := val.Interface().(RawValue) + if !rawvalue.Type.IsValid() { + return fmt.Errorf("the RawValue Type specifies an invalid BSON type: %#x", byte(rawvalue.Type)) + } + return bsonrw.Copier{}.CopyValueFromBytes(vw, rawvalue.Type, rawvalue.Value) } // RawValueDecodeValue is the ValueDecoderFunc for RawValue. -func (PrimitiveCodecs) RawValueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. 
+func (PrimitiveCodecs) RawValueDecodeValue(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tRawValue { return bsoncodec.ValueDecoderError{Name: "RawValueDecodeValue", Types: []reflect.Type{tRawValue}, Received: val} } @@ -64,7 +88,10 @@ func (PrimitiveCodecs) RawValueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw } // RawEncodeValue is the ValueEncoderFunc for Reader. -func (PrimitiveCodecs) RawEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. +func (PrimitiveCodecs) RawEncodeValue(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tRaw { return bsoncodec.ValueEncoderError{Name: "RawEncodeValue", Types: []reflect.Type{tRaw}, Received: val} } @@ -75,7 +102,10 @@ func (PrimitiveCodecs) RawEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.Valu } // RawDecodeValue is the ValueDecoderFunc for Reader. -func (PrimitiveCodecs) RawDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. +func (PrimitiveCodecs) RawDecodeValue(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tRaw { return bsoncodec.ValueDecoderError{Name: "RawDecodeValue", Types: []reflect.Type{tRaw}, Received: val} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw.go b/vendor/go.mongodb.org/mongo-driver/bson/raw.go index efd705daa..130da61ba 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw.go @@ -16,18 +16,27 @@ import ( // ErrNilReader indicates that an operation was attempted on a nil bson.Reader. 
var ErrNilReader = errors.New("nil reader") -// Raw is a wrapper around a byte slice. It will interpret the slice as a -// BSON document. This type is a wrapper around a bsoncore.Document. Errors returned from the -// methods on this type and associated types come from the bsoncore package. +// Raw is a raw encoded BSON document. It can be used to delay BSON document decoding or precompute +// a BSON encoded document. +// +// A Raw must be a full BSON document. Use the RawValue type for individual BSON values. type Raw []byte -// NewFromIOReader reads in a document from the given io.Reader and constructs a Raw from -// it. -func NewFromIOReader(r io.Reader) (Raw, error) { +// ReadDocument reads a BSON document from the io.Reader and returns it as a bson.Raw. If the +// reader contains multiple BSON documents, only the first document is read. +func ReadDocument(r io.Reader) (Raw, error) { doc, err := bsoncore.NewDocumentFromReader(r) return Raw(doc), err } +// NewFromIOReader reads a BSON document from the io.Reader and returns it as a bson.Raw. If the +// reader contains multiple BSON documents, only the first document is read. +// +// Deprecated: Use ReadDocument instead. +func NewFromIOReader(r io.Reader) (Raw, error) { + return ReadDocument(r) +} + // Validate validates the document. This method only validates the first document in // the slice, to validate other documents, the slice must be resliced. func (r Raw) Validate() (err error) { return bsoncore.Document(r).Validate() } @@ -51,12 +60,19 @@ func (r Raw) LookupErr(key ...string) (RawValue, error) { // elements. If the document is not valid, the elements up to the invalid point will be returned // along with an error. 
func (r Raw) Elements() ([]RawElement, error) { - elems, err := bsoncore.Document(r).Elements() + doc := bsoncore.Document(r) + if len(doc) == 0 { + return nil, nil + } + elems, err := doc.Elements() + if err != nil { + return nil, err + } relems := make([]RawElement, 0, len(elems)) for _, elem := range elems { relems = append(relems, RawElement(elem)) } - return relems, err + return relems, nil } // Values returns this document as a slice of values. The returned slice will contain valid values. @@ -81,5 +97,5 @@ func (r Raw) IndexErr(index uint) (RawElement, error) { return RawElement(elem), err } -// String implements the fmt.Stringer interface. +// String returns the BSON document encoded as Extended JSON. func (r Raw) String() string { return bsoncore.Document(r).String() } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go b/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go index 006f503a3..8ce13c2cc 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go @@ -10,10 +10,7 @@ import ( "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" ) -// RawElement represents a BSON element in byte form. This type provides a simple way to -// transform a slice of bytes into a BSON element and extract information from it. -// -// RawElement is a thin wrapper around a bsoncore.Element. +// RawElement is a raw encoded BSON document or array element. type RawElement []byte // Key returns the key for this element. If the element is not valid, this method returns an empty @@ -36,7 +33,7 @@ func (re RawElement) ValueErr() (RawValue, error) { // Validate ensures re is a valid BSON element. func (re RawElement) Validate() error { return bsoncore.Element(re).Validate() } -// String implements the fmt.Stringer interface. The output will be in extended JSON format. +// String returns the BSON element encoded as Extended JSON. 
func (re RawElement) String() string { doc := bsoncore.BuildDocument(nil, re) j, err := MarshalExtJSON(Raw(doc), true, false) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go index 75297f30f..4d1bfb316 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go @@ -26,11 +26,10 @@ var ErrNilContext = errors.New("DecodeContext cannot be nil") // ErrNilRegistry is returned when the provided registry is nil. var ErrNilRegistry = errors.New("Registry cannot be nil") -// RawValue represents a BSON value in byte form. It can be used to hold unprocessed BSON or to -// defer processing of BSON. Type is the BSON type of the value and Value are the raw bytes that -// represent the element. +// RawValue is a raw encoded BSON value. It can be used to delay BSON value decoding or precompute +// BSON encoded value. Type is the BSON type of the value and Value is the raw encoded BSON value. // -// This type wraps bsoncore.Value for most of it's functionality. +// A RawValue must be an individual BSON value. Use the Raw type for full BSON documents. type RawValue struct { Type bsontype.Type Value []byte @@ -38,6 +37,12 @@ type RawValue struct { r *bsoncodec.Registry } +// IsZero reports whether the RawValue is zero, i.e. no data is present on +// the RawValue. It returns true if Type is 0 and Value is empty or nil. +func (rv RawValue) IsZero() bool { + return rv.Type == 0x00 && len(rv.Value) == 0 +} + // Unmarshal deserializes BSON into the provided val. If RawValue cannot be unmarshaled into val, an // error is returned. This method will use the registry used to create the RawValue, if the RawValue // was created from partial BSON processing, or it will use the default registry. Users wishing to @@ -268,10 +273,16 @@ func (rv RawValue) Int32OK() (int32, bool) { return convertToCoreValue(rv).Int32 // AsInt32 returns a BSON number as an int32. 
If the BSON type is not a numeric one, this method // will panic. +// +// Deprecated: Use AsInt64 instead. If an int32 is required, convert the returned value to an int32 +// and perform any required overflow/underflow checking. func (rv RawValue) AsInt32() int32 { return convertToCoreValue(rv).AsInt32() } // AsInt32OK is the same as AsInt32, except that it returns a boolean instead of // panicking. +// +// Deprecated: Use AsInt64OK instead. If an int32 is required, convert the returned value to an +// int32 and perform any required overflow/underflow checking. func (rv RawValue) AsInt32OK() (int32, bool) { return convertToCoreValue(rv).AsInt32OK() } // Timestamp returns the BSON timestamp value the Value represents. It panics if the value is a diff --git a/vendor/go.mongodb.org/mongo-driver/bson/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/registry.go index 16d7573e7..b5b0f3568 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/registry.go @@ -6,15 +6,19 @@ package bson -import "go.mongodb.org/mongo-driver/bson/bsoncodec" +import ( + "go.mongodb.org/mongo-driver/bson/bsoncodec" +) // DefaultRegistry is the default bsoncodec.Registry. It contains the default codecs and the // primitive codecs. -var DefaultRegistry = NewRegistryBuilder().Build() +var DefaultRegistry = NewRegistry() // NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and // decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the // PrimitiveCodecs type in this package. +// +// Deprecated: Use NewRegistry instead. 
func NewRegistryBuilder() *bsoncodec.RegistryBuilder { rb := bsoncodec.NewRegistryBuilder() bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb) @@ -22,3 +26,10 @@ func NewRegistryBuilder() *bsoncodec.RegistryBuilder { primitiveCodecs.RegisterPrimitiveCodecs(rb) return rb } + +// NewRegistry creates a new Registry configured with the default encoders and decoders from the +// bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the PrimitiveCodecs +// type in this package. +func NewRegistry() *bsoncodec.Registry { + return NewRegistryBuilder().Build() +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/types.go b/vendor/go.mongodb.org/mongo-driver/bson/types.go index 13a1c35cf..ef3981246 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/types.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/types.go @@ -10,7 +10,7 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" ) -// These constants uniquely refer to each BSON type. +// BSON element types as described in https://bsonspec.org/spec.html. const ( TypeDouble = bsontype.Double TypeString = bsontype.String @@ -34,3 +34,17 @@ const ( TypeMinKey = bsontype.MinKey TypeMaxKey = bsontype.MaxKey ) + +// BSON binary element subtypes as described in https://bsonspec.org/spec.html. 
+const ( + TypeBinaryGeneric = bsontype.BinaryGeneric + TypeBinaryFunction = bsontype.BinaryFunction + TypeBinaryBinaryOld = bsontype.BinaryBinaryOld + TypeBinaryUUIDOld = bsontype.BinaryUUIDOld + TypeBinaryUUID = bsontype.BinaryUUID + TypeBinaryMD5 = bsontype.BinaryMD5 + TypeBinaryEncrypted = bsontype.BinaryEncrypted + TypeBinaryColumn = bsontype.BinaryColumn + TypeBinarySensitive = bsontype.BinarySensitive + TypeBinaryUserDefined = bsontype.BinaryUserDefined +) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go index f936ba183..66da17ee0 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go @@ -14,18 +14,26 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" ) -// Unmarshaler is an interface implemented by types that can unmarshal a BSON -// document representation of themselves. The BSON bytes can be assumed to be -// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data -// after returning. +// Unmarshaler is the interface implemented by types that can unmarshal a BSON +// document representation of themselves. The input can be assumed to be a valid +// encoding of a BSON document. UnmarshalBSON must copy the JSON data if it +// wishes to retain the data after returning. +// +// Unmarshaler is only used to unmarshal full BSON documents. To create custom +// BSON unmarshaling behavior for individual values in a BSON document, +// implement the ValueUnmarshaler interface instead. type Unmarshaler interface { UnmarshalBSON([]byte) error } -// ValueUnmarshaler is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. 
+// ValueUnmarshaler is the interface implemented by types that can unmarshal a +// BSON value representation of themselves. The input can be assumed to be a +// valid encoding of a BSON value. UnmarshalBSONValue must copy the BSON value +// bytes if it wishes to retain the data after returning. +// +// ValueUnmarshaler is only used to unmarshal individual values in a BSON +// document. To create custom BSON unmarshaling behavior for an entire BSON +// document, implement the Unmarshaler interface instead. type ValueUnmarshaler interface { UnmarshalBSONValue(bsontype.Type, []byte) error } @@ -40,6 +48,16 @@ func Unmarshal(data []byte, val interface{}) error { // UnmarshalWithRegistry parses the BSON-encoded data using Registry r and // stores the result in the value pointed to by val. If val is nil or not // a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError. +// +// Deprecated: Use [NewDecoder] and specify the Registry by calling [Decoder.SetRegistry] instead: +// +// dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data)) +// if err != nil { +// panic(err) +// } +// dec.SetRegistry(reg) +// +// See [Decoder] for more examples. func UnmarshalWithRegistry(r *bsoncodec.Registry, data []byte, val interface{}) error { vr := bsonrw.NewBSONDocumentReader(data) return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, vr, val) @@ -48,11 +66,40 @@ func UnmarshalWithRegistry(r *bsoncodec.Registry, data []byte, val interface{}) // UnmarshalWithContext parses the BSON-encoded data using DecodeContext dc and // stores the result in the value pointed to by val. If val is nil or not // a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError. 
+// +// Deprecated: Use [NewDecoder] and use the Decoder configuration methods to set the desired unmarshal +// behavior instead: +// +// dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data)) +// if err != nil { +// panic(err) +// } +// dec.DefaultDocumentM() +// +// See [Decoder] for more examples. func UnmarshalWithContext(dc bsoncodec.DecodeContext, data []byte, val interface{}) error { vr := bsonrw.NewBSONDocumentReader(data) return unmarshalFromReader(dc, vr, val) } +// UnmarshalValue parses the BSON value of type t with bson.DefaultRegistry and +// stores the result in the value pointed to by val. If val is nil or not a pointer, +// UnmarshalValue returns an error. +func UnmarshalValue(t bsontype.Type, data []byte, val interface{}) error { + return UnmarshalValueWithRegistry(DefaultRegistry, t, data, val) +} + +// UnmarshalValueWithRegistry parses the BSON value of type t with registry r and +// stores the result in the value pointed to by val. If val is nil or not a pointer, +// UnmarshalValue returns an error. +// +// Deprecated: Using a custom registry to unmarshal individual BSON values will not be supported in +// Go Driver 2.0. +func UnmarshalValueWithRegistry(r *bsoncodec.Registry, t bsontype.Type, data []byte, val interface{}) error { + vr := bsonrw.NewBSONValueReader(t, data) + return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, vr, val) +} + // UnmarshalExtJSON parses the extended JSON-encoded data and stores the result // in the value pointed to by val. If val is nil or not a pointer, Unmarshal // returns InvalidUnmarshalError. @@ -63,6 +110,20 @@ func UnmarshalExtJSON(data []byte, canonical bool, val interface{}) error { // UnmarshalExtJSONWithRegistry parses the extended JSON-encoded data using // Registry r and stores the result in the value pointed to by val. If val is // nil or not a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError. 
+// +// Deprecated: Use [NewDecoder] and specify the Registry by calling [Decoder.SetRegistry] instead: +// +// vr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), true) +// if err != nil { +// panic(err) +// } +// dec, err := bson.NewDecoder(vr) +// if err != nil { +// panic(err) +// } +// dec.SetRegistry(reg) +// +// See [Decoder] for more examples. func UnmarshalExtJSONWithRegistry(r *bsoncodec.Registry, data []byte, canonical bool, val interface{}) error { ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical) if err != nil { @@ -75,6 +136,21 @@ func UnmarshalExtJSONWithRegistry(r *bsoncodec.Registry, data []byte, canonical // UnmarshalExtJSONWithContext parses the extended JSON-encoded data using // DecodeContext dc and stores the result in the value pointed to by val. If val is // nil or not a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError. +// +// Deprecated: Use [NewDecoder] and use the Decoder configuration methods to set the desired unmarshal +// behavior instead: +// +// vr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), true) +// if err != nil { +// panic(err) +// } +// dec, err := bson.NewDecoder(vr) +// if err != nil { +// panic(err) +// } +// dec.DefaultDocumentM() +// +// See [Decoder] for more examples. 
func UnmarshalExtJSONWithContext(dc bsoncodec.DecodeContext, data []byte, canonical bool, val interface{}) error { ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical) if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/event/monitoring.go b/vendor/go.mongodb.org/mongo-driver/event/monitoring.go index ac05e401c..53d1caf2e 100644 --- a/vendor/go.mongodb.org/mongo-driver/event/monitoring.go +++ b/vendor/go.mongodb.org/mongo-driver/event/monitoring.go @@ -8,6 +8,7 @@ package event // import "go.mongodb.org/mongo-driver/event" import ( "context" + "time" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" @@ -23,8 +24,14 @@ type CommandStartedEvent struct { RequestID int64 ConnectionID string // ServerConnectionID contains the connection ID from the server of the operation. If the server does not return - // this value (e.g. on MDB < 4.2), it is unset. + // this value (e.g. on MDB < 4.2), it is unset. If the server connection ID would cause an int32 overflow, then + // then this field will be nil. + // + // Deprecated: Use ServerConnectionID64. ServerConnectionID *int32 + // ServerConnectionID64 contains the connection ID from the server of the operation. If the server does not + // return this value (e.g. on MDB < 4.2), it is unset. + ServerConnectionID64 *int64 // ServiceID contains the ID of the server to which the command was sent if it is running behind a load balancer. // Otherwise, it is unset. ServiceID *primitive.ObjectID @@ -32,13 +39,22 @@ type CommandStartedEvent struct { // CommandFinishedEvent represents a generic command finishing. type CommandFinishedEvent struct { + // Deprecated: Use Duration instead. DurationNanos int64 + Duration time.Duration CommandName string + DatabaseName string RequestID int64 ConnectionID string // ServerConnectionID contains the connection ID from the server of the operation. If the server does not return - // this value (e.g. on MDB < 4.2), it is unset. 
+ // this value (e.g. on MDB < 4.2), it is unset.If the server connection ID would cause an int32 overflow, then + // this field will be nil. + // + // Deprecated: Use ServerConnectionID64. ServerConnectionID *int32 + // ServerConnectionID64 contains the connection ID from the server of the operation. If the server does not + // return this value (e.g. on MDB < 4.2), it is unset. + ServerConnectionID64 *int64 // ServiceID contains the ID of the server to which the command was sent if it is running behind a load balancer. // Otherwise, it is unset. ServiceID *primitive.ObjectID @@ -105,6 +121,7 @@ type PoolEvent struct { // ServiceID is only set if the Type is PoolCleared and the server is deployed behind a load balancer. This field // can be used to distinguish between individual servers in a load balanced deployment. ServiceID *primitive.ObjectID `json:"serviceId"` + Error error `json:"error"` } // PoolMonitor is a function that allows the user to gain access to events occurring in the pool @@ -157,7 +174,9 @@ type ServerHeartbeatStartedEvent struct { // ServerHeartbeatSucceededEvent is an event generated when the heartbeat succeeds. type ServerHeartbeatSucceededEvent struct { + // Deprecated: Use Duration instead. DurationNanos int64 + Duration time.Duration Reply description.Server ConnectionID string // The address this heartbeat was sent to with a unique identifier Awaited bool // If this heartbeat was awaitable @@ -165,7 +184,9 @@ type ServerHeartbeatSucceededEvent struct { // ServerHeartbeatFailedEvent is an event generated when the heartbeat fails. type ServerHeartbeatFailedEvent struct { + // Deprecated: Use Duration instead. 
DurationNanos int64 + Duration time.Duration Failure error ConnectionID string // The address this heartbeat was sent to with a unique identifier Awaited bool // If this heartbeat was awaitable diff --git a/vendor/go.mongodb.org/mongo-driver/internal/aws/awserr/error.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/awserr/error.go new file mode 100644 index 000000000..63d06a176 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/awserr/error.go @@ -0,0 +1,60 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from: +// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/awserr/error.go +// See THIRD-PARTY-NOTICES for original license terms + +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. 
+ OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + var errs []error + if origErr != nil { + errs = append(errs, origErr) + } + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. +func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/aws/awserr/types.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/awserr/types.go new file mode 100644 index 000000000..18cb4cda2 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/awserr/types.go @@ -0,0 +1,144 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from: +// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/awserr/types.go +// See THIRD-PARTY-NOTICES for original license terms + +package awserr + +import ( + "fmt" +) + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. 
+func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. 
+func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. +func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occurred", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += e[i].Error() + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/aws/credentials/chain_provider.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/credentials/chain_provider.go new file mode 100644 index 000000000..684392715 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/credentials/chain_provider.go @@ -0,0 +1,72 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from: +// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/credentials/chain_provider.go +// See THIRD-PARTY-NOTICES for original license terms + +package credentials + +import ( + "go.mongodb.org/mongo-driver/internal/aws/awserr" +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +type ChainProvider struct { + Providers []Provider + curr Provider +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. +// +// If a provider is found it will be cached and any calls to IsExpired() +// will return the expired state of the cached provider. 
+func (c *ChainProvider) Retrieve() (Value, error) { + var errs = make([]error, 0, len(c.Providers)) + for _, p := range c.Providers { + creds, err := p.Retrieve() + if err == nil { + c.curr = p + return creds, nil + } + errs = append(errs, err) + } + c.curr = nil + + var err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) + return Value{}, err +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. +func (c *ChainProvider) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/aws/credentials/credentials.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/credentials/credentials.go new file mode 100644 index 000000000..53181aa16 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/credentials/credentials.go @@ -0,0 +1,197 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from: +// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/credentials/credentials.go +// See THIRD-PARTY-NOTICES for original license terms + +package credentials + +import ( + "context" + "sync" + "time" + + "go.mongodb.org/mongo-driver/internal/aws/awserr" + "golang.org/x/sync/singleflight" +) + +// A Value is the AWS credentials value for individual credential fields. +// +// A Value is also used to represent Azure credentials. +// Azure credentials only consist of an access token, which is stored in the `SessionToken` field. 
+type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Provider used to get credentials + ProviderName string +} + +// HasKeys returns if the credentials Value has both AccessKeyID and +// SecretAccessKey value set. +func (v Value) HasKeys() bool { + return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + // Retrieve returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// ProviderWithContext is a Provider that can retrieve credentials with a Context +type ProviderWithContext interface { + Provider + + RetrieveWithContext(context.Context) (Value, error) +} + +// A Credentials provides concurrency safe retrieval of AWS credentials Value. +// +// A Credentials is also used to fetch Azure credentials Value. +// +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. 
+type Credentials struct { + sf singleflight.Group + + m sync.RWMutex + creds Value + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + c := &Credentials{ + provider: provider, + } + return c +} + +// GetWithContext returns the credentials value, or error if the credentials +// Value failed to be retrieved. Will return early if the passed in context is +// canceled. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) GetWithContext(ctx context.Context) (Value, error) { + // Check if credentials are cached, and not expired. + select { + case curCreds, ok := <-c.asyncIsExpired(): + // ok will only be true, of the credentials were not expired. ok will + // be false and have no value if the credentials are expired. + if ok { + return curCreds, nil + } + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) + } + + // Cannot pass context down to the actual retrieve, because the first + // context would cancel the whole group when there is not direct + // association of items in the group. 
+ resCh := c.sf.DoChan("", func() (interface{}, error) { + return c.singleRetrieve(&suppressedContext{ctx}) + }) + select { + case res := <-resCh: + return res.Val.(Value), res.Err + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) + } +} + +func (c *Credentials) singleRetrieve(ctx context.Context) (interface{}, error) { + c.m.Lock() + defer c.m.Unlock() + + if curCreds := c.creds; !c.isExpiredLocked(curCreds) { + return curCreds, nil + } + + var creds Value + var err error + if p, ok := c.provider.(ProviderWithContext); ok { + creds, err = p.RetrieveWithContext(ctx) + } else { + creds, err = c.provider.Retrieve() + } + if err == nil { + c.creds = creds + } + + return creds, err +} + +// asyncIsExpired returns a channel of credentials Value. If the channel is +// closed the credentials are expired and credentials value are not empty. +func (c *Credentials) asyncIsExpired() <-chan Value { + ch := make(chan Value, 1) + go func() { + c.m.RLock() + defer c.m.RUnlock() + + if curCreds := c.creds; !c.isExpiredLocked(curCreds) { + ch <- curCreds + } + + close(ch) + }() + + return ch +} + +// isExpiredLocked helper method wrapping the definition of expired credentials. 
+func (c *Credentials) isExpiredLocked(creds interface{}) bool { + return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() +} + +type suppressedContext struct { + context.Context +} + +func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +func (s *suppressedContext) Done() <-chan struct{} { + return nil +} + +func (s *suppressedContext) Err() error { + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/header_rules.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/header_rules.go new file mode 100644 index 000000000..a3726467f --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/header_rules.go @@ -0,0 +1,51 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. 
with code from: +// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/signer/v4/header_rules.go +// See THIRD-PARTY-NOTICES for original license terms + +package v4 + +// validator houses a set of rule needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// excludeList is a generic rule for exclude listing +type excludeList struct { + rule +} + +// IsValid for exclude list checks if the value is within the exclude list +func (b excludeList) IsValid(value string) bool { + return !b.rule.IsValid(value) +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/request.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/request.go similarity index 96% rename from vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/request.go rename to vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/request.go index 014ee0833..7a43bb303 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/request.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/request.go @@ -5,10 +5,10 @@ // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 // // Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. 
with code from: -// - github.com/aws/aws-sdk-go/blob/v1.34.28/aws/request/request.go +// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/request/request.go // See THIRD-PARTY-NOTICES for original license terms -package awsv4 +package v4 import ( "net/http" diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/rest.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/uri_path.go similarity index 72% rename from vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/rest.go rename to vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/uri_path.go index b1f86a095..69b6005eb 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/rest.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/uri_path.go @@ -5,14 +5,17 @@ // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 // // Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from: -// - github.com/aws/aws-sdk-go/blob/v1.34.28/private/protocol/rest/build.go +// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/signer/v4/uri_path.go +// - github.com/aws/aws-sdk-go/blob/v1.44.225/private/protocol/rest/build.go // See THIRD-PARTY-NOTICES for original license terms -package awsv4 +package v4 import ( "bytes" "fmt" + "net/url" + "strings" ) // Whether the byte value can be sent without escaping in AWS URLs @@ -31,6 +34,22 @@ func init() { } } +func getURIPath(u *url.URL) string { + var uri string + + if len(u.Opaque) > 0 { + uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") + } else { + uri = u.EscapedPath() + } + + if len(uri) == 0 { + uri = "/" + } + + return uri +} + // EscapePath escapes part of a URL path in Amazon style func EscapePath(path string, encodeSep bool) string { var buf bytes.Buffer diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/signer.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/v4.go similarity index 80% rename 
from vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/signer.go rename to vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/v4.go index 23508c1f7..6cf4586bb 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/signer.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/signer/v4/v4.go @@ -5,13 +5,10 @@ // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 // // Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from: -// - github.com/aws/aws-sdk-go/blob/v1.34.28/aws/request/request.go -// - github.com/aws/aws-sdk-go/blob/v1.34.28/aws/signer/v4/v4.go -// - github.com/aws/aws-sdk-go/blob/v1.34.28/aws/signer/v4/uri_path.go -// - github.com/aws/aws-sdk-go/blob/v1.34.28/aws/types.go +// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/signer/v4/v4.go // See THIRD-PARTY-NOTICES for original license terms -package awsv4 +package v4 import ( "crypto/hmac" @@ -25,6 +22,9 @@ import ( "sort" "strings" "time" + + "go.mongodb.org/mongo-driver/internal/aws" + "go.mongodb.org/mongo-driver/internal/aws/credentials" ) const ( @@ -41,7 +41,7 @@ const ( ) var ignoredHeaders = rules{ - denylist{ + excludeList{ mapRule{ authorizationHeader: struct{}{}, "User-Agent": struct{}{}, @@ -53,13 +53,13 @@ var ignoredHeaders = rules{ // Signer applies AWS v4 signing to given request. Use this to sign requests // that need to be signed with AWS V4 Signatures. type Signer struct { - Credentials *StaticProvider + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials } -// NewSigner returns a Signer pointer configured with the credentials and optional -// option values provided. If not options are provided the Signer will use its -// default configuration. -func NewSigner(credentials *StaticProvider) *Signer { +// NewSigner returns a Signer pointer configured with the credentials provided. 
+func NewSigner(credentials *credentials.Credentials) *Signer { v4 := &Signer{ Credentials: credentials, } @@ -76,7 +76,7 @@ type signingCtx struct { Time time.Time SignedHeaderVals http.Header - credValues Value + credValues credentials.Value bodyDigest string signedHeaders string @@ -85,7 +85,6 @@ type signingCtx struct { credentialString string stringToSign string signature string - authorization string } // Sign signs AWS v4 requests with the provided body, service name, region the @@ -136,7 +135,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi } var err error - ctx.credValues, err = v4.Credentials.Retrieve() + ctx.credValues, err = v4.Credentials.GetWithContext(r.Context()) if err != nil { return http.Header{}, err } @@ -200,31 +199,6 @@ func (ctx *signingCtx) build() error { return nil } -// GetSignedRequestSignature attempts to extract the signature of the request. -// Returning an error if the request is unsigned, or unable to extract the -// signature. 
-func GetSignedRequestSignature(r *http.Request) ([]byte, error) { - - if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { - ps := strings.Split(auth, ", ") - for _, p := range ps { - if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { - sig := p[len(authHeaderSignatureElem):] - if len(sig) == 0 { - return nil, fmt.Errorf("invalid request signature authorization header") - } - return hex.DecodeString(sig) - } - } - } - - if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { - return hex.DecodeString(sig) - } - - return nil, fmt.Errorf("request not signed") -} - func (ctx *signingCtx) buildTime() { ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) } @@ -234,7 +208,7 @@ func (ctx *signingCtx) buildCredentialString() { } func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { - headers := make([]string, 0, len(header)) + headers := make([]string, 0, len(header)+1) headers = append(headers, "host") for k, v := range header { if !r.IsValid(k) { @@ -258,37 +232,25 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { ctx.signedHeaders = strings.Join(headers, ";") - headerValues := make([]string, len(headers)) + headerItems := make([]string, len(headers)) for i, k := range headers { if k == "host" { if ctx.Request.Host != "" { - headerValues[i] = "host:" + ctx.Request.Host + headerItems[i] = "host:" + ctx.Request.Host } else { - headerValues[i] = "host:" + ctx.Request.URL.Host + headerItems[i] = "host:" + ctx.Request.URL.Host } } else { - headerValues[i] = k + ":" + - strings.Join(ctx.SignedHeaderVals[k], ",") + headerValues := make([]string, len(ctx.SignedHeaderVals[k])) + for i, v := range ctx.SignedHeaderVals[k] { + headerValues[i] = strings.TrimSpace(v) + } + headerItems[i] = k + ":" + + strings.Join(headerValues, ",") } } - stripExcessSpaces(headerValues) - ctx.canonicalHeaders = strings.Join(headerValues, "\n") -} - -func getURIPath(u *url.URL) string { - var uri string - - if 
len(u.Opaque) > 0 { - uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") - } else { - uri = u.EscapedPath() - } - - if len(uri) == 0 { - uri = "/" - } - - return uri + stripExcessSpaces(headerItems) + ctx.canonicalHeaders = strings.Join(headerItems, "\n") } func (ctx *signingCtx) buildCanonicalString() { @@ -329,6 +291,9 @@ func (ctx *signingCtx) buildBodyDigest() error { if ctx.Body == nil { hash = emptyStringSHA256 } else { + if !aws.IsReaderSeekable(ctx.Body) { + return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) + } hashBytes, err := makeSha256Reader(ctx.Body) if err != nil { return err @@ -358,27 +323,6 @@ func hashSHA256(data []byte) []byte { return hash.Sum(nil) } -// seekerLen attempts to get the number of bytes remaining at the seeker's -// current position. Returns the number of bytes remaining or error. -func seekerLen(s io.Seeker) (int64, error) { - curOffset, err := s.Seek(0, io.SeekCurrent) - if err != nil { - return 0, err - } - - endOffset, err := s.Seek(0, io.SeekEnd) - if err != nil { - return 0, err - } - - _, err = s.Seek(curOffset, io.SeekStart) - if err != nil { - return 0, err - } - - return endOffset - curOffset, nil -} - func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { hash := sha256.New() start, err := reader.Seek(0, io.SeekCurrent) @@ -392,7 +336,7 @@ func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. 
- size, err := seekerLen(reader) + size, err := aws.SeekerLen(reader) if err != nil { _, _ = io.Copy(hash, reader) } else { @@ -409,6 +353,8 @@ const doubleSpace = " " func stripExcessSpaces(vals []string) { var j, k, l, m, spaces int for i, str := range vals { + // revive:disable:empty-block + // Trim trailing spaces for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { } @@ -416,6 +362,9 @@ func stripExcessSpaces(vals []string) { // Trim leading spaces for k = 0; k < j && str[k] == ' '; k++ { } + + // revive:enable:empty-block + str = str[k : j+1] // Strip multiple spaces. diff --git a/vendor/go.mongodb.org/mongo-driver/internal/aws/types.go b/vendor/go.mongodb.org/mongo-driver/internal/aws/types.go new file mode 100644 index 000000000..52aecda76 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/aws/types.go @@ -0,0 +1,153 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from: +// - github.com/aws/aws-sdk-go/blob/v1.44.225/aws/types.go +// See THIRD-PARTY-NOTICES for original license terms + +package aws + +import ( + "io" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the +// SDK to accept an io.Reader that is not also an io.Seeker for unsigned +// streaming payload API operations. +// +// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API +// operation's input will prevent that operation being retried in the case of +// network errors, and cause operation requests to fail if the operation +// requires payload signing. 
+// +// Note: If using With S3 PutObject to stream an object upload The SDK's S3 +// Upload manager (s3manager.Uploader) provides support for streaming with the +// ability to retry network errors. +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// IsReaderSeekable returns if the underlying reader type can be seeked. A +// io.Reader might not actually be seekable if it is the ReaderSeekerCloser +// type. +func IsReaderSeekable(r io.Reader) bool { + switch v := r.(type) { + case ReaderSeekerCloser: + return v.IsSeeker() + case *ReaderSeekerCloser: + return v.IsSeeker() + case io.ReadSeeker: + return true + default: + return false + } +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be +// returned. +// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// IsSeeker returns if the underlying reader is also a seeker. 
+func (r ReaderSeekerCloser) IsSeeker() bool { + _, ok := r.r.(io.Seeker) + return ok +} + +// HasLen returns the length of the underlying reader if the value implements +// the Len() int method. +func (r ReaderSeekerCloser) HasLen() (int, bool) { + type lenner interface { + Len() int + } + + if lr, ok := r.r.(lenner); ok { + return lr.Len(), true + } + + return 0, false +} + +// GetLen returns the length of the bytes remaining in the underlying reader. +// Checks first for Len(), then io.Seeker to determine the size of the +// underlying reader. +// +// Will return -1 if the length cannot be determined. +func (r ReaderSeekerCloser) GetLen() (int64, error) { + if l, ok := r.HasLen(); ok { + return int64(l), nil + } + + if s, ok := r.r.(io.Seeker); ok { + return seekerLen(s) + } + + return -1, nil +} + +// SeekerLen attempts to get the number of bytes remaining at the seeker's +// current position. Returns the number of bytes remaining or error. +func SeekerLen(s io.Seeker) (int64, error) { + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that a io.Readers might not actually be seekable. 
+ switch v := s.(type) { + case ReaderSeekerCloser: + return v.GetLen() + case *ReaderSeekerCloser: + return v.GetLen() + } + + return seekerLen(s) +} + +func seekerLen(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, io.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, io.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, io.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/string_util.go b/vendor/go.mongodb.org/mongo-driver/internal/bsonutil/bsonutil.go similarity index 64% rename from vendor/go.mongodb.org/mongo-driver/internal/string_util.go rename to vendor/go.mongodb.org/mongo-driver/internal/bsonutil/bsonutil.go index 6cafa791d..eebb32890 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/string_util.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/bsonutil/bsonutil.go @@ -4,7 +4,7 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -package internal +package bsonutil import ( "fmt" @@ -12,13 +12,6 @@ import ( "go.mongodb.org/mongo-driver/bson" ) -// StringSliceFromRawElement decodes the provided BSON element into a []string. This internally calls -// StringSliceFromRawValue on the element's value. The error conditions outlined in that function's documentation -// apply for this function as well. -func StringSliceFromRawElement(element bson.RawElement) ([]string, error) { - return StringSliceFromRawValue(element.Key(), element.Value()) -} - // StringSliceFromRawValue decodes the provided BSON value into a []string. This function returns an error if the value // is not an array or any of the elements in the array are not strings. The name parameter is used to add context to // error messages. 
@@ -43,3 +36,27 @@ func StringSliceFromRawValue(name string, val bson.RawValue) ([]string, error) { } return strs, nil } + +// RawToDocuments converts a bson.Raw that is internally an array of documents to []bson.Raw. +func RawToDocuments(doc bson.Raw) []bson.Raw { + values, err := doc.Values() + if err != nil { + panic(fmt.Sprintf("error converting BSON document to values: %v", err)) + } + + out := make([]bson.Raw, len(values)) + for i := range values { + out[i] = values[i].Document() + } + + return out +} + +// RawToInterfaces takes one or many bson.Raw documents and returns them as a []interface{}. +func RawToInterfaces(docs ...bson.Raw) []interface{} { + out := make([]interface{}, len(docs)) + for i := range docs { + out[i] = docs[i] + } + return out +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/cancellation_listener.go b/vendor/go.mongodb.org/mongo-driver/internal/cancellation_listener.go deleted file mode 100644 index a7fa163bb..000000000 --- a/vendor/go.mongodb.org/mongo-driver/internal/cancellation_listener.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package internal - -import "context" - -// CancellationListener listens for context cancellation in a loop until the context expires or the listener is aborted. -type CancellationListener struct { - aborted bool - done chan struct{} -} - -// NewCancellationListener constructs a CancellationListener. -func NewCancellationListener() *CancellationListener { - return &CancellationListener{ - done: make(chan struct{}), - } -} - -// Listen blocks until the provided context is cancelled or listening is aborted via the StopListening function. If this -// detects that the context has been cancelled (i.e. 
ctx.Err() == context.Canceled), the provided callback is called to -// abort in-progress work. Even if the context expires, this function will block until StopListening is called. -func (c *CancellationListener) Listen(ctx context.Context, abortFn func()) { - c.aborted = false - - select { - case <-ctx.Done(): - if ctx.Err() == context.Canceled { - c.aborted = true - abortFn() - } - - <-c.done - case <-c.done: - } -} - -// StopListening stops the in-progress Listen call. This blocks if there is no in-progress Listen call. This function -// will return true if the provided abort callback was called when listening for cancellation on the previous context. -func (c *CancellationListener) StopListening() bool { - c.done <- struct{}{} - return c.aborted -} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/codecutil/encoding.go b/vendor/go.mongodb.org/mongo-driver/internal/codecutil/encoding.go new file mode 100644 index 000000000..2aaf8f271 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/codecutil/encoding.go @@ -0,0 +1,65 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package codecutil + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var ErrNilValue = errors.New("value is nil") + +// MarshalError is returned when attempting to transform a value into a document +// results in an error. +type MarshalError struct { + Value interface{} + Err error +} + +// Error implements the error interface. 
+func (e MarshalError) Error() string { + return fmt.Sprintf("cannot transform type %s to a BSON Document: %v", + reflect.TypeOf(e.Value), e.Err) +} + +// EncoderFn is used to functionally construct an encoder for marshaling values. +type EncoderFn func(io.Writer) (*bson.Encoder, error) + +// MarshalValue will attempt to encode the value with the encoder returned by +// the encoder function. +func MarshalValue(val interface{}, encFn EncoderFn) (bsoncore.Value, error) { + // If the val is already a bsoncore.Value, then do nothing. + if bval, ok := val.(bsoncore.Value); ok { + return bval, nil + } + + if val == nil { + return bsoncore.Value{}, ErrNilValue + } + + buf := new(bytes.Buffer) + + enc, err := encFn(buf) + if err != nil { + return bsoncore.Value{}, err + } + + // Encode the value in a single-element document with an empty key. Use + // bsoncore to extract the first element and return the BSON value. + err = enc.Encode(bson.D{{Key: "", Value: val}}) + if err != nil { + return bsoncore.Value{}, MarshalError{Value: val, Err: err} + } + + return bsoncore.Document(buf.Bytes()).Index(0).Value(), nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/credproviders/assume_role_provider.go b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/assume_role_provider.go new file mode 100644 index 000000000..3a95cf401 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/assume_role_provider.go @@ -0,0 +1,148 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package credproviders + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "time" + + "go.mongodb.org/mongo-driver/internal/aws/credentials" + "go.mongodb.org/mongo-driver/internal/uuid" +) + +const ( + // assumeRoleProviderName provides a name of assume role provider + assumeRoleProviderName = "AssumeRoleProvider" + + stsURI = `https://sts.amazonaws.com/?Action=AssumeRoleWithWebIdentity&RoleSessionName=%s&RoleArn=%s&WebIdentityToken=%s&Version=2011-06-15` +) + +// An AssumeRoleProvider retrieves credentials for assume role with web identity. +type AssumeRoleProvider struct { + AwsRoleArnEnv EnvVar + AwsWebIdentityTokenFileEnv EnvVar + AwsRoleSessionNameEnv EnvVar + + httpClient *http.Client + expiration time.Time + + // expiryWindow will allow the credentials to trigger refreshing prior to the credentials actually expiring. + // This is beneficial so expiring credentials do not cause request to fail unexpectedly due to exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + expiryWindow time.Duration +} + +// NewAssumeRoleProvider returns a pointer to an assume role provider. +func NewAssumeRoleProvider(httpClient *http.Client, expiryWindow time.Duration) *AssumeRoleProvider { + return &AssumeRoleProvider{ + // AwsRoleArnEnv is the environment variable for AWS_ROLE_ARN + AwsRoleArnEnv: EnvVar("AWS_ROLE_ARN"), + // AwsWebIdentityTokenFileEnv is the environment variable for AWS_WEB_IDENTITY_TOKEN_FILE + AwsWebIdentityTokenFileEnv: EnvVar("AWS_WEB_IDENTITY_TOKEN_FILE"), + // AwsRoleSessionNameEnv is the environment variable for AWS_ROLE_SESSION_NAME + AwsRoleSessionNameEnv: EnvVar("AWS_ROLE_SESSION_NAME"), + httpClient: httpClient, + expiryWindow: expiryWindow, + } +} + +// RetrieveWithContext retrieves the keys from the AWS service. 
+func (a *AssumeRoleProvider) RetrieveWithContext(ctx context.Context) (credentials.Value, error) { + const defaultHTTPTimeout = 10 * time.Second + + v := credentials.Value{ProviderName: assumeRoleProviderName} + + roleArn := a.AwsRoleArnEnv.Get() + tokenFile := a.AwsWebIdentityTokenFileEnv.Get() + if tokenFile == "" && roleArn == "" { + return v, errors.New("AWS_WEB_IDENTITY_TOKEN_FILE and AWS_ROLE_ARN are missing") + } + if tokenFile != "" && roleArn == "" { + return v, errors.New("AWS_WEB_IDENTITY_TOKEN_FILE is set, but AWS_ROLE_ARN is missing") + } + if tokenFile == "" && roleArn != "" { + return v, errors.New("AWS_ROLE_ARN is set, but AWS_WEB_IDENTITY_TOKEN_FILE is missing") + } + token, err := ioutil.ReadFile(tokenFile) + if err != nil { + return v, err + } + + sessionName := a.AwsRoleSessionNameEnv.Get() + if sessionName == "" { + // Use a UUID if the RoleSessionName is not given. + id, err := uuid.New() + if err != nil { + return v, err + } + sessionName = id.String() + } + + fullURI := fmt.Sprintf(stsURI, sessionName, roleArn, string(token)) + + req, err := http.NewRequest(http.MethodPost, fullURI, nil) + if err != nil { + return v, err + } + req.Header.Set("Accept", "application/json") + + ctx, cancel := context.WithTimeout(ctx, defaultHTTPTimeout) + defer cancel() + resp, err := a.httpClient.Do(req.WithContext(ctx)) + if err != nil { + return v, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return v, fmt.Errorf("response failure: %s", resp.Status) + } + + var stsResp struct { + Response struct { + Result struct { + Credentials struct { + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string `json:"SecretAccessKey"` + Token string `json:"SessionToken"` + Expiration float64 `json:"Expiration"` + } `json:"Credentials"` + } `json:"AssumeRoleWithWebIdentityResult"` + } `json:"AssumeRoleWithWebIdentityResponse"` + } + + err = json.NewDecoder(resp.Body).Decode(&stsResp) + if err != nil { + return v, err + } + 
v.AccessKeyID = stsResp.Response.Result.Credentials.AccessKeyID + v.SecretAccessKey = stsResp.Response.Result.Credentials.SecretAccessKey + v.SessionToken = stsResp.Response.Result.Credentials.Token + if !v.HasKeys() { + return v, errors.New("failed to retrieve web identity keys") + } + sec := int64(stsResp.Response.Result.Credentials.Expiration) + a.expiration = time.Unix(sec, 0).Add(-a.expiryWindow) + + return v, nil +} + +// Retrieve retrieves the keys from the AWS service. +func (a *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + return a.RetrieveWithContext(context.Background()) +} + +// IsExpired returns true if the credentials are expired. +func (a *AssumeRoleProvider) IsExpired() bool { + return a.expiration.Before(time.Now()) +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/credproviders/ec2_provider.go b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/ec2_provider.go new file mode 100644 index 000000000..771bfca13 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/ec2_provider.go @@ -0,0 +1,183 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package credproviders + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "time" + + "go.mongodb.org/mongo-driver/internal/aws/credentials" +) + +const ( + // ec2ProviderName provides a name of EC2 provider + ec2ProviderName = "EC2Provider" + + awsEC2URI = "http://169.254.169.254/" + awsEC2RolePath = "latest/meta-data/iam/security-credentials/" + awsEC2TokenPath = "latest/api/token" + + defaultHTTPTimeout = 10 * time.Second +) + +// An EC2Provider retrieves credentials from EC2 metadata. 
+type EC2Provider struct { + httpClient *http.Client + expiration time.Time + + // expiryWindow will allow the credentials to trigger refreshing prior to the credentials actually expiring. + // This is beneficial so expiring credentials do not cause request to fail unexpectedly due to exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + expiryWindow time.Duration +} + +// NewEC2Provider returns a pointer to an EC2 credential provider. +func NewEC2Provider(httpClient *http.Client, expiryWindow time.Duration) *EC2Provider { + return &EC2Provider{ + httpClient: httpClient, + expiryWindow: expiryWindow, + } +} + +func (e *EC2Provider) getToken(ctx context.Context) (string, error) { + req, err := http.NewRequest(http.MethodPut, awsEC2URI+awsEC2TokenPath, nil) + if err != nil { + return "", err + } + const defaultEC2TTLSeconds = "30" + req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", defaultEC2TTLSeconds) + + ctx, cancel := context.WithTimeout(ctx, defaultHTTPTimeout) + defer cancel() + resp, err := e.httpClient.Do(req.WithContext(ctx)) + if err != nil { + return "", err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("%s %s failed: %s", req.Method, req.URL.String(), resp.Status) + } + + token, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + if len(token) == 0 { + return "", errors.New("unable to retrieve token from EC2 metadata") + } + return string(token), nil +} + +func (e *EC2Provider) getRoleName(ctx context.Context, token string) (string, error) { + req, err := http.NewRequest(http.MethodGet, awsEC2URI+awsEC2RolePath, nil) + if err != nil { + return "", err + } + req.Header.Set("X-aws-ec2-metadata-token", token) + + ctx, cancel := context.WithTimeout(ctx, defaultHTTPTimeout) + defer cancel() + resp, err := e.httpClient.Do(req.WithContext(ctx)) + if err != nil { + return "", err + } + 
defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("%s %s failed: %s", req.Method, req.URL.String(), resp.Status) + } + + role, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + if len(role) == 0 { + return "", errors.New("unable to retrieve role_name from EC2 metadata") + } + return string(role), nil +} + +func (e *EC2Provider) getCredentials(ctx context.Context, token string, role string) (credentials.Value, time.Time, error) { + v := credentials.Value{ProviderName: ec2ProviderName} + + pathWithRole := awsEC2URI + awsEC2RolePath + role + req, err := http.NewRequest(http.MethodGet, pathWithRole, nil) + if err != nil { + return v, time.Time{}, err + } + req.Header.Set("X-aws-ec2-metadata-token", token) + ctx, cancel := context.WithTimeout(ctx, defaultHTTPTimeout) + defer cancel() + resp, err := e.httpClient.Do(req.WithContext(ctx)) + if err != nil { + return v, time.Time{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return v, time.Time{}, fmt.Errorf("%s %s failed: %s", req.Method, req.URL.String(), resp.Status) + } + + var ec2Resp struct { + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string `json:"SecretAccessKey"` + Token string `json:"Token"` + Expiration time.Time `json:"Expiration"` + } + + err = json.NewDecoder(resp.Body).Decode(&ec2Resp) + if err != nil { + return v, time.Time{}, err + } + + v.AccessKeyID = ec2Resp.AccessKeyID + v.SecretAccessKey = ec2Resp.SecretAccessKey + v.SessionToken = ec2Resp.Token + + return v, ec2Resp.Expiration, nil +} + +// RetrieveWithContext retrieves the keys from the AWS service. 
+func (e *EC2Provider) RetrieveWithContext(ctx context.Context) (credentials.Value, error) { + v := credentials.Value{ProviderName: ec2ProviderName} + + token, err := e.getToken(ctx) + if err != nil { + return v, err + } + + role, err := e.getRoleName(ctx, token) + if err != nil { + return v, err + } + + v, exp, err := e.getCredentials(ctx, token, role) + if err != nil { + return v, err + } + if !v.HasKeys() { + return v, errors.New("failed to retrieve EC2 keys") + } + e.expiration = exp.Add(-e.expiryWindow) + + return v, nil +} + +// Retrieve retrieves the keys from the AWS service. +func (e *EC2Provider) Retrieve() (credentials.Value, error) { + return e.RetrieveWithContext(context.Background()) +} + +// IsExpired returns true if the credentials are expired. +func (e *EC2Provider) IsExpired() bool { + return e.expiration.Before(time.Now()) +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/credproviders/ecs_provider.go b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/ecs_provider.go new file mode 100644 index 000000000..0c3a27e62 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/ecs_provider.go @@ -0,0 +1,112 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package credproviders + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "time" + + "go.mongodb.org/mongo-driver/internal/aws/credentials" +) + +const ( + // ecsProviderName provides a name of ECS provider + ecsProviderName = "ECSProvider" + + awsRelativeURI = "http://169.254.170.2/" +) + +// An ECSProvider retrieves credentials from ECS metadata. 
+type ECSProvider struct { + AwsContainerCredentialsRelativeURIEnv EnvVar + + httpClient *http.Client + expiration time.Time + + // expiryWindow will allow the credentials to trigger refreshing prior to the credentials actually expiring. + // This is beneficial so expiring credentials do not cause request to fail unexpectedly due to exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + expiryWindow time.Duration +} + +// NewECSProvider returns a pointer to an ECS credential provider. +func NewECSProvider(httpClient *http.Client, expiryWindow time.Duration) *ECSProvider { + return &ECSProvider{ + // AwsContainerCredentialsRelativeURIEnv is the environment variable for AWS_CONTAINER_CREDENTIALS_RELATIVE_URI + AwsContainerCredentialsRelativeURIEnv: EnvVar("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"), + httpClient: httpClient, + expiryWindow: expiryWindow, + } +} + +// RetrieveWithContext retrieves the keys from the AWS service. 
+func (e *ECSProvider) RetrieveWithContext(ctx context.Context) (credentials.Value, error) { + const defaultHTTPTimeout = 10 * time.Second + + v := credentials.Value{ProviderName: ecsProviderName} + + relativeEcsURI := e.AwsContainerCredentialsRelativeURIEnv.Get() + if len(relativeEcsURI) == 0 { + return v, errors.New("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI is missing") + } + fullURI := awsRelativeURI + relativeEcsURI + + req, err := http.NewRequest(http.MethodGet, fullURI, nil) + if err != nil { + return v, err + } + req.Header.Set("Accept", "application/json") + + ctx, cancel := context.WithTimeout(ctx, defaultHTTPTimeout) + defer cancel() + resp, err := e.httpClient.Do(req.WithContext(ctx)) + if err != nil { + return v, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return v, fmt.Errorf("response failure: %s", resp.Status) + } + + var ecsResp struct { + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string `json:"SecretAccessKey"` + Token string `json:"Token"` + Expiration time.Time `json:"Expiration"` + } + + err = json.NewDecoder(resp.Body).Decode(&ecsResp) + if err != nil { + return v, err + } + + v.AccessKeyID = ecsResp.AccessKeyID + v.SecretAccessKey = ecsResp.SecretAccessKey + v.SessionToken = ecsResp.Token + if !v.HasKeys() { + return v, errors.New("failed to retrieve ECS keys") + } + e.expiration = ecsResp.Expiration.Add(-e.expiryWindow) + + return v, nil +} + +// Retrieve retrieves the keys from the AWS service. +func (e *ECSProvider) Retrieve() (credentials.Value, error) { + return e.RetrieveWithContext(context.Background()) +} + +// IsExpired returns true if the credentials are expired. 
+func (e *ECSProvider) IsExpired() bool { + return e.expiration.Before(time.Now()) +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/credproviders/env_provider.go b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/env_provider.go new file mode 100644 index 000000000..59ca63363 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/env_provider.go @@ -0,0 +1,69 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package credproviders + +import ( + "os" + + "go.mongodb.org/mongo-driver/internal/aws/credentials" +) + +// envProviderName provides a name of Env provider +const envProviderName = "EnvProvider" + +// EnvVar is an environment variable +type EnvVar string + +// Get retrieves the environment variable +func (ev EnvVar) Get() string { + return os.Getenv(string(ev)) +} + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +type EnvProvider struct { + AwsAccessKeyIDEnv EnvVar + AwsSecretAccessKeyEnv EnvVar + AwsSessionTokenEnv EnvVar + + retrieved bool +} + +// NewEnvProvider returns a pointer to an ECS credential provider. +func NewEnvProvider() *EnvProvider { + return &EnvProvider{ + // AwsAccessKeyIDEnv is the environment variable for AWS_ACCESS_KEY_ID + AwsAccessKeyIDEnv: EnvVar("AWS_ACCESS_KEY_ID"), + // AwsSecretAccessKeyEnv is the environment variable for AWS_SECRET_ACCESS_KEY + AwsSecretAccessKeyEnv: EnvVar("AWS_SECRET_ACCESS_KEY"), + // AwsSessionTokenEnv is the environment variable for AWS_SESSION_TOKEN + AwsSessionTokenEnv: EnvVar("AWS_SESSION_TOKEN"), + } +} + +// Retrieve retrieves the keys from the environment. 
+func (e *EnvProvider) Retrieve() (credentials.Value, error) { + e.retrieved = false + + v := credentials.Value{ + AccessKeyID: e.AwsAccessKeyIDEnv.Get(), + SecretAccessKey: e.AwsSecretAccessKeyEnv.Get(), + SessionToken: e.AwsSessionTokenEnv.Get(), + ProviderName: envProviderName, + } + err := verify(v) + if err == nil { + e.retrieved = true + } + + return v, err +} + +// IsExpired returns true if the credentials have not been retrieved. +func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/credproviders/imds_provider.go b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/imds_provider.go new file mode 100644 index 000000000..96dad1a82 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/imds_provider.go @@ -0,0 +1,103 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package credproviders + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "time" + + "go.mongodb.org/mongo-driver/internal/aws/credentials" +) + +const ( + // AzureProviderName provides a name of Azure provider + AzureProviderName = "AzureProvider" + + azureURI = "http://169.254.169.254/metadata/identity/oauth2/token" +) + +// An AzureProvider retrieves credentials from Azure IMDS. +type AzureProvider struct { + httpClient *http.Client + expiration time.Time + expiryWindow time.Duration +} + +// NewAzureProvider returns a pointer to an Azure credential provider. +func NewAzureProvider(httpClient *http.Client, expiryWindow time.Duration) *AzureProvider { + return &AzureProvider{ + httpClient: httpClient, + expiration: time.Time{}, + expiryWindow: expiryWindow, + } +} + +// RetrieveWithContext retrieves the keys from the Azure service. 
+func (a *AzureProvider) RetrieveWithContext(ctx context.Context) (credentials.Value, error) { + v := credentials.Value{ProviderName: AzureProviderName} + req, err := http.NewRequest(http.MethodGet, azureURI, nil) + if err != nil { + return v, fmt.Errorf("unable to retrieve Azure credentials: %w", err) + } + q := make(url.Values) + q.Set("api-version", "2018-02-01") + q.Set("resource", "https://vault.azure.net") + req.URL.RawQuery = q.Encode() + req.Header.Set("Metadata", "true") + req.Header.Set("Accept", "application/json") + + resp, err := a.httpClient.Do(req.WithContext(ctx)) + if err != nil { + return v, fmt.Errorf("unable to retrieve Azure credentials: %w", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return v, fmt.Errorf("unable to retrieve Azure credentials: error reading response body: %w", err) + } + if resp.StatusCode != http.StatusOK { + return v, fmt.Errorf("unable to retrieve Azure credentials: expected StatusCode 200, got StatusCode: %v. Response body: %s", resp.StatusCode, body) + } + var tokenResponse struct { + AccessToken string `json:"access_token"` + ExpiresIn string `json:"expires_in"` + } + // Attempt to read body as JSON + err = json.Unmarshal(body, &tokenResponse) + if err != nil { + return v, fmt.Errorf("unable to retrieve Azure credentials: error reading body JSON: %w (response body: %s)", err, body) + } + if tokenResponse.AccessToken == "" { + return v, fmt.Errorf("unable to retrieve Azure credentials: got unexpected empty accessToken from Azure Metadata Server. Response body: %s", body) + } + v.SessionToken = tokenResponse.AccessToken + + expiresIn, err := time.ParseDuration(tokenResponse.ExpiresIn + "s") + if err != nil { + return v, err + } + if expiration := expiresIn - a.expiryWindow; expiration > 0 { + a.expiration = time.Now().Add(expiration) + } + + return v, err +} + +// Retrieve retrieves the keys from the Azure service. 
+func (a *AzureProvider) Retrieve() (credentials.Value, error) { + return a.RetrieveWithContext(context.Background()) +} + +// IsExpired returns if the credentials have been retrieved. +func (a *AzureProvider) IsExpired() bool { + return a.expiration.Before(time.Now()) +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/credproviders/static_provider.go b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/static_provider.go new file mode 100644 index 000000000..6b4961394 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/credproviders/static_provider.go @@ -0,0 +1,59 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package credproviders + +import ( + "errors" + + "go.mongodb.org/mongo-driver/internal/aws/credentials" +) + +// staticProviderName provides a name of Static provider +const staticProviderName = "StaticProvider" + +// A StaticProvider is a set of credentials which are set programmatically, +// and will never expire. +type StaticProvider struct { + credentials.Value + + verified bool + err error +} + +func verify(v credentials.Value) error { + if !v.HasKeys() { + return errors.New("failed to retrieve ACCESS_KEY_ID and SECRET_ACCESS_KEY") + } + if v.AccessKeyID != "" && v.SecretAccessKey == "" { + return errors.New("ACCESS_KEY_ID is set, but SECRET_ACCESS_KEY is missing") + } + if v.AccessKeyID == "" && v.SecretAccessKey != "" { + return errors.New("SECRET_ACCESS_KEY is set, but ACCESS_KEY_ID is missing") + } + if v.AccessKeyID == "" && v.SecretAccessKey == "" && v.SessionToken != "" { + return errors.New("AWS_SESSION_TOKEN is set, but ACCESS_KEY_ID and SECRET_ACCESS_KEY are missing") + } + return nil + +} + +// Retrieve returns the credentials or error if the credentials are invalid. 
+func (s *StaticProvider) Retrieve() (credentials.Value, error) { + if !s.verified { + s.err = verify(s.Value) + s.Value.ProviderName = staticProviderName + s.verified = true + } + return s.Value, s.err +} + +// IsExpired returns if the credentials are expired. +// +// For StaticProvider, the credentials never expired. +func (s *StaticProvider) IsExpired() bool { + return false +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go b/vendor/go.mongodb.org/mongo-driver/internal/csfle/csfle.go similarity index 98% rename from vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go rename to vendor/go.mongodb.org/mongo-driver/internal/csfle/csfle.go index 635d8e353..71e71b468 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/csfle/csfle.go @@ -4,7 +4,7 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -package internal +package csfle import ( "fmt" diff --git a/vendor/go.mongodb.org/mongo-driver/internal/csot_util.go b/vendor/go.mongodb.org/mongo-driver/internal/csot/csot.go similarity index 99% rename from vendor/go.mongodb.org/mongo-driver/internal/csot_util.go rename to vendor/go.mongodb.org/mongo-driver/internal/csot/csot.go index 1e63257b3..678252c51 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/csot_util.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/csot/csot.go @@ -4,7 +4,7 @@ // not use this file except in compliance with the License. 
You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -package internal +package csot import ( "context" diff --git a/vendor/go.mongodb.org/mongo-driver/internal/driverutil/hello.go b/vendor/go.mongodb.org/mongo-driver/internal/driverutil/hello.go new file mode 100644 index 000000000..18a70f0ca --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/driverutil/hello.go @@ -0,0 +1,128 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package driverutil + +import ( + "os" + "strings" +) + +const AwsLambdaPrefix = "AWS_Lambda_" + +const ( + // FaaS environment variable names + + // EnvVarAWSExecutionEnv is the AWS Execution environment variable. + EnvVarAWSExecutionEnv = "AWS_EXECUTION_ENV" + // EnvVarAWSLambdaRuntimeAPI is the AWS Lambda runtime API variable. + EnvVarAWSLambdaRuntimeAPI = "AWS_LAMBDA_RUNTIME_API" + // EnvVarFunctionsWorkerRuntime is the functions worker runtime variable. + EnvVarFunctionsWorkerRuntime = "FUNCTIONS_WORKER_RUNTIME" + // EnvVarKService is the K Service variable. + EnvVarKService = "K_SERVICE" + // EnvVarFunctionName is the function name variable. + EnvVarFunctionName = "FUNCTION_NAME" + // EnvVarVercel is the Vercel variable. + EnvVarVercel = "VERCEL" + // EnvVarK8s is the K8s variable. + EnvVarK8s = "KUBERNETES_SERVICE_HOST" +) + +const ( + // FaaS environment variable names + + // EnvVarAWSRegion is the AWS region variable. + EnvVarAWSRegion = "AWS_REGION" + // EnvVarAWSLambdaFunctionMemorySize is the AWS Lambda function memory size variable. + EnvVarAWSLambdaFunctionMemorySize = "AWS_LAMBDA_FUNCTION_MEMORY_SIZE" + // EnvVarFunctionMemoryMB is the function memory in megabytes variable. 
+ EnvVarFunctionMemoryMB = "FUNCTION_MEMORY_MB" + // EnvVarFunctionTimeoutSec is the function timeout in seconds variable. + EnvVarFunctionTimeoutSec = "FUNCTION_TIMEOUT_SEC" + // EnvVarFunctionRegion is the function region variable. + EnvVarFunctionRegion = "FUNCTION_REGION" + // EnvVarVercelRegion is the Vercel region variable. + EnvVarVercelRegion = "VERCEL_REGION" +) + +const ( + // FaaS environment names used by the client + + // EnvNameAWSLambda is the AWS Lambda environment name. + EnvNameAWSLambda = "aws.lambda" + // EnvNameAzureFunc is the Azure Function environment name. + EnvNameAzureFunc = "azure.func" + // EnvNameGCPFunc is the Google Cloud Function environment name. + EnvNameGCPFunc = "gcp.func" + // EnvNameVercel is the Vercel environment name. + EnvNameVercel = "vercel" +) + +// GetFaasEnvName parses the FaaS environment variable name and returns the +// corresponding name used by the client. If none of the variables or variables +// for multiple names are populated the client.env value MUST be entirely +// omitted. When variables for multiple "client.env.name" values are present, +// "vercel" takes precedence over "aws.lambda"; any other combination MUST cause +// "client.env" to be entirely omitted. +func GetFaasEnvName() string { + envVars := []string{ + EnvVarAWSExecutionEnv, + EnvVarAWSLambdaRuntimeAPI, + EnvVarFunctionsWorkerRuntime, + EnvVarKService, + EnvVarFunctionName, + EnvVarVercel, + } + + // If none of the variables are populated the client.env value MUST be + // entirely omitted. 
+ names := make(map[string]struct{}) + + for _, envVar := range envVars { + val := os.Getenv(envVar) + if val == "" { + continue + } + + var name string + + switch envVar { + case EnvVarAWSExecutionEnv: + if !strings.HasPrefix(val, AwsLambdaPrefix) { + continue + } + + name = EnvNameAWSLambda + case EnvVarAWSLambdaRuntimeAPI: + name = EnvNameAWSLambda + case EnvVarFunctionsWorkerRuntime: + name = EnvNameAzureFunc + case EnvVarKService, EnvVarFunctionName: + name = EnvNameGCPFunc + case EnvVarVercel: + // "vercel" takes precedence over "aws.lambda". + delete(names, EnvNameAWSLambda) + + name = EnvNameVercel + } + + names[name] = struct{}{} + if len(names) > 1 { + // If multiple names are populated the client.env value + // MUST be entirely omitted. + names = nil + + break + } + } + + for name := range names { + return name + } + + return "" +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/driverutil/operation.go b/vendor/go.mongodb.org/mongo-driver/internal/driverutil/operation.go new file mode 100644 index 000000000..32704312f --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/driverutil/operation.go @@ -0,0 +1,31 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package driverutil + +// Operation Names should be sourced from the command reference documentation: +// https://www.mongodb.com/docs/manual/reference/command/ +const ( + AbortTransactionOp = "abortTransaction" // AbortTransactionOp is the name for aborting a transaction + AggregateOp = "aggregate" // AggregateOp is the name for aggregating + CommitTransactionOp = "commitTransaction" // CommitTransactionOp is the name for committing a transaction + CountOp = "count" // CountOp is the name for counting + CreateOp = "create" // CreateOp is the name for creating + CreateIndexesOp = "createIndexes" // CreateIndexesOp is the name for creating indexes + DeleteOp = "delete" // DeleteOp is the name for deleting + DistinctOp = "distinct" // DistinctOp is the name for distinct + DropOp = "drop" // DropOp is the name for dropping + DropDatabaseOp = "dropDatabase" // DropDatabaseOp is the name for dropping a database + DropIndexesOp = "dropIndexes" // DropIndexesOp is the name for dropping indexes + EndSessionsOp = "endSessions" // EndSessionsOp is the name for ending sessions + FindAndModifyOp = "findAndModify" // FindAndModifyOp is the name for finding and modifying + FindOp = "find" // FindOp is the name for finding + InsertOp = "insert" // InsertOp is the name for inserting + ListCollectionsOp = "listCollections" // ListCollectionsOp is the name for listing collections + ListIndexesOp = "listIndexes" // ListIndexesOp is the name for listing indexes + ListDatabasesOp = "listDatabases" // ListDatabasesOp is the name for listing databases + UpdateOp = "update" // UpdateOp is the name for updating +) diff --git a/vendor/go.mongodb.org/mongo-driver/internal/error.go b/vendor/go.mongodb.org/mongo-driver/internal/error.go deleted file mode 100644 index 348bcdfb1..000000000 --- a/vendor/go.mongodb.org/mongo-driver/internal/error.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (C) MongoDB, 
Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package internal - -import ( - "fmt" -) - -// WrappedError represents an error that contains another error. -type WrappedError interface { - // Message gets the basic message of the error. - Message() string - // Inner gets the inner error if one exists. - Inner() error -} - -// RolledUpErrorMessage gets a flattened error message. -func RolledUpErrorMessage(err error) string { - if wrappedErr, ok := err.(WrappedError); ok { - inner := wrappedErr.Inner() - if inner != nil { - return fmt.Sprintf("%s: %s", wrappedErr.Message(), RolledUpErrorMessage(inner)) - } - - return wrappedErr.Message() - } - - return err.Error() -} - -// UnwrapError attempts to unwrap the error down to its root cause. -func UnwrapError(err error) error { - - switch tErr := err.(type) { - case WrappedError: - return UnwrapError(tErr.Inner()) - case *multiError: - return UnwrapError(tErr.errors[0]) - } - - return err -} - -// WrapError wraps an error with a message. -func WrapError(inner error, message string) error { - return &wrappedError{message, inner} -} - -// WrapErrorf wraps an error with a message. -func WrapErrorf(inner error, format string, args ...interface{}) error { - return &wrappedError{fmt.Sprintf(format, args...), inner} -} - -// MultiError combines multiple errors into a single error. If there are no errors, -// nil is returned. If there is 1 error, it is returned. Otherwise, they are combined. 
-func MultiError(errors ...error) error { - - // remove nils from the error list - var nonNils []error - for _, e := range errors { - if e != nil { - nonNils = append(nonNils, e) - } - } - - switch len(nonNils) { - case 0: - return nil - case 1: - return nonNils[0] - default: - return &multiError{ - message: "multiple errors encountered", - errors: nonNils, - } - } -} - -type multiError struct { - message string - errors []error -} - -func (e *multiError) Message() string { - return e.message -} - -func (e *multiError) Error() string { - result := e.message - for _, e := range e.errors { - result += fmt.Sprintf("\n %s", e) - } - return result -} - -func (e *multiError) Errors() []error { - return e.errors -} - -type wrappedError struct { - message string - inner error -} - -func (e *wrappedError) Message() string { - return e.message -} - -func (e *wrappedError) Error() string { - return RolledUpErrorMessage(e) -} - -func (e *wrappedError) Inner() error { - return e.inner -} - -func (e *wrappedError) Unwrap() error { - return e.inner -} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/const.go b/vendor/go.mongodb.org/mongo-driver/internal/handshake/handshake.go similarity index 64% rename from vendor/go.mongodb.org/mongo-driver/internal/const.go rename to vendor/go.mongodb.org/mongo-driver/internal/handshake/handshake.go index a7ef69d13..c9537d3ef 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/const.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/handshake/handshake.go @@ -4,16 +4,10 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -package internal // import "go.mongodb.org/mongo-driver/internal" - -// Version is the current version of the driver. -var Version = "local build" +package handshake // LegacyHello is the legacy version of the hello command. 
var LegacyHello = "isMaster" // LegacyHelloLowercase is the lowercase, legacy version of the hello command. var LegacyHelloLowercase = "ismaster" - -// LegacyNotPrimary is the legacy version of the "not primary" server error message. -var LegacyNotPrimary = "not master" diff --git a/vendor/go.mongodb.org/mongo-driver/internal/http.go b/vendor/go.mongodb.org/mongo-driver/internal/httputil/httputil.go similarity index 52% rename from vendor/go.mongodb.org/mongo-driver/internal/http.go rename to vendor/go.mongodb.org/mongo-driver/internal/httputil/httputil.go index 13c5fbe9c..db0dd5f12 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/http.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/httputil/httputil.go @@ -4,30 +4,22 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -package internal // import "go.mongodb.org/mongo-driver/internal" +package httputil import ( "net/http" - "time" ) // DefaultHTTPClient is the default HTTP client used across the driver. var DefaultHTTPClient = &http.Client{ - // TODO(GODRIVER-2623): Use "http.DefaultTransport.Clone" once we change the minimum supported Go version to 1.13. - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - }, + Transport: http.DefaultTransport.(*http.Transport).Clone(), } // CloseIdleHTTPConnections closes any connections which were previously -// connected from previous requests but are now sitting idle in -// a "keep-alive" state. It does not interrupt any connections currently -// in use. -// Borrowed from go standard library. +// connected from previous requests but are now sitting idle in a "keep-alive" +// state. It does not interrupt any connections currently in use. +// +// Borrowed from the Go standard library. 
func CloseIdleHTTPConnections(client *http.Client) { type closeIdler interface { CloseIdleConnections() diff --git a/vendor/go.mongodb.org/mongo-driver/internal/logger/component.go b/vendor/go.mongodb.org/mongo-driver/internal/logger/component.go new file mode 100644 index 000000000..0a3d55320 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/logger/component.go @@ -0,0 +1,314 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package logger + +import ( + "os" + "strconv" + + "go.mongodb.org/mongo-driver/bson/primitive" +) + +const ( + CommandFailed = "Command failed" + CommandStarted = "Command started" + CommandSucceeded = "Command succeeded" + ConnectionPoolCreated = "Connection pool created" + ConnectionPoolReady = "Connection pool ready" + ConnectionPoolCleared = "Connection pool cleared" + ConnectionPoolClosed = "Connection pool closed" + ConnectionCreated = "Connection created" + ConnectionReady = "Connection ready" + ConnectionClosed = "Connection closed" + ConnectionCheckoutStarted = "Connection checkout started" + ConnectionCheckoutFailed = "Connection checkout failed" + ConnectionCheckedOut = "Connection checked out" + ConnectionCheckedIn = "Connection checked in" + ServerSelectionFailed = "Server selection failed" + ServerSelectionStarted = "Server selection started" + ServerSelectionSucceeded = "Server selection succeeded" + ServerSelectionWaiting = "Waiting for suitable server to become available" + TopologyClosed = "Stopped topology monitoring" + TopologyDescriptionChanged = "Topology description changed" + TopologyOpening = "Starting topology monitoring" + TopologyServerClosed = "Stopped server monitoring" + TopologyServerHeartbeatFailed = "Server heartbeat failed" + TopologyServerHeartbeatStarted = "Server 
heartbeat started" + TopologyServerHeartbeatSucceeded = "Server heartbeat succeeded" + TopologyServerOpening = "Starting server monitoring" +) + +const ( + KeyAwaited = "awaited" + KeyCommand = "command" + KeyCommandName = "commandName" + KeyDatabaseName = "databaseName" + KeyDriverConnectionID = "driverConnectionId" + KeyDurationMS = "durationMS" + KeyError = "error" + KeyFailure = "failure" + KeyMaxConnecting = "maxConnecting" + KeyMaxIdleTimeMS = "maxIdleTimeMS" + KeyMaxPoolSize = "maxPoolSize" + KeyMessage = "message" + KeyMinPoolSize = "minPoolSize" + KeyNewDescription = "newDescription" + KeyOperation = "operation" + KeyOperationID = "operationId" + KeyPreviousDescription = "previousDescription" + KeyRemainingTimeMS = "remainingTimeMS" + KeyReason = "reason" + KeyReply = "reply" + KeyRequestID = "requestId" + KeySelector = "selector" + KeyServerConnectionID = "serverConnectionId" + KeyServerHost = "serverHost" + KeyServerPort = "serverPort" + KeyServiceID = "serviceId" + KeyTimestamp = "timestamp" + KeyTopologyDescription = "topologyDescription" + KeyTopologyID = "topologyId" +) + +// KeyValues is a list of key-value pairs. +type KeyValues []interface{} + +// Add adds a key-value pair to an instance of a KeyValues list. 
+func (kvs *KeyValues) Add(key string, value interface{}) { + *kvs = append(*kvs, key, value) +} + +const ( + ReasonConnClosedStale = "Connection became stale because the pool was cleared" + ReasonConnClosedIdle = "Connection has been available but unused for longer than the configured max idle time" + ReasonConnClosedError = "An error occurred while using the connection" + ReasonConnClosedPoolClosed = "Connection pool was closed" + ReasonConnCheckoutFailedTimout = "Wait queue timeout elapsed without a connection becoming available" + ReasonConnCheckoutFailedError = "An error occurred while trying to establish a new connection" + ReasonConnCheckoutFailedPoolClosed = "Connection pool was closed" +) + +// Component is an enumeration representing the "components" which can be +// logged against. A LogLevel can be configured on a per-component basis. +type Component int + +const ( + // ComponentAll enables logging for all components. + ComponentAll Component = iota + + // ComponentCommand enables command monitor logging. + ComponentCommand + + // ComponentTopology enables topology logging. + ComponentTopology + + // ComponentServerSelection enables server selection logging. + ComponentServerSelection + + // ComponentConnection enables connection services logging. + ComponentConnection +) + +const ( + mongoDBLogAllEnvVar = "MONGODB_LOG_ALL" + mongoDBLogCommandEnvVar = "MONGODB_LOG_COMMAND" + mongoDBLogTopologyEnvVar = "MONGODB_LOG_TOPOLOGY" + mongoDBLogServerSelectionEnvVar = "MONGODB_LOG_SERVER_SELECTION" + mongoDBLogConnectionEnvVar = "MONGODB_LOG_CONNECTION" +) + +var componentEnvVarMap = map[string]Component{ + mongoDBLogAllEnvVar: ComponentAll, + mongoDBLogCommandEnvVar: ComponentCommand, + mongoDBLogTopologyEnvVar: ComponentTopology, + mongoDBLogServerSelectionEnvVar: ComponentServerSelection, + mongoDBLogConnectionEnvVar: ComponentConnection, +} + +// EnvHasComponentVariables returns true if the environment contains any of the +// component environment variables. 
+func EnvHasComponentVariables() bool { + for envVar := range componentEnvVarMap { + if os.Getenv(envVar) != "" { + return true + } + } + + return false +} + +// Command is a struct defining common fields that must be included in all +// commands. +type Command struct { + // TODO(GODRIVER-2824): change the DriverConnectionID type to int64. + DriverConnectionID uint64 // Driver's ID for the connection + Name string // Command name + DatabaseName string // Database name + Message string // Message associated with the command + OperationID int32 // Driver-generated operation ID + RequestID int64 // Driver-generated request ID + ServerConnectionID *int64 // Server's ID for the connection used for the command + ServerHost string // Hostname or IP address for the server + ServerPort string // Port for the server + ServiceID *primitive.ObjectID // ID for the command in load balancer mode +} + +// SerializeCommand takes a command and a variable number of key-value pairs and +// returns a slice of interface{} that can be passed to the logger for +// structured logging. +func SerializeCommand(cmd Command, extraKeysAndValues ...interface{}) KeyValues { + // Initialize the boilerplate keys and values. + keysAndValues := KeyValues{ + KeyCommandName, cmd.Name, + KeyDatabaseName, cmd.DatabaseName, + KeyDriverConnectionID, cmd.DriverConnectionID, + KeyMessage, cmd.Message, + KeyOperationID, cmd.OperationID, + KeyRequestID, cmd.RequestID, + KeyServerHost, cmd.ServerHost, + } + + // Add the extra keys and values. + for i := 0; i < len(extraKeysAndValues); i += 2 { + keysAndValues.Add(extraKeysAndValues[i].(string), extraKeysAndValues[i+1]) + } + + port, err := strconv.ParseInt(cmd.ServerPort, 10, 32) + if err == nil { + keysAndValues.Add(KeyServerPort, port) + } + + // Add the "serverConnectionId" if it is not nil. + if cmd.ServerConnectionID != nil { + keysAndValues.Add(KeyServerConnectionID, *cmd.ServerConnectionID) + } + + // Add the "serviceId" if it is not nil. 
+ if cmd.ServiceID != nil { + keysAndValues.Add(KeyServiceID, cmd.ServiceID.Hex()) + } + + return keysAndValues +} + +// Connection contains data that all connection log messages MUST contain. +type Connection struct { + Message string // Message associated with the connection + ServerHost string // Hostname or IP address for the server + ServerPort string // Port for the server +} + +// SerializeConnection serializes a Connection message into a slice of keys and +// values that can be passed to a logger. +func SerializeConnection(conn Connection, extraKeysAndValues ...interface{}) KeyValues { + // Initialize the boilerplate keys and values. + keysAndValues := KeyValues{ + KeyMessage, conn.Message, + KeyServerHost, conn.ServerHost, + } + + // Add the optional keys and values. + for i := 0; i < len(extraKeysAndValues); i += 2 { + keysAndValues.Add(extraKeysAndValues[i].(string), extraKeysAndValues[i+1]) + } + + port, err := strconv.ParseInt(conn.ServerPort, 10, 32) + if err == nil { + keysAndValues.Add(KeyServerPort, port) + } + + return keysAndValues +} + +// Server contains data that all server messages MAY contain. +type Server struct { + DriverConnectionID uint64 // Driver's ID for the connection + TopologyID primitive.ObjectID // Driver's unique ID for this topology + Message string // Message associated with the topology + ServerConnectionID *int64 // Server's ID for the connection + ServerHost string // Hostname or IP address for the server + ServerPort string // Port for the server +} + +// SerializeServer serializes a Server message into a slice of keys and +// values that can be passed to a logger. +func SerializeServer(srv Server, extraKV ...interface{}) KeyValues { + // Initialize the boilerplate keys and values. 
+ keysAndValues := KeyValues{ + KeyDriverConnectionID, srv.DriverConnectionID, + KeyMessage, srv.Message, + KeyServerHost, srv.ServerHost, + KeyTopologyID, srv.TopologyID.Hex(), + } + + if connID := srv.ServerConnectionID; connID != nil { + keysAndValues.Add(KeyServerConnectionID, *connID) + } + + port, err := strconv.ParseInt(srv.ServerPort, 10, 32) + if err == nil { + keysAndValues.Add(KeyServerPort, port) + } + + // Add the optional keys and values. + for i := 0; i < len(extraKV); i += 2 { + keysAndValues.Add(extraKV[i].(string), extraKV[i+1]) + } + + return keysAndValues +} + +// ServerSelection contains data that all server selection messages MUST +// contain. +type ServerSelection struct { + Selector string + OperationID *int32 + Operation string + TopologyDescription string +} + +// SerializeServerSelection serializes a Topology message into a slice of keys +// and values that can be passed to a logger. +func SerializeServerSelection(srvSelection ServerSelection, extraKV ...interface{}) KeyValues { + keysAndValues := KeyValues{ + KeySelector, srvSelection.Selector, + KeyOperation, srvSelection.Operation, + KeyTopologyDescription, srvSelection.TopologyDescription, + } + + if srvSelection.OperationID != nil { + keysAndValues.Add(KeyOperationID, *srvSelection.OperationID) + } + + // Add the optional keys and values. + for i := 0; i < len(extraKV); i += 2 { + keysAndValues.Add(extraKV[i].(string), extraKV[i+1]) + } + + return keysAndValues +} + +// Topology contains data that all topology messages MAY contain. +type Topology struct { + ID primitive.ObjectID // Driver's unique ID for this topology + Message string // Message associated with the topology +} + +// SerializeTopology serializes a Topology message into a slice of keys and +// values that can be passed to a logger. +func SerializeTopology(topo Topology, extraKV ...interface{}) KeyValues { + keysAndValues := KeyValues{ + KeyTopologyID, topo.ID.Hex(), + } + + // Add the optional keys and values. 
+ for i := 0; i < len(extraKV); i += 2 { + keysAndValues.Add(extraKV[i].(string), extraKV[i+1]) + } + + return keysAndValues +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/logger/context.go b/vendor/go.mongodb.org/mongo-driver/internal/logger/context.go new file mode 100644 index 000000000..785f141c4 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/logger/context.go @@ -0,0 +1,48 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package logger + +import "context" + +// contextKey is a custom type used to prevent key collisions when using the +// context package. +type contextKey string + +const ( + contextKeyOperation contextKey = "operation" + contextKeyOperationID contextKey = "operationID" +) + +// WithOperationName adds the operation name to the context. +func WithOperationName(ctx context.Context, operation string) context.Context { + return context.WithValue(ctx, contextKeyOperation, operation) +} + +// WithOperationID adds the operation ID to the context. +func WithOperationID(ctx context.Context, operationID int32) context.Context { + return context.WithValue(ctx, contextKeyOperationID, operationID) +} + +// OperationName returns the operation name from the context. +func OperationName(ctx context.Context) (string, bool) { + operationName := ctx.Value(contextKeyOperation) + if operationName == nil { + return "", false + } + + return operationName.(string), true +} + +// OperationID returns the operation ID from the context. 
+func OperationID(ctx context.Context) (int32, bool) { + operationID := ctx.Value(contextKeyOperationID) + if operationID == nil { + return 0, false + } + + return operationID.(int32), true +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go b/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go new file mode 100644 index 000000000..c5ff1474b --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go @@ -0,0 +1,58 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package logger + +import ( + "encoding/json" + "io" + "sync" + "time" +) + +// IOSink writes a JSON-encoded message to the io.Writer. +type IOSink struct { + enc *json.Encoder + + // encMu protects the encoder from concurrent writes. While the logger + // itself does not concurrently write to the sink, the sink may be used + // concurrently within the driver. + encMu sync.Mutex +} + +// Compile-time check to ensure IOSink implements the LogSink interface. +var _ LogSink = &IOSink{} + +// NewIOSink will create an IOSink object that writes JSON messages to the +// provided io.Writer. +func NewIOSink(out io.Writer) *IOSink { + return &IOSink{ + enc: json.NewEncoder(out), + } +} + +// Info will write a JSON-encoded message to the io.Writer. +func (sink *IOSink) Info(_ int, msg string, keysAndValues ...interface{}) { + kvMap := make(map[string]interface{}, len(keysAndValues)/2+2) + + kvMap[KeyTimestamp] = time.Now().UnixNano() + kvMap[KeyMessage] = msg + + for i := 0; i < len(keysAndValues); i += 2 { + kvMap[keysAndValues[i].(string)] = keysAndValues[i+1] + } + + sink.encMu.Lock() + defer sink.encMu.Unlock() + + _ = sink.enc.Encode(kvMap) +} + +// Error will write a JSON-encoded error message to the io.Writer. 
+func (sink *IOSink) Error(err error, msg string, kv ...interface{}) { + kv = append(kv, KeyError, err.Error()) + sink.Info(0, msg, kv...) +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/logger/level.go b/vendor/go.mongodb.org/mongo-driver/internal/logger/level.go new file mode 100644 index 000000000..07f85b35d --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/logger/level.go @@ -0,0 +1,74 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package logger + +import "strings" + +// DiffToInfo is the number of levels in the Go Driver that come before the +// "Info" level. This should ensure that "Info" is the 0th level passed to the +// sink. +const DiffToInfo = 1 + +// Level is an enumeration representing the log severity levels supported by +// the driver. The order of the logging levels is important. The driver expects +// that a user will likely use the "logr" package to create a LogSink, which +// defaults InfoLevel as 0. Any additions to the Level enumeration before the +// InfoLevel will need to also update the "diffToInfo" constant. +type Level int + +const ( + // LevelOff suppresses logging. + LevelOff Level = iota + + // LevelInfo enables logging of informational messages. These logs are + // high-level information about normal driver behavior. + LevelInfo + + // LevelDebug enables logging of debug messages. These logs can be + // voluminous and are intended for detailed information that may be + // helpful when debugging an application. 
+ LevelDebug +) + +const ( + levelLiteralOff = "off" + levelLiteralEmergency = "emergency" + levelLiteralAlert = "alert" + levelLiteralCritical = "critical" + levelLiteralError = "error" + levelLiteralWarning = "warning" + levelLiteralNotice = "notice" + levelLiteralInfo = "info" + levelLiteralDebug = "debug" + levelLiteralTrace = "trace" +) + +var LevelLiteralMap = map[string]Level{ + levelLiteralOff: LevelOff, + levelLiteralEmergency: LevelInfo, + levelLiteralAlert: LevelInfo, + levelLiteralCritical: LevelInfo, + levelLiteralError: LevelInfo, + levelLiteralWarning: LevelInfo, + levelLiteralNotice: LevelInfo, + levelLiteralInfo: LevelInfo, + levelLiteralDebug: LevelDebug, + levelLiteralTrace: LevelDebug, +} + +// ParseLevel will check if the given string is a valid environment variable +// for a logging severity level. If it is, then it will return the associated +// driver's Level. The default Level is “LevelOff”. +func ParseLevel(str string) Level { + for literal, level := range LevelLiteralMap { + if strings.EqualFold(literal, str) { + return level + } + } + + return LevelOff +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/logger/logger.go b/vendor/go.mongodb.org/mongo-driver/internal/logger/logger.go new file mode 100644 index 000000000..03d42814f --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/logger/logger.go @@ -0,0 +1,275 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package logger provides the internal logging solution for the MongoDB Go +// Driver. +package logger + +import ( + "fmt" + "os" + "strconv" + "strings" +) + +// DefaultMaxDocumentLength is the default maximum number of bytes that can be +// logged for a stringified BSON document. 
+const DefaultMaxDocumentLength = 1000 + +// TruncationSuffix are trailing ellipsis "..." appended to a message to +// indicate to the user that truncation occurred. This constant does not count +// toward the max document length. +const TruncationSuffix = "..." + +const logSinkPathEnvVar = "MONGODB_LOG_PATH" +const maxDocumentLengthEnvVar = "MONGODB_LOG_MAX_DOCUMENT_LENGTH" + +// LogSink represents a logging implementation, this interface should be 1-1 +// with the exported "LogSink" interface in the mongo/options package. +type LogSink interface { + // Info logs a non-error message with the given key/value pairs. The + // level argument is provided for optional logging. + Info(level int, msg string, keysAndValues ...interface{}) + + // Error logs an error, with the given message and key/value pairs. + Error(err error, msg string, keysAndValues ...interface{}) +} + +// Logger represents the configuration for the internal logger. +type Logger struct { + ComponentLevels map[Component]Level // Log levels for each component. + Sink LogSink // LogSink for log printing. + MaxDocumentLength uint // Command truncation width. + logFile *os.File // File to write logs to. +} + +// New will construct a new logger. If any of the given options are the +// zero-value of the argument type, then the constructor will attempt to +// source the data from the environment. If the environment has not been set, +// then the constructor will the respective default values. +func New(sink LogSink, maxDocLen uint, compLevels map[Component]Level) (*Logger, error) { + logger := &Logger{ + ComponentLevels: selectComponentLevels(compLevels), + MaxDocumentLength: selectMaxDocumentLength(maxDocLen), + } + + sink, logFile, err := selectLogSink(sink) + if err != nil { + return nil, err + } + + logger.Sink = sink + logger.logFile = logFile + + return logger, nil +} + +// Close will close the logger's log file, if it exists. 
+func (logger *Logger) Close() error { + if logger.logFile != nil { + return logger.logFile.Close() + } + + return nil +} + +// LevelComponentEnabled will return true if the given LogLevel is enabled for +// the given LogComponent. If the ComponentLevels on the logger are enabled for +// "ComponentAll", then this function will return true for any level bound by +// the level assigned to "ComponentAll". +// +// If the level is not enabled (i.e. LevelOff), then false is returned. This is +// to avoid false positives, such as returning "true" for a component that is +// not enabled. For example, without this condition, an empty LevelComponent +// would be considered "enabled" for "LevelOff". +func (logger *Logger) LevelComponentEnabled(level Level, component Component) bool { + if level == LevelOff { + return false + } + + if logger.ComponentLevels == nil { + return false + } + + return logger.ComponentLevels[component] >= level || + logger.ComponentLevels[ComponentAll] >= level +} + +// Print will synchronously print the given message to the configured LogSink. +// If the LogSink is nil, then this method will do nothing. Future work could be done to make +// this method asynchronous, see buffer management in libraries such as log4j. +// +// It's worth noting that many structured logs defined by DBX-wide +// specifications include a "message" field, which is often shared with the +// message arguments passed to this print function. The "Info" method used by +// this function is implemented based on the go-logr/logr LogSink interface, +// which is why "Print" has a message parameter. Any duplication in code is +// intentional to adhere to the logr pattern. +func (logger *Logger) Print(level Level, component Component, msg string, keysAndValues ...interface{}) { + // If the level is not enabled for the component, then + // skip the message. + if !logger.LevelComponentEnabled(level, component) { + return + } + + // If the sink is nil, then skip the message. 
+ if logger.Sink == nil { + return + } + + logger.Sink.Info(int(level)-DiffToInfo, msg, keysAndValues...) +} + +// Error logs an error, with the given message and key/value pairs. +// It functions similarly to Print, but may have unique behavior, and should be +// preferred for logging errors. +func (logger *Logger) Error(err error, msg string, keysAndValues ...interface{}) { + if logger.Sink == nil { + return + } + + logger.Sink.Error(err, msg, keysAndValues...) +} + +// selectMaxDocumentLength will return the integer value of the first non-zero +// function, with the user-defined function taking priority over the environment +// variables. For the environment, the function will attempt to get the value of +// "MONGODB_LOG_MAX_DOCUMENT_LENGTH" and parse it as an unsigned integer. If the +// environment variable is not set or is not an unsigned integer, then this +// function will return the default max document length. +func selectMaxDocumentLength(maxDocLen uint) uint { + if maxDocLen != 0 { + return maxDocLen + } + + maxDocLenEnv := os.Getenv(maxDocumentLengthEnvVar) + if maxDocLenEnv != "" { + maxDocLenEnvInt, err := strconv.ParseUint(maxDocLenEnv, 10, 32) + if err == nil { + return uint(maxDocLenEnvInt) + } + } + + return DefaultMaxDocumentLength +} + +const ( + logSinkPathStdout = "stdout" + logSinkPathStderr = "stderr" +) + +// selectLogSink will return the first non-nil LogSink, with the user-defined +// LogSink taking precedence over the environment-defined LogSink. If no LogSink +// is defined, then this function will return a LogSink that writes to stderr. 
+func selectLogSink(sink LogSink) (LogSink, *os.File, error) { + if sink != nil { + return sink, nil, nil + } + + path := os.Getenv(logSinkPathEnvVar) + lowerPath := strings.ToLower(path) + + if lowerPath == string(logSinkPathStderr) { + return NewIOSink(os.Stderr), nil, nil + } + + if lowerPath == string(logSinkPathStdout) { + return NewIOSink(os.Stdout), nil, nil + } + + if path != "" { + logFile, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + return nil, nil, fmt.Errorf("unable to open log file: %v", err) + } + + return NewIOSink(logFile), logFile, nil + } + + return NewIOSink(os.Stderr), nil, nil +} + +// selectComponentLevels returns a new map of LogComponents to LogLevels that is +// the result of merging the user-defined data with the environment, with the +// user-defined data taking priority. +func selectComponentLevels(componentLevels map[Component]Level) map[Component]Level { + selected := make(map[Component]Level) + + // Determine if the "MONGODB_LOG_ALL" environment variable is set. + var globalEnvLevel *Level + if all := os.Getenv(mongoDBLogAllEnvVar); all != "" { + level := ParseLevel(all) + globalEnvLevel = &level + } + + for envVar, component := range componentEnvVarMap { + // If the component already has a level, then skip it. + if _, ok := componentLevels[component]; ok { + selected[component] = componentLevels[component] + + continue + } + + // If the "MONGODB_LOG_ALL" environment variable is set, then + // set the level for the component to the value of the + // environment variable. + if globalEnvLevel != nil { + selected[component] = *globalEnvLevel + + continue + } + + // Otherwise, set the level for the component to the value of + // the environment variable. + selected[component] = ParseLevel(os.Getenv(envVar)) + } + + return selected +} + +// truncate will truncate a string to the given width, appending "..." to the +// end of the string if it is truncated. 
This routine is safe for multi-byte +// characters. +func truncate(str string, width uint) string { + if width == 0 { + return "" + } + + if len(str) <= int(width) { + return str + } + + // Truncate the byte slice of the string to the given width. + newStr := str[:width] + + // Check if the last byte is at the beginning of a multi-byte character. + // If it is, then remove the last byte. + if newStr[len(newStr)-1]&0xC0 == 0xC0 { + return newStr[:len(newStr)-1] + TruncationSuffix + } + + // Check if the last byte is in the middle of a multi-byte character. If + // it is, then step back until we find the beginning of the character. + if newStr[len(newStr)-1]&0xC0 == 0x80 { + for i := len(newStr) - 1; i >= 0; i-- { + if newStr[i]&0xC0 == 0xC0 { + return newStr[:i] + TruncationSuffix + } + } + } + + return newStr + TruncationSuffix +} + +// FormatMessage formats a BSON document for logging. The document is truncated +// to the given width. +func FormatMessage(msg string, width uint) string { + if len(msg) == 0 { + return "{}" + } + + return truncate(msg, width) +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/ptrutil/int64.go b/vendor/go.mongodb.org/mongo-driver/internal/ptrutil/int64.go new file mode 100644 index 000000000..1c3ab57ef --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/ptrutil/int64.go @@ -0,0 +1,39 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package ptrutil + +// CompareInt64 is a piecewise function with the following return conditions: +// +// (1) 2, ptr1 != nil AND ptr2 == nil +// (2) 1, *ptr1 > *ptr2 +// (3) 0, ptr1 == ptr2 or *ptr1 == *ptr2 +// (4) -1, *ptr1 < *ptr2 +// (5) -2, ptr1 == nil AND ptr2 != nil +func CompareInt64(ptr1, ptr2 *int64) int { + if ptr1 == ptr2 { + // This will catch the double nil or same-pointer cases. + return 0 + } + + if ptr1 == nil && ptr2 != nil { + return -2 + } + + if ptr1 != nil && ptr2 == nil { + return 2 + } + + if *ptr1 > *ptr2 { + return 1 + } + + if *ptr1 < *ptr2 { + return -1 + } + + return 0 +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/bits.go b/vendor/go.mongodb.org/mongo-driver/internal/rand/bits.go similarity index 100% rename from vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/bits.go rename to vendor/go.mongodb.org/mongo-driver/internal/rand/bits.go diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/exp.go b/vendor/go.mongodb.org/mongo-driver/internal/rand/exp.go similarity index 100% rename from vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/exp.go rename to vendor/go.mongodb.org/mongo-driver/internal/rand/exp.go diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/normal.go b/vendor/go.mongodb.org/mongo-driver/internal/rand/normal.go similarity index 100% rename from vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/normal.go rename to vendor/go.mongodb.org/mongo-driver/internal/rand/normal.go diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go b/vendor/go.mongodb.org/mongo-driver/internal/rand/rand.go similarity index 99% rename from vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go rename to vendor/go.mongodb.org/mongo-driver/internal/rand/rand.go index ffd0509bd..4c3d3e6ee 100644 --- 
a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/rand/rand.go @@ -357,7 +357,7 @@ func (s *LockedSource) Seed(seed uint64) { s.lk.Unlock() } -// seedPos implements Seed for a LockedSource without a race condiiton. +// seedPos implements Seed for a LockedSource without a race condition. func (s *LockedSource) seedPos(seed uint64, readPos *int8) { s.lk.Lock() s.src.Seed(seed) diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rng.go b/vendor/go.mongodb.org/mongo-driver/internal/rand/rng.go similarity index 100% rename from vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rng.go rename to vendor/go.mongodb.org/mongo-driver/internal/rand/rng.go diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go index 961607432..dd8c6d6f4 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go @@ -12,7 +12,7 @@ import ( "fmt" "io" - xrand "go.mongodb.org/mongo-driver/internal/randutil/rand" + xrand "go.mongodb.org/mongo-driver/internal/rand" ) // NewLockedRand returns a new "x/exp/rand" pseudo-random number generator seeded with a diff --git a/vendor/go.mongodb.org/mongo-driver/internal/uri_validation_errors.go b/vendor/go.mongodb.org/mongo-driver/internal/uri_validation_errors.go deleted file mode 100644 index 21e73002a..000000000 --- a/vendor/go.mongodb.org/mongo-driver/internal/uri_validation_errors.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package internal - -import "errors" - -var ( - // ErrLoadBalancedWithMultipleHosts is returned when loadBalanced=true is specified in a URI with multiple hosts. - ErrLoadBalancedWithMultipleHosts = errors.New("loadBalanced cannot be set to true if multiple hosts are specified") - // ErrLoadBalancedWithReplicaSet is returned when loadBalanced=true is specified in a URI with the replicaSet option. - ErrLoadBalancedWithReplicaSet = errors.New("loadBalanced cannot be set to true if a replica set name is specified") - // ErrLoadBalancedWithDirectConnection is returned when loadBalanced=true is specified in a URI with the directConnection option. - ErrLoadBalancedWithDirectConnection = errors.New("loadBalanced cannot be set to true if the direct connection option is specified") - // ErrSRVMaxHostsWithReplicaSet is returned when srvMaxHosts > 0 is specified in a URI with the replicaSet option. - ErrSRVMaxHostsWithReplicaSet = errors.New("srvMaxHosts cannot be a positive value if a replica set name is specified") - // ErrSRVMaxHostsWithLoadBalanced is returned when srvMaxHosts > 0 is specified in a URI with loadBalanced=true. 
- ErrSRVMaxHostsWithLoadBalanced = errors.New("srvMaxHosts cannot be a positive value if loadBalanced is set to true") -) diff --git a/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go b/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go index 78f16645d..86c2a33a7 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go @@ -7,6 +7,7 @@ package uuid import ( + "encoding/hex" "io" "go.mongodb.org/mongo-driver/internal/randutil" @@ -51,3 +52,17 @@ var globalSource = newSource() func New() (UUID, error) { return globalSource.new() } + +func (uuid UUID) String() string { + var str [36]byte + hex.Encode(str[:], uuid[:4]) + str[8] = '-' + hex.Encode(str[9:13], uuid[4:6]) + str[13] = '-' + hex.Encode(str[14:18], uuid[6:8]) + str[18] = '-' + hex.Encode(str[19:23], uuid[8:10]) + str[23] = '-' + hex.Encode(str[24:], uuid[10:]) + return string(str[:]) +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/background_context.go b/vendor/go.mongodb.org/mongo-driver/mongo/background_context.go similarity index 87% rename from vendor/go.mongodb.org/mongo-driver/internal/background_context.go rename to vendor/go.mongodb.org/mongo-driver/mongo/background_context.go index 6f190edb3..e4146e8b7 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/background_context.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/background_context.go @@ -4,7 +4,7 @@ // not use this file except in compliance with the License. 
You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -package internal +package mongo import "context" @@ -16,9 +16,9 @@ type backgroundContext struct { childValuesCtx context.Context } -// NewBackgroundContext creates a new Context whose behavior matches that of context.Background(), but Value calls are +// newBackgroundContext creates a new Context whose behavior matches that of context.Background(), but Value calls are // forwarded to the provided ctx parameter. If ctx is nil, context.Background() is returned. -func NewBackgroundContext(ctx context.Context) context.Context { +func newBackgroundContext(ctx context.Context) context.Context { if ctx == nil { return context.Background() } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go b/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go index da2e27bc6..51d59d0ff 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go @@ -8,6 +8,7 @@ package mongo import ( "context" + "time" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" @@ -35,9 +36,21 @@ type batchCursor interface { // Close closes the cursor. Close(context.Context) error - // The SetBatchSize method is a modifier function used to adjust the - // batch size of the cursor that implements it. + // SetBatchSize is a modifier function used to adjust the batch size of + // the cursor that implements it. SetBatchSize(int32) + + // SetMaxTime will set the maximum amount of time the server will allow + // the operations to execute. The server will error if this field is set + // but the cursor is not configured with awaitData=true. + // + // The time.Duration value passed by this setter will be converted and + // rounded down to the nearest millisecond. 
+ SetMaxTime(time.Duration) + + // SetComment will set a user-configurable comment that can be used to + // identify the operation in server logs. + SetComment(interface{}) } // changeStreamCursor is the interface implemented by batch cursors that also provide the functionality for retrieving diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go index 2c58f2229..42d286ea7 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go @@ -10,6 +10,7 @@ import ( "context" "go.mongodb.org/mongo-driver/bson/bsoncodec" + "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/writeconcern" @@ -25,7 +26,7 @@ type bulkWriteBatch struct { indexes []int } -// bulkWrite perfoms a bulkwrite operation +// bulkWrite performs a bulkwrite operation type bulkWrite struct { comment interface{} ordered *bool @@ -165,7 +166,11 @@ func (bw *bulkWrite) runInsert(ctx context.Context, batch bulkWriteBatch) (opera var i int for _, model := range batch.models { converted := model.(*InsertOneModel) - doc, _, err := transformAndEnsureID(bw.collection.registry, converted.Document) + doc, err := marshal(converted.Document, bw.collection.bsonOpts, bw.collection.registry) + if err != nil { + return operation.InsertResult{}, err + } + doc, _, err = ensureID(doc, primitive.NewObjectID(), bw.collection.bsonOpts, bw.collection.registry) if err != nil { return operation.InsertResult{}, err } @@ -179,9 +184,10 @@ func (bw *bulkWrite) runInsert(ctx context.Context, batch bulkWriteBatch) (opera ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock). Database(bw.collection.db.name).Collection(bw.collection.name). Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE). 
- ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout) + ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout). + Logger(bw.collection.client.logger) if bw.comment != nil { - comment, err := transformValue(bw.collection.registry, bw.comment, true, "comment") + comment, err := marshalValue(bw.comment, bw.collection.bsonOpts, bw.collection.registry) if err != nil { return op.Result(), err } @@ -216,10 +222,22 @@ func (bw *bulkWrite) runDelete(ctx context.Context, batch bulkWriteBatch) (opera switch converted := model.(type) { case *DeleteOneModel: - doc, err = createDeleteDoc(converted.Filter, converted.Collation, converted.Hint, true, bw.collection.registry) + doc, err = createDeleteDoc( + converted.Filter, + converted.Collation, + converted.Hint, + true, + bw.collection.bsonOpts, + bw.collection.registry) hasHint = hasHint || (converted.Hint != nil) case *DeleteManyModel: - doc, err = createDeleteDoc(converted.Filter, converted.Collation, converted.Hint, false, bw.collection.registry) + doc, err = createDeleteDoc( + converted.Filter, + converted.Collation, + converted.Hint, + false, + bw.collection.bsonOpts, + bw.collection.registry) hasHint = hasHint || (converted.Hint != nil) } @@ -236,16 +254,17 @@ func (bw *bulkWrite) runDelete(ctx context.Context, batch bulkWriteBatch) (opera ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock). Database(bw.collection.db.name).Collection(bw.collection.name). Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).Hint(hasHint). - ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout) + ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout). 
+ Logger(bw.collection.client.logger) if bw.comment != nil { - comment, err := transformValue(bw.collection.registry, bw.comment, true, "comment") + comment, err := marshalValue(bw.comment, bw.collection.bsonOpts, bw.collection.registry) if err != nil { return op.Result(), err } op.Comment(comment) } if bw.let != nil { - let, err := transformBsoncoreDocument(bw.collection.registry, bw.let, true, "let") + let, err := marshal(bw.let, bw.collection.bsonOpts, bw.collection.registry) if err != nil { return operation.DeleteResult{}, err } @@ -265,10 +284,15 @@ func (bw *bulkWrite) runDelete(ctx context.Context, batch bulkWriteBatch) (opera return op.Result(), err } -func createDeleteDoc(filter interface{}, collation *options.Collation, hint interface{}, deleteOne bool, - registry *bsoncodec.Registry) (bsoncore.Document, error) { - - f, err := transformBsoncoreDocument(registry, filter, true, "filter") +func createDeleteDoc( + filter interface{}, + collation *options.Collation, + hint interface{}, + deleteOne bool, + bsonOpts *options.BSONOptions, + registry *bsoncodec.Registry, +) (bsoncore.Document, error) { + f, err := marshal(filter, bsonOpts, registry) if err != nil { return nil, err } @@ -284,7 +308,10 @@ func createDeleteDoc(filter interface{}, collation *options.Collation, hint inte doc = bsoncore.AppendDocumentElement(doc, "collation", collation.ToDocument()) } if hint != nil { - hintVal, err := transformValue(registry, hint, false, "hint") + if isUnorderedMap(hint) { + return nil, ErrMapForOrderedArgument{"hint"} + } + hintVal, err := marshalValue(hint, bsonOpts, registry) if err != nil { return nil, err } @@ -305,17 +332,44 @@ func (bw *bulkWrite) runUpdate(ctx context.Context, batch bulkWriteBatch) (opera switch converted := model.(type) { case *ReplaceOneModel: - doc, err = createUpdateDoc(converted.Filter, converted.Replacement, converted.Hint, nil, converted.Collation, converted.Upsert, false, - false, bw.collection.registry) + doc, err = createUpdateDoc( + 
converted.Filter, + converted.Replacement, + converted.Hint, + nil, + converted.Collation, + converted.Upsert, + false, + false, + bw.collection.bsonOpts, + bw.collection.registry) hasHint = hasHint || (converted.Hint != nil) case *UpdateOneModel: - doc, err = createUpdateDoc(converted.Filter, converted.Update, converted.Hint, converted.ArrayFilters, converted.Collation, converted.Upsert, false, - true, bw.collection.registry) + doc, err = createUpdateDoc( + converted.Filter, + converted.Update, + converted.Hint, + converted.ArrayFilters, + converted.Collation, + converted.Upsert, + false, + true, + bw.collection.bsonOpts, + bw.collection.registry) hasHint = hasHint || (converted.Hint != nil) hasArrayFilters = hasArrayFilters || (converted.ArrayFilters != nil) case *UpdateManyModel: - doc, err = createUpdateDoc(converted.Filter, converted.Update, converted.Hint, converted.ArrayFilters, converted.Collation, converted.Upsert, true, - true, bw.collection.registry) + doc, err = createUpdateDoc( + converted.Filter, + converted.Update, + converted.Hint, + converted.ArrayFilters, + converted.Collation, + converted.Upsert, + true, + true, + bw.collection.bsonOpts, + bw.collection.registry) hasHint = hasHint || (converted.Hint != nil) hasArrayFilters = hasArrayFilters || (converted.ArrayFilters != nil) } @@ -331,16 +385,17 @@ func (bw *bulkWrite) runUpdate(ctx context.Context, batch bulkWriteBatch) (opera ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock). Database(bw.collection.db.name).Collection(bw.collection.name). Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).Hint(hasHint). - ArrayFilters(hasArrayFilters).ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout) + ArrayFilters(hasArrayFilters).ServerAPI(bw.collection.client.serverAPI). 
+ Timeout(bw.collection.client.timeout).Logger(bw.collection.client.logger) if bw.comment != nil { - comment, err := transformValue(bw.collection.registry, bw.comment, true, "comment") + comment, err := marshalValue(bw.comment, bw.collection.bsonOpts, bw.collection.registry) if err != nil { return op.Result(), err } op.Comment(comment) } if bw.let != nil { - let, err := transformBsoncoreDocument(bw.collection.registry, bw.let, true, "let") + let, err := marshal(bw.let, bw.collection.bsonOpts, bw.collection.registry) if err != nil { return operation.UpdateResult{}, err } @@ -362,6 +417,7 @@ func (bw *bulkWrite) runUpdate(ctx context.Context, batch bulkWriteBatch) (opera return op.Result(), err } + func createUpdateDoc( filter interface{}, update interface{}, @@ -371,9 +427,10 @@ func createUpdateDoc( upsert *bool, multi bool, checkDollarKey bool, + bsonOpts *options.BSONOptions, registry *bsoncodec.Registry, ) (bsoncore.Document, error) { - f, err := transformBsoncoreDocument(registry, filter, true, "filter") + f, err := marshal(filter, bsonOpts, registry) if err != nil { return nil, err } @@ -381,7 +438,7 @@ func createUpdateDoc( uidx, updateDoc := bsoncore.AppendDocumentStart(nil) updateDoc = bsoncore.AppendDocumentElement(updateDoc, "q", f) - u, err := transformUpdateValue(registry, update, checkDollarKey) + u, err := marshalUpdateValue(update, bsonOpts, registry, checkDollarKey) if err != nil { return nil, err } @@ -393,11 +450,15 @@ func createUpdateDoc( } if arrayFilters != nil { - arr, err := arrayFilters.ToArrayDocument() + reg := registry + if arrayFilters.Registry != nil { + reg = arrayFilters.Registry + } + arr, err := marshalValue(arrayFilters.Filters, bsonOpts, reg) if err != nil { return nil, err } - updateDoc = bsoncore.AppendArrayElement(updateDoc, "arrayFilters", arr) + updateDoc = bsoncore.AppendArrayElement(updateDoc, "arrayFilters", arr.Data) } if collation != nil { @@ -409,7 +470,10 @@ func createUpdateDoc( } if hint != nil { - hintVal, err := 
transformValue(registry, hint, false, "hint") + if isUnorderedMap(hint) { + return nil, ErrMapForOrderedArgument{"hint"} + } + hintVal, err := marshalValue(hint, bsonOpts, registry) if err != nil { return nil, err } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go index 6857e1e3c..773cbb0e5 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go @@ -17,7 +17,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/internal/csot" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" @@ -80,6 +80,7 @@ type ChangeStream struct { err error sess *session.Client client *Client + bsonOpts *options.BSONOptions registry *bsoncodec.Registry streamType StreamType options *options.ChangeStreamOptions @@ -92,6 +93,7 @@ type changeStreamConfig struct { readConcern *readconcern.ReadConcern readPreference *readpref.ReadPref client *Client + bsonOpts *options.BSONOptions registry *bsoncodec.Registry streamType StreamType collectionName string @@ -105,8 +107,13 @@ func newChangeStream(ctx context.Context, config changeStreamConfig, pipeline in ctx = context.Background() } + cursorOpts := config.client.createBaseCursorOptions() + + cursorOpts.MarshalValueEncoderFn = newEncoderFn(config.bsonOpts, config.registry) + cs := &ChangeStream{ client: config.client, + bsonOpts: config.bsonOpts, registry: config.registry, streamType: config.streamType, options: options.MergeChangeStreamOptions(opts...), @@ -114,7 +121,7 @@ func newChangeStream(ctx context.Context, config changeStreamConfig, pipeline in description.ReadPrefSelector(config.readPreference), 
description.LatencySelector(config.client.localThreshold), }), - cursorOptions: config.client.createBaseCursorOptions(), + cursorOptions: cursorOpts, } cs.sess = sessionFromContext(ctx) @@ -138,7 +145,7 @@ func newChangeStream(ctx context.Context, config changeStreamConfig, pipeline in if comment := cs.options.Comment; comment != nil { cs.aggregate.Comment(*comment) - commentVal, err := transformValue(cs.registry, comment, true, "comment") + commentVal, err := marshalValue(comment, cs.bsonOpts, cs.registry) if err != nil { return nil, err } @@ -273,8 +280,8 @@ func (cs *ChangeStream) executeOperation(ctx context.Context, resuming bool) err // If no deadline is set on the passed-in context, cs.client.timeout is set, and context is not already // a Timeout context, honor cs.client.timeout in new Timeout context for change stream operation execution // and potential retry. - if _, deadlineSet := ctx.Deadline(); !deadlineSet && cs.client.timeout != nil && !internal.IsTimeoutContext(ctx) { - newCtx, cancelFunc := internal.MakeTimeoutContext(ctx, *cs.client.timeout) + if _, deadlineSet := ctx.Deadline(); !deadlineSet && cs.client.timeout != nil && !csot.IsTimeoutContext(ctx) { + newCtx, cancelFunc := csot.MakeTimeoutContext(ctx, *cs.client.timeout) // Redefine ctx to be the new timeout-derived context. ctx = newCtx // Cancel the timeout-derived context at the end of executeOperation to avoid a context leak. 
@@ -287,7 +294,7 @@ func (cs *ChangeStream) executeOperation(ctx context.Context, resuming bool) err if cs.client.retryReads { retries = 1 } - if internal.IsTimeoutContext(ctx) { + if csot.IsTimeoutContext(ctx) { retries = -1 } @@ -389,7 +396,7 @@ func (cs *ChangeStream) storeResumeToken() error { func (cs *ChangeStream) buildPipelineSlice(pipeline interface{}) error { val := reflect.ValueOf(pipeline) if !val.IsValid() || !(val.Kind() == reflect.Slice) { - cs.err = errors.New("can only transform slices and arrays into aggregation pipelines, but got invalid") + cs.err = errors.New("can only marshal slices and arrays into aggregation pipelines, but got invalid") return cs.err } @@ -410,7 +417,7 @@ func (cs *ChangeStream) buildPipelineSlice(pipeline interface{}) error { for i := 0; i < val.Len(); i++ { var elem []byte - elem, cs.err = transformBsoncoreDocument(cs.registry, val.Index(i).Interface(), true, fmt.Sprintf("pipeline stage :%v", i)) + elem, cs.err = marshal(val.Index(i).Interface(), cs.bsonOpts, cs.registry) if cs.err != nil { return cs.err } @@ -438,7 +445,7 @@ func (cs *ChangeStream) createPipelineOptionsDoc() (bsoncore.Document, error) { if cs.options.ResumeAfter != nil { var raDoc bsoncore.Document - raDoc, cs.err = transformBsoncoreDocument(cs.registry, cs.options.ResumeAfter, true, "resumeAfter") + raDoc, cs.err = marshal(cs.options.ResumeAfter, cs.bsonOpts, cs.registry) if cs.err != nil { return nil, cs.err } @@ -452,7 +459,7 @@ func (cs *ChangeStream) createPipelineOptionsDoc() (bsoncore.Document, error) { if cs.options.StartAfter != nil { var saDoc bsoncore.Document - saDoc, cs.err = transformBsoncoreDocument(cs.registry, cs.options.StartAfter, true, "startAfter") + saDoc, cs.err = marshal(cs.options.StartAfter, cs.bsonOpts, cs.registry) if cs.err != nil { return nil, cs.err } @@ -524,6 +531,16 @@ func (cs *ChangeStream) ID() int64 { return cs.cursor.ID() } +// SetBatchSize sets the number of documents to fetch from the database with +// each 
iteration of the ChangeStream's "Next" or "TryNext" method. This setting +// only affects subsequent document batches fetched from the database. +func (cs *ChangeStream) SetBatchSize(size int32) { + // Set batch size on the cursor options also so any "resumed" change stream + // cursors will pick up the latest batch size setting. + cs.cursorOptions.BatchSize = size + cs.cursor.SetBatchSize(size) +} + // Decode will unmarshal the current event document into val and return any errors from the unmarshalling process // without any modification. If val is nil or is a typed nil, an error will be returned. func (cs *ChangeStream) Decode(val interface{}) error { @@ -531,7 +548,11 @@ func (cs *ChangeStream) Decode(val interface{}) error { return ErrNilCursor } - return bson.UnmarshalWithRegistry(cs.registry, cs.Current, val) + dec, err := getDecoder(cs.Current, cs.bsonOpts, cs.registry) + if err != nil { + return fmt.Errorf("error configuring BSON decoder: %w", err) + } + return dec.Decode(val) } // Err returns the last error seen by the change stream, or nil if no errors has occurred. 
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client.go b/vendor/go.mongodb.org/mongo-driver/mongo/client.go index b38663747..592927483 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client.go @@ -16,7 +16,8 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/event" - "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/internal/httputil" + "go.mongodb.org/mongo-driver/internal/logger" "go.mongodb.org/mongo-driver/internal/uuid" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" @@ -60,6 +61,7 @@ type Client struct { readPreference *readpref.ReadPref readConcern *readconcern.ReadConcern writeConcern *writeconcern.WriteConcern + bsonOpts *options.BSONOptions registry *bsoncodec.Registry monitor *event.CommandMonitor serverAPI *driver.ServerAPIOptions @@ -67,6 +69,7 @@ type Client struct { sessionPool *session.Pool timeout *time.Duration httpClient *http.Client + logger *logger.Logger // client-side encryption fields keyVaultClientFLE *Client @@ -125,6 +128,8 @@ func Connect(ctx context.Context, opts ...*options.ClientOptions) (*Client, erro // option fields of previous options, there is no partial overwriting. For example, if Username is // set in the Auth field for the first option, and Password is set for the second but with no // Username, after the merge the Username field will be empty. +// +// Deprecated: Use [Connect] instead. func NewClient(opts ...*options.ClientOptions) (*Client, error) { clientOpt := options.MergeClientOptions(opts...) 
@@ -160,6 +165,10 @@ func NewClient(opts ...*options.ClientOptions) (*Client, error) { if clientOpt.ReadPreference != nil { client.readPreference = clientOpt.ReadPreference } + // BSONOptions + if clientOpt.BSONOptions != nil { + client.bsonOpts = clientOpt.BSONOptions + } // Registry client.registry = bson.DefaultRegistry if clientOpt.Registry != nil { @@ -216,6 +225,13 @@ func NewClient(opts ...*options.ClientOptions) (*Client, error) { return nil, replaceErrors(err) } } + + // Create a logger for the client. + client.logger, err = newLogger(clientOpt.LoggerOptions) + if err != nil { + return nil, fmt.Errorf("invalid logger options: %w", err) + } + return client, nil } @@ -224,6 +240,8 @@ func NewClient(opts ...*options.ClientOptions) (*Client, error) { // // Connect starts background goroutines to monitor the state of the deployment and does not do any I/O in the main // goroutine. The Client.Ping method can be used to verify that the connection was created successfully. +// +// Deprecated: Use [mongo.Connect] instead. func (c *Client) Connect(ctx context.Context) error { if connector, ok := c.deployment.(driver.Connector); ok { err := connector.Connect() @@ -277,12 +295,16 @@ func (c *Client) Connect(ctx context.Context) error { // or write operations. If this method returns with no errors, all connections // associated with this Client have been closed. 
func (c *Client) Disconnect(ctx context.Context) error { + if c.logger != nil { + defer c.logger.Close() + } + if ctx == nil { ctx = context.Background() } - if c.httpClient == internal.DefaultHTTPClient { - defer internal.CloseIdleHTTPConnections(c.httpClient) + if c.httpClient == httputil.DefaultHTTPClient { + defer httputil.CloseIdleHTTPConnections(c.httpClient) } c.endSessions(ctx) @@ -445,14 +467,14 @@ func (c *Client) configureAutoEncryption(clientOpts *options.ClientOptions) erro return err } - // If the crypt_shared library was loaded successfully, signal to the mongocryptd client creator - // that it can bypass spawning mongocryptd. - cryptSharedLibAvailable := mc.CryptSharedLibVersionString() != "" - mongocryptdFLE, err := newMongocryptdClient(cryptSharedLibAvailable, clientOpts.AutoEncryptionOptions) - if err != nil { - return err + // If the crypt_shared library was not loaded, try to spawn and connect to mongocryptd. + if mc.CryptSharedLibVersionString() == "" { + mongocryptdFLE, err := newMongocryptdClient(clientOpts.AutoEncryptionOptions) + if err != nil { + return err + } + c.mongocryptdFLE = mongocryptdFLE } - c.mongocryptdFLE = mongocryptdFLE c.configureCryptFLE(mc, clientOpts.AutoEncryptionOptions) return nil @@ -514,7 +536,7 @@ func (c *Client) newMongoCrypt(opts *options.AutoEncryptionOptions) (*mongocrypt // convert schemas in SchemaMap to bsoncore documents cryptSchemaMap := make(map[string]bsoncore.Document) for k, v := range opts.SchemaMap { - schema, err := transformBsoncoreDocument(c.registry, v, true, "schemaMap") + schema, err := marshal(v, c.bsonOpts, c.registry) if err != nil { return nil, err } @@ -524,14 +546,14 @@ func (c *Client) newMongoCrypt(opts *options.AutoEncryptionOptions) (*mongocrypt // convert schemas in EncryptedFieldsMap to bsoncore documents cryptEncryptedFieldsMap := make(map[string]bsoncore.Document) for k, v := range opts.EncryptedFieldsMap { - encryptedFields, err := transformBsoncoreDocument(c.registry, v, true, 
"encryptedFieldsMap") + encryptedFields, err := marshal(v, c.bsonOpts, c.registry) if err != nil { return nil, err } cryptEncryptedFieldsMap[k] = encryptedFields } - kmsProviders, err := transformBsoncoreDocument(c.registry, opts.KmsProviders, true, "kmsProviders") + kmsProviders, err := marshal(opts.KmsProviders, c.bsonOpts, c.registry) if err != nil { return nil, fmt.Errorf("error creating KMS providers document: %v", err) } @@ -565,7 +587,8 @@ func (c *Client) newMongoCrypt(opts *options.AutoEncryptionOptions) (*mongocrypt SetBypassQueryAnalysis(bypassQueryAnalysis). SetEncryptedFieldsMap(cryptEncryptedFieldsMap). SetCryptSharedLibDisabled(cryptSharedLibDisabled || bypassAutoEncryption). - SetCryptSharedLibOverridePath(cryptSharedLibPath)) + SetCryptSharedLibOverridePath(cryptSharedLibPath). + SetHTTPClient(opts.HTTPClient)) if err != nil { return nil, err } @@ -609,7 +632,6 @@ func (c *Client) configureCryptFLE(mc *mongocrypt.MongoCrypt, opts *options.Auto KeyFn: kr.cryptKeys, MarkFn: c.mongocryptdFLE.markCommand, TLSConfig: opts.TLSConfig, - HTTPClient: opts.HTTPClient, BypassAutoEncryption: bypass, }) } @@ -657,7 +679,7 @@ func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ... return ListDatabasesResult{}, err } - filterDoc, err := transformBsoncoreDocument(c.registry, filter, true, "filter") + filterDoc, err := marshal(filter, c.bsonOpts, c.registry) if err != nil { return ListDatabasesResult{}, err } @@ -788,6 +810,7 @@ func (c *Client) Watch(ctx context.Context, pipeline interface{}, readConcern: c.readConcern, readPreference: c.readPreference, client: c, + bsonOpts: c.bsonOpts, registry: c.registry, streamType: ClientStream, crypt: c.cryptFLE, @@ -817,3 +840,28 @@ func (c *Client) createBaseCursorOptions() driver.CursorOptions { ServerAPI: c.serverAPI, } } + +// newLogger will use the LoggerOptions to create an internal logger and publish +// messages using a LogSink. 
+func newLogger(opts *options.LoggerOptions) (*logger.Logger, error) { + // If there are no logger options, then create a default logger. + if opts == nil { + opts = options.Logger() + } + + // If there are no component-level options and the environment does not + // contain component variables, then do nothing. + if (opts.ComponentLevels == nil || len(opts.ComponentLevels) == 0) && + !logger.EnvHasComponentVariables() { + + return nil, nil + } + + // Otherwise, collect the component-level options and create a logger. + componentLevels := make(map[logger.Component]logger.Level) + for component, level := range opts.ComponentLevels { + componentLevels[logger.Component(component)] = logger.Level(level) + } + + return logger.New(opts.Sink, opts.MaxDocumentLength, componentLevels) +} diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go b/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go index 59c550b95..01c2ec319 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go @@ -8,11 +8,12 @@ package mongo import ( "context" + "errors" "fmt" "strings" - "github.com/pkg/errors" "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/bson/bsonrw" "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -43,7 +44,7 @@ func NewClientEncryption(keyVaultClient *Client, opts ...*options.ClientEncrypti db, coll := splitNamespace(ceo.KeyVaultNamespace) ce.keyVaultColl = ce.keyVaultClient.Database(db).Collection(coll, keyVaultCollOpts) - kmsProviders, err := transformBsoncoreDocument(bson.DefaultRegistry, ceo.KmsProviders, true, "kmsProviders") + kmsProviders, err := marshal(ceo.KmsProviders, nil, nil) if err != nil { return nil, fmt.Errorf("error creating KMS providers map: %v", err) } @@ -53,7 +54,8 @@ func NewClientEncryption(keyVaultClient *Client, opts 
...*options.ClientEncrypti // Explicitly disable loading the crypt_shared library for the Crypt used for // ClientEncryption because it's only needed for AutoEncryption and we don't expect users to // have the crypt_shared library installed if they're using ClientEncryption. - SetCryptSharedLibDisabled(true)) + SetCryptSharedLibDisabled(true). + SetHTTPClient(ceo.HTTPClient)) if err != nil { return nil, err } @@ -66,12 +68,67 @@ func NewClientEncryption(keyVaultClient *Client, opts ...*options.ClientEncrypti KeyFn: kr.cryptKeys, CollInfoFn: cir.cryptCollInfo, TLSConfig: ceo.TLSConfig, - HTTPClient: ceo.HTTPClient, }) return ce, nil } +// CreateEncryptedCollection creates a new collection for Queryable Encryption with the help of automatic generation of new encryption data keys for null keyIds. +// It returns the created collection and the encrypted fields document used to create it. +func (ce *ClientEncryption) CreateEncryptedCollection(ctx context.Context, + db *Database, coll string, createOpts *options.CreateCollectionOptions, + kmsProvider string, masterKey interface{}) (*Collection, bson.M, error) { + if createOpts == nil { + return nil, nil, errors.New("nil CreateCollectionOptions") + } + ef := createOpts.EncryptedFields + if ef == nil { + return nil, nil, errors.New("no EncryptedFields defined for the collection") + } + + efBSON, err := marshal(ef, db.bsonOpts, db.registry) + if err != nil { + return nil, nil, err + } + r := bsonrw.NewBSONDocumentReader(efBSON) + dec, err := bson.NewDecoder(r) + if err != nil { + return nil, nil, err + } + var m bson.M + err = dec.Decode(&m) + if err != nil { + return nil, nil, err + } + + if v, ok := m["fields"]; ok { + if fields, ok := v.(bson.A); ok { + for _, field := range fields { + if f, ok := field.(bson.M); !ok { + continue + } else if v, ok := f["keyId"]; ok && v == nil { + dkOpts := options.DataKey() + if masterKey != nil { + dkOpts.SetMasterKey(masterKey) + } + keyid, err := ce.CreateDataKey(ctx, kmsProvider, 
dkOpts) + if err != nil { + createOpts.EncryptedFields = m + return nil, m, err + } + f["keyId"] = keyid + } + } + createOpts.EncryptedFields = m + } + } + err = db.CreateCollection(ctx, coll, createOpts) + if err != nil { + return nil, m, err + } + return db.Collection(coll), m, nil +} + // AddKeyAltName adds a keyAltName to the keyAltNames array of the key document in the key vault collection with the // given UUID (BSON binary subtype 0x04). Returns the previous version of the key document. func (ce *ClientEncryption) AddKeyAltName(ctx context.Context, id primitive.Binary, keyAltName string) *SingleResult { @@ -90,7 +147,10 @@ func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider strin dko := options.MergeDataKeyOptions(opts...) co := mcopts.DataKey().SetKeyAltNames(dko.KeyAltNames) if dko.MasterKey != nil { - keyDoc, err := transformBsoncoreDocument(ce.keyVaultClient.registry, dko.MasterKey, true, "masterKey") + keyDoc, err := marshal( + dko.MasterKey, + ce.keyVaultClient.bsonOpts, + ce.keyVaultClient.registry) if err != nil { return primitive.Binary{}, err } @@ -116,10 +176,8 @@ func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider strin return primitive.Binary{Subtype: subtype, Data: data}, nil } -// Encrypt encrypts a BSON value with the given key and algorithm. Returns an encrypted value (BSON binary of subtype 6). -func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue, - opts ...*options.EncryptOptions) (primitive.Binary, error) { - +// transformExplicitEncryptionOptions creates explicit encryption options to be passed to libmongocrypt. +func transformExplicitEncryptionOptions(opts ...*options.EncryptOptions) *mcopts.ExplicitEncryptionOptions { eo := options.MergeEncryptOptions(opts...) 
transformed := mcopts.ExplicitEncryption() if eo.KeyID != nil { @@ -135,6 +193,28 @@ func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue, transformed.SetContentionFactor(*eo.ContentionFactor) } + if eo.RangeOptions != nil { + var transformedRange mcopts.ExplicitRangeOptions + if eo.RangeOptions.Min != nil { + transformedRange.Min = &bsoncore.Value{Type: eo.RangeOptions.Min.Type, Data: eo.RangeOptions.Min.Value} + } + if eo.RangeOptions.Max != nil { + transformedRange.Max = &bsoncore.Value{Type: eo.RangeOptions.Max.Type, Data: eo.RangeOptions.Max.Value} + } + if eo.RangeOptions.Precision != nil { + transformedRange.Precision = eo.RangeOptions.Precision + } + transformedRange.Sparsity = eo.RangeOptions.Sparsity + transformed.SetRangeOptions(transformedRange) + } + return transformed +} + +// Encrypt encrypts a BSON value with the given key and algorithm. Returns an encrypted value (BSON binary of subtype 6). +func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue, + opts ...*options.EncryptOptions) (primitive.Binary, error) { + + transformed := transformExplicitEncryptionOptions(opts...) subtype, data, err := ce.crypt.EncryptExplicit(ctx, bsoncore.Value{Type: val.Type, Data: val.Value}, transformed) if err != nil { return primitive.Binary{}, err @@ -142,6 +222,40 @@ func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue, return primitive.Binary{Subtype: subtype, Data: data}, nil } +// EncryptExpression encrypts an expression to query a range index. +// On success, `result` is populated with the resulting BSON document. +// `expr` is expected to be a BSON document of one of the following forms: +// 1. A Match Expression of this form: +// {$and: [{: {$gt: }}, {: {$lt: }}]} +// 2. An Aggregate Expression of this form: +// {$and: [{$gt: [, ]}, {$lt: [, ]}] +// $gt may also be $gte. $lt may also be $lte. +// Only supported for queryType "rangePreview" +// Beta: The Range algorithm is experimental only. 
It is not intended for public use. It is subject to breaking changes. +func (ce *ClientEncryption) EncryptExpression(ctx context.Context, expr interface{}, result interface{}, opts ...*options.EncryptOptions) error { + transformed := transformExplicitEncryptionOptions(opts...) + + exprDoc, err := marshal(expr, nil, nil) + if err != nil { + return err + } + + encryptedExprDoc, err := ce.crypt.EncryptExplicitExpression(ctx, exprDoc, transformed) + if err != nil { + return err + } + if raw, ok := result.(*bson.Raw); ok { + // Avoid the cost of Unmarshal. + *raw = bson.Raw(encryptedExprDoc) + return nil + } + err = bson.Unmarshal([]byte(encryptedExprDoc), result) + if err != nil { + return err + } + return nil +} + // Decrypt decrypts an encrypted value (BSON binary of subtype 6) and returns the original BSON value. func (ce *ClientEncryption) Decrypt(ctx context.Context, val primitive.Binary) (bson.RawValue, error) { decrypted, err := ce.crypt.DecryptExplicit(ctx, val.Subtype, val.Data) @@ -269,7 +383,10 @@ func (ce *ClientEncryption) RewrapManyDataKey(ctx context.Context, filter interf // Transfer rmdko options to /x/ package options to publish the mongocrypt feed. co := mcopts.RewrapManyDataKey() if rmdko.MasterKey != nil { - keyDoc, err := transformBsoncoreDocument(ce.keyVaultClient.registry, rmdko.MasterKey, true, "masterKey") + keyDoc, err := marshal( + rmdko.MasterKey, + ce.keyVaultClient.bsonOpts, + ce.keyVaultClient.registry) if err != nil { return nil, err } @@ -280,7 +397,7 @@ func (ce *ClientEncryption) RewrapManyDataKey(ctx context.Context, filter interf } // Prepare the filters and rewrap the data key using mongocrypt. 
- filterdoc, err := transformBsoncoreDocument(ce.keyVaultClient.registry, filter, true, "filter") + filterdoc, err := marshal(filter, ce.keyVaultClient.bsonOpts, ce.keyVaultClient.registry) if err != nil { return nil, err } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go index 547110ea2..fcbfcc77a 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go @@ -10,13 +10,15 @@ import ( "context" "errors" "fmt" + "reflect" "strings" "time" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/internal/csfle" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" @@ -38,6 +40,7 @@ type Collection struct { readPreference *readpref.ReadPref readSelector description.ServerSelector writeSelector description.ServerSelector + bsonOpts *options.BSONOptions registry *bsoncodec.Registry } @@ -46,6 +49,7 @@ type aggregateParams struct { ctx context.Context pipeline interface{} client *Client + bsonOpts *options.BSONOptions registry *bsoncodec.Registry readConcern *readconcern.ReadConcern writeConcern *writeconcern.WriteConcern @@ -82,6 +86,11 @@ func newCollection(db *Database, name string, opts ...*options.CollectionOptions rp = collOpt.ReadPreference } + bsonOpts := db.bsonOpts + if collOpt.BSONOptions != nil { + bsonOpts = collOpt.BSONOptions + } + reg := db.registry if collOpt.Registry != nil { reg = collOpt.Registry @@ -106,6 +115,7 @@ func newCollection(db *Database, name string, opts ...*options.CollectionOptions writeConcern: wc, readSelector: readSelector, writeSelector: writeSelector, + bsonOpts: bsonOpts, registry: reg, } @@ -242,11 +252,17 @@ func (coll 
*Collection) insert(ctx context.Context, documents []interface{}, docs := make([]bsoncore.Document, len(documents)) for i, doc := range documents { - var err error - docs[i], result[i], err = transformAndEnsureID(coll.registry, doc) + bsoncoreDoc, err := marshal(doc, coll.bsonOpts, coll.registry) if err != nil { return nil, err } + bsoncoreDoc, id, err := ensureID(bsoncoreDoc, primitive.NewObjectID(), coll.bsonOpts, coll.registry) + if err != nil { + return nil, err + } + + docs[i] = bsoncoreDoc + result[i] = id } sess := sessionFromContext(ctx) @@ -275,13 +291,13 @@ func (coll *Collection) insert(ctx context.Context, documents []interface{}, ServerSelector(selector).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Ordered(true). - ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) + ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout).Logger(coll.client.logger) imo := options.MergeInsertManyOptions(opts...) 
if imo.BypassDocumentValidation != nil && *imo.BypassDocumentValidation { op = op.BypassDocumentValidation(*imo.BypassDocumentValidation) } if imo.Comment != nil { - comment, err := transformValue(coll.registry, imo.Comment, true, "comment") + comment, err := marshalValue(imo.Comment, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -400,7 +416,7 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn ctx = context.Background() } - f, err := transformBsoncoreDocument(coll.registry, filter, true, "filter") + f, err := marshal(filter, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -438,7 +454,10 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn doc = bsoncore.AppendDocumentElement(doc, "collation", do.Collation.ToDocument()) } if do.Hint != nil { - hint, err := transformValue(coll.registry, do.Hint, false, "hint") + if isUnorderedMap(do.Hint) { + return nil, ErrMapForOrderedArgument{"hint"} + } + hint, err := marshalValue(do.Hint, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -452,9 +471,9 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn ServerSelector(selector).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Ordered(true). 
- ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) + ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout).Logger(coll.client.logger) if do.Comment != nil { - comment, err := transformValue(coll.registry, do.Comment, true, "comment") + comment, err := marshalValue(do.Comment, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -464,7 +483,7 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn op = op.Hint(true) } if do.Let != nil { - let, err := transformBsoncoreDocument(coll.registry, do.Let, true, "let") + let, err := marshal(do.Let, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -527,8 +546,17 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc // collation, arrayFilters, upsert, and hint are included on the individual update documents rather than as part of the // command - updateDoc, err := createUpdateDoc(filter, update, uo.Hint, uo.ArrayFilters, uo.Collation, uo.Upsert, multi, - checkDollarKey, coll.registry) + updateDoc, err := createUpdateDoc( + filter, + update, + uo.Hint, + uo.ArrayFilters, + uo.Collation, + uo.Upsert, + multi, + checkDollarKey, + coll.bsonOpts, + coll.registry) if err != nil { return nil, err } @@ -560,9 +588,9 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Hint(uo.Hint != nil). ArrayFilters(uo.ArrayFilters != nil).Ordered(true).ServerAPI(coll.client.serverAPI). 
- Timeout(coll.client.timeout) + Timeout(coll.client.timeout).Logger(coll.client.logger) if uo.Let != nil { - let, err := transformBsoncoreDocument(coll.registry, uo.Let, true, "let") + let, err := marshal(uo.Let, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -573,7 +601,7 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc op = op.BypassDocumentValidation(*uo.BypassDocumentValidation) } if uo.Comment != nil { - comment, err := transformValue(coll.registry, uo.Comment, true, "comment") + comment, err := marshalValue(uo.Comment, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -648,7 +676,7 @@ func (coll *Collection) UpdateOne(ctx context.Context, filter interface{}, updat ctx = context.Background() } - f, err := transformBsoncoreDocument(coll.registry, filter, true, "filter") + f, err := marshal(filter, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -676,7 +704,7 @@ func (coll *Collection) UpdateMany(ctx context.Context, filter interface{}, upda ctx = context.Background() } - f, err := transformBsoncoreDocument(coll.registry, filter, true, "filter") + f, err := marshal(filter, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -704,12 +732,12 @@ func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{}, ctx = context.Background() } - f, err := transformBsoncoreDocument(coll.registry, filter, true, "filter") + f, err := marshal(filter, coll.bsonOpts, coll.registry) if err != nil { return nil, err } - r, err := transformBsoncoreDocument(coll.registry, replacement, true, "replacement") + r, err := marshal(replacement, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -756,6 +784,7 @@ func (coll *Collection) Aggregate(ctx context.Context, pipeline interface{}, registry: coll.registry, readConcern: coll.readConcern, writeConcern: coll.writeConcern, + bsonOpts: coll.bsonOpts, retryRead: coll.client.retryReads, db: coll.db.name, col: 
coll.name, @@ -773,7 +802,7 @@ func aggregate(a aggregateParams) (cur *Cursor, err error) { a.ctx = context.Background() } - pipelineArr, hasOutputStage, err := transformAggregatePipeline(a.registry, a.pipeline) + pipelineArr, hasOutputStage, err := marshalAggregatePipeline(a.pipeline, a.bsonOpts, a.registry) if err != nil { return nil, err } @@ -812,8 +841,11 @@ func aggregate(a aggregateParams) (cur *Cursor, err error) { } ao := options.MergeAggregateOptions(a.opts...) + cursorOpts := a.client.createBaseCursorOptions() + cursorOpts.MarshalValueEncoderFn = newEncoderFn(a.bsonOpts, a.registry) + op := operation.NewAggregate(pipelineArr). Session(sess). WriteConcern(wc). @@ -851,21 +883,24 @@ func aggregate(a aggregateParams) (cur *Cursor, err error) { if ao.Comment != nil { op.Comment(*ao.Comment) - commentVal, err := transformValue(a.registry, ao.Comment, true, "comment") + commentVal, err := marshalValue(ao.Comment, a.bsonOpts, a.registry) if err != nil { return nil, err } cursorOpts.Comment = commentVal } if ao.Hint != nil { - hintVal, err := transformValue(a.registry, ao.Hint, false, "hint") + if isUnorderedMap(ao.Hint) { + return nil, ErrMapForOrderedArgument{"hint"} + } + hintVal, err := marshalValue(ao.Hint, a.bsonOpts, a.registry) if err != nil { return nil, err } op.Hint(hintVal) } if ao.Let != nil { - let, err := transformBsoncoreDocument(a.registry, ao.Let, true, "let") + let, err := marshal(ao.Let, a.bsonOpts, a.registry) if err != nil { return nil, err } @@ -904,7 +939,7 @@ func aggregate(a aggregateParams) (cur *Cursor, err error) { if err != nil { return nil, replaceErrors(err) } - cursor, err := newCursorWithSession(bc, a.registry, sess) + cursor, err := newCursorWithSession(bc, a.client.bsonOpts, a.registry, sess) return cursor, replaceErrors(err) } @@ -925,7 +960,7 @@ func (coll *Collection) CountDocuments(ctx context.Context, filter interface{}, countOpts := options.MergeCountOptions(opts...) 
- pipelineArr, err := countDocumentsAggregatePipeline(coll.registry, filter, countOpts) + pipelineArr, err := countDocumentsAggregatePipeline(filter, coll.bsonOpts, coll.registry, countOpts) if err != nil { return 0, err } @@ -956,7 +991,10 @@ func (coll *Collection) CountDocuments(ctx context.Context, filter interface{}, op.Comment(*countOpts.Comment) } if countOpts.Hint != nil { - hintVal, err := transformValue(coll.registry, countOpts.Hint, false, "hint") + if isUnorderedMap(countOpts.Hint) { + return 0, ErrMapForOrderedArgument{"hint"} + } + hintVal, err := marshalValue(countOpts.Hint, coll.bsonOpts, coll.registry) if err != nil { return 0, err } @@ -1033,7 +1071,7 @@ func (coll *Collection) EstimatedDocumentCount(ctx context.Context, Timeout(coll.client.timeout).MaxTime(co.MaxTime) if co.Comment != nil { - comment, err := transformValue(coll.registry, co.Comment, false, "comment") + comment, err := marshalValue(co.Comment, coll.bsonOpts, coll.registry) if err != nil { return 0, err } @@ -1067,7 +1105,7 @@ func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter i ctx = context.Background() } - f, err := transformBsoncoreDocument(coll.registry, filter, true, "filter") + f, err := marshal(filter, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -1103,7 +1141,7 @@ func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter i op.Collation(bsoncore.Document(option.Collation.ToDocument())) } if option.Comment != nil { - comment, err := transformValue(coll.registry, option.Comment, true, "comment") + comment, err := marshalValue(option.Comment, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -1158,7 +1196,7 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, ctx = context.Background() } - f, err := transformBsoncoreDocument(coll.registry, filter, true, "filter") + f, err := marshal(filter, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -1192,9 
+1230,12 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, CommandMonitor(coll.client.monitor).ServerSelector(selector). ClusterClock(coll.client.clock).Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). - Timeout(coll.client.timeout).MaxTime(fo.MaxTime) + Timeout(coll.client.timeout).MaxTime(fo.MaxTime).Logger(coll.client.logger) cursorOpts := coll.client.createBaseCursorOptions() + + cursorOpts.MarshalValueEncoderFn = newEncoderFn(coll.bsonOpts, coll.registry) + if fo.AllowDiskUse != nil { op.AllowDiskUse(*fo.AllowDiskUse) } @@ -1211,7 +1252,7 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Comment != nil { op.Comment(*fo.Comment) - commentVal, err := transformValue(coll.registry, fo.Comment, true, "comment") + commentVal, err := marshalValue(fo.Comment, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -1227,14 +1268,17 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, } } if fo.Hint != nil { - hint, err := transformValue(coll.registry, fo.Hint, false, "hint") + if isUnorderedMap(fo.Hint) { + return nil, ErrMapForOrderedArgument{"hint"} + } + hint, err := marshalValue(fo.Hint, coll.bsonOpts, coll.registry) if err != nil { return nil, err } op.Hint(hint) } if fo.Let != nil { - let, err := transformBsoncoreDocument(coll.registry, fo.Let, true, "let") + let, err := marshal(fo.Let, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -1250,7 +1294,7 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, op.Limit(limit) } if fo.Max != nil { - max, err := transformBsoncoreDocument(coll.registry, fo.Max, true, "max") + max, err := marshal(fo.Max, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -1260,7 +1304,7 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, cursorOpts.MaxTimeMS = int64(*fo.MaxAwaitTime / 
time.Millisecond) } if fo.Min != nil { - min, err := transformBsoncoreDocument(coll.registry, fo.Min, true, "min") + min, err := marshal(fo.Min, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -1273,7 +1317,7 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, op.OplogReplay(*fo.OplogReplay) } if fo.Projection != nil { - proj, err := transformBsoncoreDocument(coll.registry, fo.Projection, true, "projection") + proj, err := marshal(fo.Projection, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -1292,7 +1336,10 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, op.Snapshot(*fo.Snapshot) } if fo.Sort != nil { - sort, err := transformBsoncoreDocument(coll.registry, fo.Sort, false, "sort") + if isUnorderedMap(fo.Sort) { + return nil, ErrMapForOrderedArgument{"sort"} + } + sort, err := marshal(fo.Sort, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -1312,7 +1359,7 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if err != nil { return nil, replaceErrors(err) } - return newCursorWithSession(bc, coll.registry, sess) + return newCursorWithSession(bc, coll.bsonOpts, coll.registry, sess) } // FindOne executes a find command and returns a SingleResult for one document in the collection. @@ -1362,7 +1409,13 @@ func (coll *Collection) FindOne(ctx context.Context, filter interface{}, findOpts = append(findOpts, options.Find().SetLimit(-1)) cursor, err := coll.Find(ctx, filter, findOpts...) 
- return &SingleResult{cur: cursor, reg: coll.registry, err: replaceErrors(err)} + return &SingleResult{ + ctx: ctx, + cur: cursor, + bsonOpts: coll.bsonOpts, + reg: coll.registry, + err: replaceErrors(err), + } } func (coll *Collection) findAndModify(ctx context.Context, op *operation.FindAndModify) *SingleResult { @@ -1413,7 +1466,12 @@ func (coll *Collection) findAndModify(ctx context.Context, op *operation.FindAnd return &SingleResult{err: err} } - return &SingleResult{rdr: bson.Raw(op.Result().Value), reg: coll.registry} + return &SingleResult{ + ctx: ctx, + rdr: bson.Raw(op.Result().Value), + bsonOpts: coll.bsonOpts, + reg: coll.registry, + } } // FindOneAndDelete executes a findAndModify command to delete at most one document in the collection. and returns the @@ -1430,7 +1488,7 @@ func (coll *Collection) findAndModify(ctx context.Context, op *operation.FindAnd func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{}, opts ...*options.FindOneAndDeleteOptions) *SingleResult { - f, err := transformBsoncoreDocument(coll.registry, filter, true, "filter") + f, err := marshal(filter, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } @@ -1441,35 +1499,41 @@ func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{} op = op.Collation(bsoncore.Document(fod.Collation.ToDocument())) } if fod.Comment != nil { - comment, err := transformValue(coll.registry, fod.Comment, true, "comment") + comment, err := marshalValue(fod.Comment, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } op = op.Comment(comment) } if fod.Projection != nil { - proj, err := transformBsoncoreDocument(coll.registry, fod.Projection, true, "projection") + proj, err := marshal(fod.Projection, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } op = op.Fields(proj) } if fod.Sort != nil { - sort, err := transformBsoncoreDocument(coll.registry, fod.Sort, false, "sort") + if 
isUnorderedMap(fod.Sort) { + return &SingleResult{err: ErrMapForOrderedArgument{"sort"}} + } + sort, err := marshal(fod.Sort, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } op = op.Sort(sort) } if fod.Hint != nil { - hint, err := transformValue(coll.registry, fod.Hint, false, "hint") + if isUnorderedMap(fod.Hint) { + return &SingleResult{err: ErrMapForOrderedArgument{"hint"}} + } + hint, err := marshalValue(fod.Hint, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } op = op.Hint(hint) } if fod.Let != nil { - let, err := transformBsoncoreDocument(coll.registry, fod.Let, true, "let") + let, err := marshal(fod.Let, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } @@ -1496,11 +1560,11 @@ func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{} func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{}, replacement interface{}, opts ...*options.FindOneAndReplaceOptions) *SingleResult { - f, err := transformBsoncoreDocument(coll.registry, filter, true, "filter") + f, err := marshal(filter, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } - r, err := transformBsoncoreDocument(coll.registry, replacement, true, "replacement") + r, err := marshal(replacement, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } @@ -1518,14 +1582,14 @@ func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{ op = op.Collation(bsoncore.Document(fo.Collation.ToDocument())) } if fo.Comment != nil { - comment, err := transformValue(coll.registry, fo.Comment, true, "comment") + comment, err := marshalValue(fo.Comment, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } op = op.Comment(comment) } if fo.Projection != nil { - proj, err := transformBsoncoreDocument(coll.registry, fo.Projection, true, "projection") + proj, err := marshal(fo.Projection, 
coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } @@ -1535,7 +1599,10 @@ func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{ op = op.NewDocument(*fo.ReturnDocument == options.After) } if fo.Sort != nil { - sort, err := transformBsoncoreDocument(coll.registry, fo.Sort, false, "sort") + if isUnorderedMap(fo.Sort) { + return &SingleResult{err: ErrMapForOrderedArgument{"sort"}} + } + sort, err := marshal(fo.Sort, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } @@ -1545,14 +1612,17 @@ func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{ op = op.Upsert(*fo.Upsert) } if fo.Hint != nil { - hint, err := transformValue(coll.registry, fo.Hint, false, "hint") + if isUnorderedMap(fo.Hint) { + return &SingleResult{err: ErrMapForOrderedArgument{"hint"}} + } + hint, err := marshalValue(fo.Hint, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } op = op.Hint(hint) } if fo.Let != nil { - let, err := transformBsoncoreDocument(coll.registry, fo.Let, true, "let") + let, err := marshal(fo.Let, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } @@ -1584,7 +1654,7 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} ctx = context.Background() } - f, err := transformBsoncoreDocument(coll.registry, filter, true, "filter") + f, err := marshal(filter, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } @@ -1593,18 +1663,23 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} op := operation.NewFindAndModify(f).ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout). 
MaxTime(fo.MaxTime) - u, err := transformUpdateValue(coll.registry, update, true) + u, err := marshalUpdateValue(update, coll.bsonOpts, coll.registry, true) if err != nil { return &SingleResult{err: err} } op = op.Update(u) if fo.ArrayFilters != nil { - filtersDoc, err := fo.ArrayFilters.ToArrayDocument() + af := fo.ArrayFilters + reg := coll.registry + if af.Registry != nil { + reg = af.Registry + } + filtersDoc, err := marshalValue(af.Filters, coll.bsonOpts, reg) if err != nil { return &SingleResult{err: err} } - op = op.ArrayFilters(bsoncore.Document(filtersDoc)) + op = op.ArrayFilters(filtersDoc.Data) } if fo.BypassDocumentValidation != nil && *fo.BypassDocumentValidation { op = op.BypassDocumentValidation(*fo.BypassDocumentValidation) @@ -1613,14 +1688,14 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} op = op.Collation(bsoncore.Document(fo.Collation.ToDocument())) } if fo.Comment != nil { - comment, err := transformValue(coll.registry, fo.Comment, true, "comment") + comment, err := marshalValue(fo.Comment, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } op = op.Comment(comment) } if fo.Projection != nil { - proj, err := transformBsoncoreDocument(coll.registry, fo.Projection, true, "projection") + proj, err := marshal(fo.Projection, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } @@ -1630,7 +1705,10 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} op = op.NewDocument(*fo.ReturnDocument == options.After) } if fo.Sort != nil { - sort, err := transformBsoncoreDocument(coll.registry, fo.Sort, false, "sort") + if isUnorderedMap(fo.Sort) { + return &SingleResult{err: ErrMapForOrderedArgument{"sort"}} + } + sort, err := marshal(fo.Sort, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } @@ -1640,14 +1718,17 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} op = 
op.Upsert(*fo.Upsert) } if fo.Hint != nil { - hint, err := transformValue(coll.registry, fo.Hint, false, "hint") + if isUnorderedMap(fo.Hint) { + return &SingleResult{err: ErrMapForOrderedArgument{"hint"}} + } + hint, err := marshalValue(fo.Hint, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } op = op.Hint(hint) } if fo.Let != nil { - let, err := transformBsoncoreDocument(coll.registry, fo.Let, true, "let") + let, err := marshal(fo.Let, coll.bsonOpts, coll.registry) if err != nil { return &SingleResult{err: err} } @@ -1677,6 +1758,7 @@ func (coll *Collection) Watch(ctx context.Context, pipeline interface{}, readConcern: coll.readConcern, readPreference: coll.readPreference, client: coll.client, + bsonOpts: coll.bsonOpts, registry: coll.registry, streamType: CollectionStream, collectionName: coll.Name(), @@ -1691,6 +1773,13 @@ func (coll *Collection) Indexes() IndexView { return IndexView{coll: coll} } +// SearchIndexes returns a SearchIndexView instance that can be used to perform operations on the search indexes for the collection. +func (coll *Collection) SearchIndexes() SearchIndexView { + return SearchIndexView{ + coll: coll, + } +} + // Drop drops the collection on the server. This method ignores "namespace not found" errors so it is safe to drop // a collection that does not exist on the server. func (coll *Collection) Drop(ctx context.Context) error { @@ -1715,14 +1804,14 @@ func (coll *Collection) Drop(ctx context.Context) error { // dropEncryptedCollection drops a collection with EncryptedFields. 
func (coll *Collection) dropEncryptedCollection(ctx context.Context, ef interface{}) error { - efBSON, err := transformBsoncoreDocument(coll.registry, ef, true /* mapAllowed */, "encryptedFields") + efBSON, err := marshal(ef, coll.bsonOpts, coll.registry) if err != nil { return fmt.Errorf("error transforming document: %v", err) } - // Drop the three encryption-related, associated collections: `escCollection`, `eccCollection` and `ecocCollection`. + // Drop the two encryption-related, associated collections: `escCollection` and `ecocCollection`. // Drop ESCCollection. - escCollection, err := internal.GetEncryptedStateCollectionName(efBSON, coll.name, internal.EncryptedStateCollection) + escCollection, err := csfle.GetEncryptedStateCollectionName(efBSON, coll.name, csfle.EncryptedStateCollection) if err != nil { return err } @@ -1730,17 +1819,8 @@ func (coll *Collection) dropEncryptedCollection(ctx context.Context, ef interfac return err } - // Drop ECCCollection. - eccCollection, err := internal.GetEncryptedStateCollectionName(efBSON, coll.name, internal.EncryptedCacheCollection) - if err != nil { - return err - } - if err := coll.db.Collection(eccCollection).drop(ctx); err != nil { - return err - } - // Drop ECOCCollection. - ecocCollection, err := internal.GetEncryptedStateCollectionName(efBSON, coll.name, internal.EncryptedCompactionCollection) + ecocCollection, err := csfle.GetEncryptedStateCollectionName(efBSON, coll.name, csfle.EncryptedCompactionCollection) if err != nil { return err } @@ -1749,10 +1829,7 @@ func (coll *Collection) dropEncryptedCollection(ctx context.Context, ef interfac } // Drop the data collection. - if err := coll.drop(ctx); err != nil { - return err - } - return nil + return coll.drop(ctx) } // drop drops a collection without EncryptedFields. 
@@ -1790,7 +1867,7 @@ func (coll *Collection) drop(ctx context.Context) error { ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) err = op.Execute(ctx) - // ignore namespace not found erorrs + // ignore namespace not found errors driverErr, ok := err.(driver.Error) if !ok || (ok && !driverErr.NamespaceNotFound()) { return replaceErrors(err) @@ -1798,26 +1875,52 @@ func (coll *Collection) drop(ctx context.Context) error { return nil } -// makePinnedSelector makes a selector for a pinned session with a pinned server. Will attempt to do server selection on -// the pinned server but if that fails it will go through a list of default selectors -func makePinnedSelector(sess *session.Client, defaultSelector description.ServerSelector) description.ServerSelectorFunc { - return func(t description.Topology, svrs []description.Server) ([]description.Server, error) { - if sess != nil && sess.PinnedServer != nil { - // If there is a pinned server, try to find it in the list of candidates. - for _, candidate := range svrs { - if candidate.Addr == sess.PinnedServer.Addr { - return []description.Server{candidate}, nil - } - } +type pinnedServerSelector struct { + stringer fmt.Stringer + fallback description.ServerSelector + session *session.Client +} - return nil, nil +func (pss pinnedServerSelector) String() string { + if pss.stringer == nil { + return "" + } + + return pss.stringer.String() +} + +func (pss pinnedServerSelector) SelectServer( + t description.Topology, + svrs []description.Server, +) ([]description.Server, error) { + if pss.session != nil && pss.session.PinnedServer != nil { + // If there is a pinned server, try to find it in the list of candidates. 
+ for _, candidate := range svrs { + if candidate.Addr == pss.session.PinnedServer.Addr { + return []description.Server{candidate}, nil + } } - return defaultSelector.SelectServer(t, svrs) + return nil, nil + } + + return pss.fallback.SelectServer(t, svrs) +} + +func makePinnedSelector(sess *session.Client, fallback description.ServerSelector) description.ServerSelector { + pss := pinnedServerSelector{ + session: sess, + fallback: fallback, + } + + if srvSelectorStringer, ok := fallback.(fmt.Stringer); ok { + pss.stringer = srvSelectorStringer } + + return pss } -func makeReadPrefSelector(sess *session.Client, selector description.ServerSelector, localThreshold time.Duration) description.ServerSelectorFunc { +func makeReadPrefSelector(sess *session.Client, selector description.ServerSelector, localThreshold time.Duration) description.ServerSelector { if sess != nil && sess.TransactionRunning() { selector = description.CompositeSelector([]description.ServerSelector{ description.ReadPrefSelector(sess.CurrentRp), @@ -1828,7 +1931,7 @@ func makeReadPrefSelector(sess *session.Client, selector description.ServerSelec return makePinnedSelector(sess, selector) } -func makeOutputAggregateSelector(sess *session.Client, rp *readpref.ReadPref, localThreshold time.Duration) description.ServerSelectorFunc { +func makeOutputAggregateSelector(sess *session.Client, rp *readpref.ReadPref, localThreshold time.Duration) description.ServerSelector { if sess != nil && sess.TransactionRunning() { // Use current transaction's read preference if available rp = sess.CurrentRp @@ -1840,3 +1943,11 @@ func makeOutputAggregateSelector(sess *session.Client, rp *readpref.ReadPref, lo }) return makePinnedSelector(sess, selector) } + +// isUnorderedMap returns true if val is a map with more than 1 element. It is typically used to +// check for unordered Go values that are used in nested command documents where different field +// orders mean different things. 
Examples are the "sort" and "hint" fields. +func isUnorderedMap(val interface{}) bool { + refValue := reflect.ValueOf(val) + return refValue.Kind() == reflect.Map && refValue.Len() > 1 +} diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go b/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go index e2320eade..d2228ed9c 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go @@ -12,10 +12,12 @@ import ( "fmt" "io" "reflect" + "time" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" - "go.mongodb.org/mongo-driver/x/bsonx" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -32,17 +34,27 @@ type Cursor struct { bc batchCursor batch *bsoncore.DocumentSequence batchLength int + bsonOpts *options.BSONOptions registry *bsoncodec.Registry clientSession *session.Client err error } -func newCursor(bc batchCursor, registry *bsoncodec.Registry) (*Cursor, error) { - return newCursorWithSession(bc, registry, nil) +func newCursor( + bc batchCursor, + bsonOpts *options.BSONOptions, + registry *bsoncodec.Registry, +) (*Cursor, error) { + return newCursorWithSession(bc, bsonOpts, registry, nil) } -func newCursorWithSession(bc batchCursor, registry *bsoncodec.Registry, clientSession *session.Client) (*Cursor, error) { +func newCursorWithSession( + bc batchCursor, + bsonOpts *options.BSONOptions, + registry *bsoncodec.Registry, + clientSession *session.Client, +) (*Cursor, error) { if registry == nil { registry = bson.DefaultRegistry } @@ -51,6 +63,7 @@ func newCursorWithSession(bc batchCursor, registry *bsoncodec.Registry, clientSe } c := &Cursor{ bc: bc, + bsonOpts: bsonOpts, registry: registry, clientSession: clientSession, } @@ -83,8 +96,6 @@ func NewCursorFromDocuments(documents []interface{}, 
err error, registry *bsonco switch t := doc.(type) { case nil: return nil, ErrNilDocument - case bsonx.Doc: - doc = t.Copy() case []byte: // Slight optimization so we'll just use MarshalBSON and not go through the codec machinery. doc = bson.Raw(t) @@ -115,8 +126,8 @@ func (c *Cursor) ID() int64 { return c.bc.ID() } // Next gets the next document for this cursor. It returns true if there were no errors and the cursor has not been // exhausted. // -// Next blocks until a document is available, an error occurs, or ctx expires. If ctx expires, the -// error will be set to ctx.Err(). In an error case, Next will return false. +// Next blocks until a document is available or an error occurs. If the context expires, the cursor's error will +// be set to ctx.Err(). In case of an error, Next will return false. // // If Next returns false, subsequent calls will also return false. func (c *Cursor) Next(ctx context.Context) bool { @@ -128,7 +139,7 @@ func (c *Cursor) Next(ctx context.Context) bool { // Next. See https://www.mongodb.com/docs/manual/core/tailable-cursors/ for more information about tailable cursors. // // TryNext returns false if the cursor is exhausted, an error occurs when getting results from the server, the next -// document is not yet available, or ctx expires. If ctx expires, the error will be set to ctx.Err(). +// document is not yet available, or ctx expires. If the context expires, the cursor's error will be set to ctx.Err(). // // If TryNext returns false and an error occurred or the cursor has been exhausted (i.e. c.Err() != nil || c.ID() == 0), // subsequent attempts will also return false. 
Otherwise, it is safe to call TryNext again until a document is @@ -206,10 +217,62 @@ func (c *Cursor) next(ctx context.Context, nonBlocking bool) bool { } } +func getDecoder( + data []byte, + opts *options.BSONOptions, + reg *bsoncodec.Registry, +) (*bson.Decoder, error) { + dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data)) + if err != nil { + return nil, err + } + + if opts != nil { + if opts.AllowTruncatingDoubles { + dec.AllowTruncatingDoubles() + } + if opts.BinaryAsSlice { + dec.BinaryAsSlice() + } + if opts.DefaultDocumentD { + dec.DefaultDocumentD() + } + if opts.DefaultDocumentM { + dec.DefaultDocumentM() + } + if opts.UseJSONStructTags { + dec.UseJSONStructTags() + } + if opts.UseLocalTimeZone { + dec.UseLocalTimeZone() + } + if opts.ZeroMaps { + dec.ZeroMaps() + } + if opts.ZeroStructs { + dec.ZeroStructs() + } + } + + if reg != nil { + // TODO:(GODRIVER-2719): Remove error handling. + if err := dec.SetRegistry(reg); err != nil { + return nil, err + } + } + + return dec, nil +} + // Decode will unmarshal the current document into val and return any errors from the unmarshalling process without any // modification. If val is nil or is a typed nil, an error will be returned. func (c *Cursor) Decode(val interface{}) error { - return bson.UnmarshalWithRegistry(c.registry, c.Current, val) + dec, err := getDecoder(c.Current, c.bsonOpts, c.registry) + if err != nil { + return fmt.Errorf("error configuring BSON decoder: %w", err) + } + + return dec.Decode(val) } // Err returns the last error seen by the Cursor, or nil if no error has occurred. 
@@ -298,7 +361,12 @@ func (c *Cursor) addFromBatch(sliceVal reflect.Value, elemType reflect.Type, bat } currElem := sliceVal.Index(index).Addr().Interface() - if err = bson.UnmarshalWithRegistry(c.registry, doc, currElem); err != nil { + dec, err := getDecoder(doc, c.bsonOpts, c.registry) + if err != nil { + return sliceVal, index, fmt.Errorf("error configuring BSON decoder: %w", err) + } + err = dec.Decode(currElem) + if err != nil { return sliceVal, index, err } @@ -322,6 +390,22 @@ func (c *Cursor) SetBatchSize(batchSize int32) { c.bc.SetBatchSize(batchSize) } +// SetMaxTime will set the maximum amount of time the server will allow the +// operations to execute. The server will error if this field is set but the +// cursor is not configured with awaitData=true. +// +// The time.Duration value passed by this setter will be converted and rounded +// down to the nearest millisecond. +func (c *Cursor) SetMaxTime(dur time.Duration) { + c.bc.SetMaxTime(dur) +} + +// SetComment will set a user-configurable comment that can be used to identify +// the operation in server logs. +func (c *Cursor) SetComment(comment interface{}) { + c.bc.SetComment(comment) +} + // BatchCursorFromCursor returns a driver.BatchCursor for the given Cursor. If there is no underlying // driver.BatchCursor, nil is returned. 
// diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/database.go b/vendor/go.mongodb.org/mongo-driver/mongo/database.go index 415537d51..f5d5ad379 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/database.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/database.go @@ -10,10 +10,11 @@ import ( "context" "errors" "fmt" + "time" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" - "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/internal/csfle" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" @@ -38,6 +39,7 @@ type Database struct { readPreference *readpref.ReadPref readSelector description.ServerSelector writeSelector description.ServerSelector + bsonOpts *options.BSONOptions registry *bsoncodec.Registry } @@ -59,6 +61,11 @@ func newDatabase(client *Client, name string, opts ...*options.DatabaseOptions) wc = dbOpt.WriteConcern } + bsonOpts := client.bsonOpts + if dbOpt.BSONOptions != nil { + bsonOpts = dbOpt.BSONOptions + } + reg := client.registry if dbOpt.Registry != nil { reg = dbOpt.Registry @@ -70,6 +77,7 @@ func newDatabase(client *Client, name string, opts ...*options.DatabaseOptions) readPreference: rp, readConcern: rc, writeConcern: wc, + bsonOpts: bsonOpts, registry: reg, } @@ -149,7 +157,11 @@ func (db *Database) processRunCommand(ctx context.Context, cmd interface{}, return nil, sess, errors.New("read preference in a transaction must be primary") } - runCmdDoc, err := transformBsoncoreDocument(db.registry, cmd, false, "cmd") + if isUnorderedMap(cmd) { + return nil, sess, ErrMapForOrderedArgument{"cmd"} + } + + runCmdDoc, err := marshal(cmd, db.bsonOpts, db.registry) if err != nil { return nil, sess, err } @@ -165,15 +177,22 @@ func (db *Database) processRunCommand(ctx context.Context, cmd interface{}, switch cursorCommand { case true: cursorOpts := db.client.createBaseCursorOptions() + + 
cursorOpts.MarshalValueEncoderFn = newEncoderFn(db.bsonOpts, db.registry) + op = operation.NewCursorCommand(runCmdDoc, cursorOpts) default: op = operation.NewCommand(runCmdDoc) } + + // TODO(GODRIVER-2649): ReadConcern(db.readConcern) will not actually pass the database's + // read concern. Remove this note once readConcern is correctly passed to the operation + // level. return op.Session(sess).CommandMonitor(db.client.monitor). ServerSelector(readSelect).ClusterClock(db.client.clock). Database(db.name).Deployment(db.client.deployment).ReadConcern(db.readConcern). Crypt(db.client.cryptFLE).ReadPreference(ro.ReadPreference).ServerAPI(db.client.serverAPI). - Timeout(db.client.timeout), sess, nil + Timeout(db.client.timeout).Logger(db.client.logger), sess, nil } // RunCommand executes the given command against the database. This function does not obey the Database's read @@ -203,9 +222,11 @@ func (db *Database) RunCommand(ctx context.Context, runCommand interface{}, opts // RunCommand can be used to run a write, thus execute may return a write error _, convErr := processWriteError(err) return &SingleResult{ - err: convErr, - rdr: bson.Raw(op.Result()), - reg: db.registry, + ctx: ctx, + err: convErr, + rdr: bson.Raw(op.Result()), + bsonOpts: db.bsonOpts, + reg: db.registry, } } @@ -236,6 +257,10 @@ func (db *Database) RunCommandCursor(ctx context.Context, runCommand interface{} if err = op.Execute(ctx); err != nil { closeImplicitSession(sess) + if errors.Is(err, driver.ErrNoCursor) { + return nil, errors.New( + "database response does not contain a cursor; try using RunCommand instead") + } return nil, replaceErrors(err) } @@ -244,7 +269,7 @@ func (db *Database) RunCommandCursor(ctx context.Context, runCommand interface{} closeImplicitSession(sess) return nil, replaceErrors(err) } - cursor, err := newCursorWithSession(bc, db.registry, sess) + cursor, err := newCursorWithSession(bc, db.bsonOpts, db.registry, sess) return cursor, replaceErrors(err) } @@ -347,7 +372,7 @@ 
func (db *Database) ListCollections(ctx context.Context, filter interface{}, opt ctx = context.Background() } - filterDoc, err := transformBsoncoreDocument(db.registry, filter, true, "filter") + filterDoc, err := marshal(filter, db.bsonOpts, db.registry) if err != nil { return nil, err } @@ -377,6 +402,9 @@ func (db *Database) ListCollections(ctx context.Context, filter interface{}, opt ServerAPI(db.client.serverAPI).Timeout(db.client.timeout) cursorOpts := db.client.createBaseCursorOptions() + + cursorOpts.MarshalValueEncoderFn = newEncoderFn(db.bsonOpts, db.registry) + if lco.NameOnly != nil { op = op.NameOnly(*lco.NameOnly) } @@ -405,7 +433,7 @@ func (db *Database) ListCollections(ctx context.Context, filter interface{}, opt closeImplicitSession(sess) return nil, replaceErrors(err) } - cursor, err := newCursorWithSession(bc, db.registry, sess) + cursor, err := newCursorWithSession(bc, db.bsonOpts, db.registry, sess) return cursor, replaceErrors(err) } @@ -569,17 +597,37 @@ func (db *Database) getEncryptedFieldsFromMap(collectionName string) interface{} // createCollectionWithEncryptedFields creates a collection with an EncryptedFields. func (db *Database) createCollectionWithEncryptedFields(ctx context.Context, name string, ef interface{}, opts ...*options.CreateCollectionOptions) error { - efBSON, err := transformBsoncoreDocument(db.registry, ef, true /* mapAllowed */, "encryptedFields") + efBSON, err := marshal(ef, db.bsonOpts, db.registry) if err != nil { return fmt.Errorf("error transforming document: %v", err) } - // Create the three encryption-related, associated collections: `escCollection`, `eccCollection` and `ecocCollection`. + // Check the wire version to ensure server is 7.0.0 or newer. + // After the wire version check, and before creating the collections, it is possible the server state changes. + // That is OK. This wire version check is a best effort to inform users earlier if using a QEv2 driver with a QEv1 server. 
+ { + const QEv2WireVersion = 21 + server, err := db.client.deployment.SelectServer(ctx, description.WriteSelector()) + if err != nil { + return fmt.Errorf("error selecting server to check maxWireVersion: %w", err) + } + conn, err := server.Connection(ctx) + if err != nil { + return fmt.Errorf("error getting connection to check maxWireVersion: %w", err) + } + defer conn.Close() + wireVersionRange := conn.Description().WireVersion + if wireVersionRange.Max < QEv2WireVersion { + return fmt.Errorf("Driver support of Queryable Encryption is incompatible with server. Upgrade server to use Queryable Encryption. Got maxWireVersion %v but need maxWireVersion >= %v", wireVersionRange.Max, QEv2WireVersion) + } + } + + // Create the two encryption-related, associated collections: `escCollection` and `ecocCollection`. stateCollectionOpts := options.CreateCollection(). SetClusteredIndex(bson.D{{"key", bson.D{{"_id", 1}}}, {"unique", true}}) // Create ESCCollection. - escCollection, err := internal.GetEncryptedStateCollectionName(efBSON, name, internal.EncryptedStateCollection) + escCollection, err := csfle.GetEncryptedStateCollectionName(efBSON, name, csfle.EncryptedStateCollection) if err != nil { return err } @@ -588,18 +636,8 @@ func (db *Database) createCollectionWithEncryptedFields(ctx context.Context, nam return err } - // Create ECCCollection. - eccCollection, err := internal.GetEncryptedStateCollectionName(efBSON, name, internal.EncryptedCacheCollection) - if err != nil { - return err - } - - if err := db.createCollection(ctx, eccCollection, stateCollectionOpts); err != nil { - return err - } - // Create ECOCCollection. 
- ecocCollection, err := internal.GetEncryptedStateCollectionName(efBSON, name, internal.EncryptedCompactionCollection) + ecocCollection, err := csfle.GetEncryptedStateCollectionName(efBSON, name, csfle.EncryptedCompactionCollection) if err != nil { return err } @@ -647,7 +685,7 @@ func (db *Database) createCollectionOperation(name string, opts ...*options.Crea op.Collation(bsoncore.Document(cco.Collation.ToDocument())) } if cco.ChangeStreamPreAndPostImages != nil { - csppi, err := transformBsoncoreDocument(db.registry, cco.ChangeStreamPreAndPostImages, true, "changeStreamPreAndPostImages") + csppi, err := marshal(cco.ChangeStreamPreAndPostImages, db.bsonOpts, db.registry) if err != nil { return nil, err } @@ -656,7 +694,7 @@ func (db *Database) createCollectionOperation(name string, opts ...*options.Crea if cco.DefaultIndexOptions != nil { idx, doc := bsoncore.AppendDocumentStart(nil) if cco.DefaultIndexOptions.StorageEngine != nil { - storageEngine, err := transformBsoncoreDocument(db.registry, cco.DefaultIndexOptions.StorageEngine, true, "storageEngine") + storageEngine, err := marshal(cco.DefaultIndexOptions.StorageEngine, db.bsonOpts, db.registry) if err != nil { return nil, err } @@ -677,7 +715,7 @@ func (db *Database) createCollectionOperation(name string, opts ...*options.Crea op.Size(*cco.SizeInBytes) } if cco.StorageEngine != nil { - storageEngine, err := transformBsoncoreDocument(db.registry, cco.StorageEngine, true, "storageEngine") + storageEngine, err := marshal(cco.StorageEngine, db.bsonOpts, db.registry) if err != nil { return nil, err } @@ -690,7 +728,7 @@ func (db *Database) createCollectionOperation(name string, opts ...*options.Crea op.ValidationLevel(*cco.ValidationLevel) } if cco.Validator != nil { - validator, err := transformBsoncoreDocument(db.registry, cco.Validator, true, "validator") + validator, err := marshal(cco.Validator, db.bsonOpts, db.registry) if err != nil { return nil, err } @@ -710,6 +748,18 @@ func (db *Database) 
createCollectionOperation(name string, opts ...*options.Crea doc = bsoncore.AppendStringElement(doc, "granularity", *cco.TimeSeriesOptions.Granularity) } + if cco.TimeSeriesOptions.BucketMaxSpan != nil { + bmss := int64(*cco.TimeSeriesOptions.BucketMaxSpan / time.Second) + + doc = bsoncore.AppendInt64Element(doc, "bucketMaxSpanSeconds", bmss) + } + + if cco.TimeSeriesOptions.BucketRounding != nil { + brs := int64(*cco.TimeSeriesOptions.BucketRounding / time.Second) + + doc = bsoncore.AppendInt64Element(doc, "bucketRoundingSeconds", brs) + } + doc, err := bsoncore.AppendDocumentEnd(doc, idx) if err != nil { return nil, err @@ -718,7 +768,7 @@ func (db *Database) createCollectionOperation(name string, opts ...*options.Crea op.TimeSeries(doc) } if cco.ClusteredIndex != nil { - clusteredIndex, err := transformBsoncoreDocument(db.registry, cco.ClusteredIndex, true, "clusteredIndex") + clusteredIndex, err := marshal(cco.ClusteredIndex, db.bsonOpts, db.registry) if err != nil { return nil, err } @@ -744,7 +794,7 @@ func (db *Database) createCollectionOperation(name string, opts ...*options.Crea func (db *Database) CreateView(ctx context.Context, viewName, viewOn string, pipeline interface{}, opts ...*options.CreateViewOptions) error { - pipelineArray, _, err := transformAggregatePipeline(db.registry, pipeline) + pipelineArray, _, err := marshalAggregatePipeline(pipeline, db.bsonOpts, db.registry) if err != nil { return err } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go index a20c86ac9..cf3942383 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/server.go @@ -13,7 +13,9 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/internal/bsonutil" + 
"go.mongodb.org/mongo-driver/internal/handshake" + "go.mongodb.org/mongo-driver/internal/ptrutil" "go.mongodb.org/mongo-driver/mongo/address" "go.mongodb.org/mongo-driver/tag" ) @@ -31,35 +33,37 @@ type SelectedServer struct { type Server struct { Addr address.Address - Arbiters []string - AverageRTT time.Duration - AverageRTTSet bool - Compression []string // compression methods returned by server - CanonicalAddr address.Address - ElectionID primitive.ObjectID - HeartbeatInterval time.Duration - HelloOK bool - Hosts []string - IsCryptd bool - LastError error - LastUpdateTime time.Time - LastWriteTime time.Time - MaxBatchCount uint32 - MaxDocumentSize uint32 - MaxMessageSize uint32 - Members []address.Address - Passives []string - Passive bool - Primary address.Address - ReadOnly bool - ServiceID *primitive.ObjectID // Only set for servers that are deployed behind a load balancer. - SessionTimeoutMinutes uint32 - SetName string - SetVersion uint32 - Tags tag.Set - TopologyVersion *TopologyVersion - Kind ServerKind - WireVersion *VersionRange + Arbiters []string + AverageRTT time.Duration + AverageRTTSet bool + Compression []string // compression methods returned by server + CanonicalAddr address.Address + ElectionID primitive.ObjectID + HeartbeatInterval time.Duration + HelloOK bool + Hosts []string + IsCryptd bool + LastError error + LastUpdateTime time.Time + LastWriteTime time.Time + MaxBatchCount uint32 + MaxDocumentSize uint32 + MaxMessageSize uint32 + Members []address.Address + Passives []string + Passive bool + Primary address.Address + ReadOnly bool + ServiceID *primitive.ObjectID // Only set for servers that are deployed behind a load balancer. + // Deprecated: Use SessionTimeoutMinutesPtr instead. 
+ SessionTimeoutMinutes uint32 + SessionTimeoutMinutesPtr *int64 + SetName string + SetVersion uint32 + Tags tag.Set + TopologyVersion *TopologyVersion + Kind ServerKind + WireVersion *VersionRange } // NewServer creates a new server description from the given hello command response. @@ -78,7 +82,7 @@ func NewServer(addr address.Address, response bson.Raw) Server { switch element.Key() { case "arbiters": var err error - desc.Arbiters, err = internal.StringSliceFromRawElement(element) + desc.Arbiters, err = stringSliceFromRawElement(element) if err != nil { desc.LastError = err return desc @@ -91,7 +95,7 @@ func NewServer(addr address.Address, response bson.Raw) Server { } case "compression": var err error - desc.Compression, err = internal.StringSliceFromRawElement(element) + desc.Compression, err = stringSliceFromRawElement(element) if err != nil { desc.LastError = err return desc @@ -122,7 +126,7 @@ func NewServer(addr address.Address, response bson.Raw) Server { } case "hosts": var err error - desc.Hosts, err = internal.StringSliceFromRawElement(element) + desc.Hosts, err = stringSliceFromRawElement(element) if err != nil { desc.LastError = err return desc @@ -133,7 +137,7 @@ func NewServer(addr address.Address, response bson.Raw) Server { desc.LastError = fmt.Errorf("expected 'isWritablePrimary' to be a boolean but it's a BSON %s", element.Value().Type) return desc } - case internal.LegacyHelloLowercase: + case handshake.LegacyHelloLowercase: isWritablePrimary, ok = element.Value().BooleanOK() if !ok { desc.LastError = fmt.Errorf("expected legacy hello to be a boolean but it's a BSON %s", element.Value().Type) @@ -166,7 +170,9 @@ func NewServer(addr address.Address, response bson.Raw) Server { desc.LastError = fmt.Errorf("expected 'logicalSessionTimeoutMinutes' to be an integer but it's a BSON %s", element.Value().Type) return desc } + desc.SessionTimeoutMinutes = uint32(i64) + desc.SessionTimeoutMinutesPtr = &i64 case "maxBsonObjectSize": i64, ok := 
element.Value().AsInt64OK() if !ok { @@ -225,7 +231,7 @@ func NewServer(addr address.Address, response bson.Raw) Server { } case "passives": var err error - desc.Passives, err = internal.StringSliceFromRawElement(element) + desc.Passives, err = stringSliceFromRawElement(element) if err != nil { desc.LastError = err return desc @@ -462,7 +468,7 @@ func (s Server) Equal(other Server) bool { return false } - if s.SessionTimeoutMinutes != other.SessionTimeoutMinutes { + if ptrutil.CompareInt64(s.SessionTimeoutMinutesPtr, other.SessionTimeoutMinutesPtr) != 0 { return false } @@ -486,3 +492,11 @@ func sliceStringEqual(a []string, b []string) bool { } return true } + +// stringSliceFromRawElement decodes the provided BSON element into a []string. +// This internally calls StringSliceFromRawValue on the element's value. The +// error conditions outlined in that function's documentation apply for this +// function as well. +func stringSliceFromRawElement(element bson.RawElement) ([]string, error) { + return bsonutil.StringSliceFromRawValue(element.Key(), element.Value()) +} diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go index df5e77a45..aee1f050c 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go @@ -7,6 +7,7 @@ package description import ( + "encoding/json" "fmt" "math" "time" @@ -30,10 +31,48 @@ func (ssf ServerSelectorFunc) SelectServer(t Topology, s []Server) ([]Server, er return ssf(t, s) } +// serverSelectorInfo contains metadata concerning the server selector for the +// purpose of publication. +type serverSelectorInfo struct { + Type string + Data string `json:",omitempty"` + Selectors []serverSelectorInfo `json:",omitempty"` +} + +// String returns the JSON string representation of the serverSelectorInfo. 
+func (sss serverSelectorInfo) String() string { + bytes, _ := json.Marshal(sss) + + return string(bytes) +} + +// serverSelectorInfoGetter is an interface that defines an info() method to +// get the serverSelectorInfo. +type serverSelectorInfoGetter interface { + info() serverSelectorInfo +} + type compositeSelector struct { selectors []ServerSelector } +func (cs *compositeSelector) info() serverSelectorInfo { + csInfo := serverSelectorInfo{Type: "compositeSelector"} + + for _, sel := range cs.selectors { + if getter, ok := sel.(serverSelectorInfoGetter); ok { + csInfo.Selectors = append(csInfo.Selectors, getter.info()) + } + } + + return csInfo +} + +// String returns the JSON string representation of the compositeSelector. +func (cs *compositeSelector) String() string { + return cs.info().String() +} + // CompositeSelector combines multiple selectors into a single selector by applying them in order to the candidates // list. // @@ -68,8 +107,16 @@ func LatencySelector(latency time.Duration) ServerSelector { return &latencySelector{latency: latency} } -func (ls *latencySelector) SelectServer(t Topology, candidates []Server) ([]Server, error) { - if ls.latency < 0 { +func (latencySelector) info() serverSelectorInfo { + return serverSelectorInfo{Type: "latencySelector"} +} + +func (selector latencySelector) String() string { + return selector.info().String() +} + +func (selector *latencySelector) SelectServer(t Topology, candidates []Server) ([]Server, error) { + if selector.latency < 0 { return candidates, nil } if t.Kind == LoadBalanced { @@ -94,90 +141,109 @@ func (ls *latencySelector) SelectServer(t Topology, candidates []Server) ([]Serv return candidates, nil } - max := min + ls.latency + max := min + selector.latency - var result []Server - for _, candidate := range candidates { + viableIndexes := make([]int, 0, len(candidates)) + for i, candidate := range candidates { if candidate.AverageRTTSet { if candidate.AverageRTT <= max { - result = append(result, 
candidate) + viableIndexes = append(viableIndexes, i) } } } - + if len(viableIndexes) == len(candidates) { + return candidates, nil + } + result := make([]Server, len(viableIndexes)) + for i, idx := range viableIndexes { + result[i] = candidates[idx] + } return result, nil } } +type writeServerSelector struct{} + // WriteSelector selects all the writable servers. func WriteSelector() ServerSelector { - return ServerSelectorFunc(func(t Topology, candidates []Server) ([]Server, error) { - switch t.Kind { - case Single, LoadBalanced: - return candidates, nil - default: - result := []Server{} - for _, candidate := range candidates { - switch candidate.Kind { - case Mongos, RSPrimary, Standalone: - result = append(result, candidate) - } + return writeServerSelector{} +} + +func (writeServerSelector) info() serverSelectorInfo { + return serverSelectorInfo{Type: "writeSelector"} +} + +func (selector writeServerSelector) String() string { + return selector.info().String() +} + +func (writeServerSelector) SelectServer(t Topology, candidates []Server) ([]Server, error) { + switch t.Kind { + case Single, LoadBalanced: + return candidates, nil + default: + result := []Server{} + for _, candidate := range candidates { + switch candidate.Kind { + case Mongos, RSPrimary, Standalone: + result = append(result, candidate) } - return result, nil } - }) + return result, nil + } +} + +type readPrefServerSelector struct { + rp *readpref.ReadPref + isOutputAggregate bool } // ReadPrefSelector selects servers based on the provided read preference. func ReadPrefSelector(rp *readpref.ReadPref) ServerSelector { - return readPrefSelector(rp, false) + return readPrefServerSelector{ + rp: rp, + isOutputAggregate: false, + } } -// OutputAggregateSelector selects servers based on the provided read preference given that the underlying operation is -// aggregate with an output stage. 
-func OutputAggregateSelector(rp *readpref.ReadPref) ServerSelector { - return readPrefSelector(rp, true) +func (selector readPrefServerSelector) info() serverSelectorInfo { + return serverSelectorInfo{ + Type: "readPrefSelector", + Data: selector.rp.String(), + } } -func readPrefSelector(rp *readpref.ReadPref, isOutputAggregate bool) ServerSelector { - return ServerSelectorFunc(func(t Topology, candidates []Server) ([]Server, error) { - if t.Kind == LoadBalanced { - // In LoadBalanced mode, there should only be one server in the topology and it must be selected. We check - // this before checking MaxStaleness support because there's no monitoring in this mode, so the candidate - // server wouldn't have a wire version set, which would result in an error. - return candidates, nil - } +func (selector readPrefServerSelector) String() string { + return selector.info().String() +} - if _, set := rp.MaxStaleness(); set { - for _, s := range candidates { - if s.Kind != Unknown { - if err := maxStalenessSupported(s.WireVersion); err != nil { - return nil, err - } - } - } - } +func (selector readPrefServerSelector) SelectServer(t Topology, candidates []Server) ([]Server, error) { + if t.Kind == LoadBalanced { + // In LoadBalanced mode, there should only be one server in the topology and it must be selected. We check + // this before checking MaxStaleness support because there's no monitoring in this mode, so the candidate + // server wouldn't have a wire version set, which would result in an error. 
+ return candidates, nil + } - switch t.Kind { - case Single: - return candidates, nil - case ReplicaSetNoPrimary, ReplicaSetWithPrimary: - return selectForReplicaSet(rp, isOutputAggregate, t, candidates) - case Sharded: - return selectByKind(candidates, Mongos), nil - } + switch t.Kind { + case Single: + return candidates, nil + case ReplicaSetNoPrimary, ReplicaSetWithPrimary: + return selectForReplicaSet(selector.rp, selector.isOutputAggregate, t, candidates) + case Sharded: + return selectByKind(candidates, Mongos), nil + } - return nil, nil - }) + return nil, nil } -// maxStalenessSupported returns an error if the given server version does not support max staleness. -func maxStalenessSupported(wireVersion *VersionRange) error { - if wireVersion != nil && wireVersion.Max < 5 { - return fmt.Errorf("max staleness is only supported for servers 3.4 or newer") +// OutputAggregateSelector selects servers based on the provided read preference +// given that the underlying operation is aggregate with an output stage. 
+func OutputAggregateSelector(rp *readpref.ReadPref) ServerSelector { + return readPrefServerSelector{ + rp: rp, + isOutputAggregate: true, } - - return nil } func selectForReplicaSet(rp *readpref.ReadPref, isOutputAggregate bool, t Topology, candidates []Server) ([]Server, error) { @@ -304,6 +370,9 @@ func selectByKind(candidates []Server, kind ServerKind) []Server { viableIndexes = append(viableIndexes, i) } } + if len(viableIndexes) == len(candidates) { + return candidates + } result := make([]Server, len(viableIndexes)) for i, idx := range viableIndexes { result[i] = candidates[idx] diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/topology.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/topology.go index 8544548c9..b082515e5 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/description/topology.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/topology.go @@ -14,11 +14,13 @@ import ( // Topology contains information about a MongoDB cluster. type Topology struct { - Servers []Server - SetName string - Kind TopologyKind - SessionTimeoutMinutes uint32 - CompatibilityErr error + Servers []Server + SetName string + Kind TopologyKind + // Deprecated: Use SessionTimeoutMinutesPtr instead. + SessionTimeoutMinutes uint32 + SessionTimeoutMinutesPtr *int64 + CompatibilityErr error } // String implements the Stringer interface. 
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/doc.go b/vendor/go.mongodb.org/mongo-driver/mongo/doc.go index 39bb53099..e0a5d66ac 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/doc.go @@ -94,10 +94,13 @@ // // # Potential DNS Issues // -// Building with Go 1.11+ and using connection strings with the "mongodb+srv"[1] scheme is +// Building with Go 1.11+ and using connection strings with the "mongodb+srv"[1] scheme is unfortunately // incompatible with some DNS servers in the wild due to the change introduced in -// https://github.com/golang/go/issues/10622. If you receive an error with the message "cannot -// unmarshal DNS message" while running an operation, we suggest you use a different DNS server. +// https://github.com/golang/go/issues/10622. You may receive an error with the message "cannot unmarshal DNS message" +// while running an operation when using DNS servers that non-compliantly compress SRV records. Old versions of kube-dns +// and the native DNS resolver (systemd-resolver) on Ubuntu 18.04 are known to be non-compliant in this manner. We suggest +// using a different DNS server (8.8.8.8 is the common default), and, if that's not possible, avoiding the "mongodb+srv" +// scheme. // // # Client Side Encryption // @@ -122,6 +125,8 @@ // This bug may result in data corruption. // Please use libmongocrypt 1.5.2 or higher when calling RewrapManyDataKey. // +// - Go Driver v1.12.0 requires libmongocrypt v1.8.0 or higher. 
+// // To install libmongocrypt, follow the instructions for your // operating system: // diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/errors.go b/vendor/go.mongodb.org/mongo-driver/mongo/errors.go index 5c72a495e..72c3bcc24 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/errors.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/errors.go @@ -15,6 +15,7 @@ import ( "strings" "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/internal/codecutil" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt" "go.mongodb.org/mongo-driver/x/mongo/driver/topology" @@ -87,53 +88,70 @@ func replaceErrors(err error) error { return MongocryptError{Code: me.Code, Message: me.Message} } + if errors.Is(err, codecutil.ErrNilValue) { + return ErrNilValue + } + + if marshalErr, ok := err.(codecutil.MarshalError); ok { + return MarshalError{ + Value: marshalErr.Value, + Err: marshalErr.Err, + } + } + return err } -// IsDuplicateKeyError returns true if err is a duplicate key error +// IsDuplicateKeyError returns true if err is a duplicate key error. func IsDuplicateKeyError(err error) bool { - // handles SERVER-7164 and SERVER-11493 - for ; err != nil; err = unwrap(err) { - if e, ok := err.(ServerError); ok { - return e.HasErrorCode(11000) || e.HasErrorCode(11001) || e.HasErrorCode(12582) || - e.HasErrorCodeWithMessage(16460, " E11000 ") - } + if se := ServerError(nil); errors.As(err, &se) { + return se.HasErrorCode(11000) || // Duplicate key error. + se.HasErrorCode(11001) || // Duplicate key error on update. + // Duplicate key error in a capped collection. See SERVER-7164. + se.HasErrorCode(12582) || + // Mongos insert error caused by a duplicate key error. See + // SERVER-11493. + se.HasErrorCodeWithMessage(16460, " E11000 ") } return false } -// IsTimeout returns true if err is from a timeout +// timeoutErrs is a list of error values that indicate a timeout happened. 
+var timeoutErrs = [...]error{ + context.DeadlineExceeded, + driver.ErrDeadlineWouldBeExceeded, + topology.ErrServerSelectionTimeout, +} + +// IsTimeout returns true if err was caused by a timeout. For error chains, +// IsTimeout returns true if any error in the chain was caused by a timeout. func IsTimeout(err error) bool { - for ; err != nil; err = unwrap(err) { - // check unwrappable errors together - if err == context.DeadlineExceeded { - return true - } - if err == driver.ErrDeadlineWouldBeExceeded { - return true - } - if err == topology.ErrServerSelectionTimeout { - return true - } - if _, ok := err.(topology.WaitQueueTimeoutError); ok { + // Check if the error chain contains any of the timeout error values. + for _, target := range timeoutErrs { + if errors.Is(err, target) { return true } - if ce, ok := err.(CommandError); ok && ce.IsMaxTimeMSExpiredError() { - return true - } - if we, ok := err.(WriteException); ok && we.WriteConcernError != nil && - we.WriteConcernError.IsMaxTimeMSExpiredError() { + } + + // Check if the error chain contains any error types that can indicate + // timeout. + if errors.As(err, &topology.WaitQueueTimeoutError{}) { + return true + } + if ce := (CommandError{}); errors.As(err, &ce) && ce.IsMaxTimeMSExpiredError() { + return true + } + if we := (WriteException{}); errors.As(err, &we) && we.WriteConcernError != nil && we.WriteConcernError.IsMaxTimeMSExpiredError() { + return true + } + if ne := net.Error(nil); errors.As(err, &ne) { + return ne.Timeout() + } + // Check timeout error labels. 
+ if le := LabeledError(nil); errors.As(err, &le) { + if le.HasErrorLabel("NetworkTimeoutError") || le.HasErrorLabel("ExceededTimeLimitError") { return true } - if ne, ok := err.(net.Error); ok { - return ne.Timeout() - } - //timeout error labels - if le, ok := err.(LabeledError); ok { - if le.HasErrorLabel("NetworkTimeoutError") || le.HasErrorLabel("ExceededTimeLimitError") { - return true - } - } } return false @@ -320,7 +338,7 @@ func (we WriteError) HasErrorCode(code int) bool { // HasErrorLabel returns true if the error contains the specified label. WriteErrors do not contain labels, // so we always return false. -func (we WriteError) HasErrorLabel(label string) bool { +func (we WriteError) HasErrorLabel(string) bool { return false } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go index 3500b775f..41a93a214 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go @@ -94,6 +94,9 @@ func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOption Timeout(iv.coll.client.timeout) cursorOpts := iv.coll.client.createBaseCursorOptions() + + cursorOpts.MarshalValueEncoderFn = newEncoderFn(iv.coll.bsonOpts, iv.coll.registry) + lio := options.MergeListIndexesOptions(opts...) if lio.BatchSize != nil { op = op.BatchSize(*lio.BatchSize) @@ -122,7 +125,7 @@ func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOption closeImplicitSession(sess) return nil, replaceErrors(err) } - cursor, err := newCursorWithSession(bc, iv.coll.registry, sess) + cursor, err := newCursorWithSession(bc, iv.coll.bsonOpts, iv.coll.registry, sess) return cursor, replaceErrors(err) } @@ -181,7 +184,11 @@ func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts .. 
return nil, fmt.Errorf("index model keys cannot be nil") } - keys, err := transformBsoncoreDocument(iv.coll.registry, model.Keys, false, "keys") + if isUnorderedMap(model.Keys) { + return nil, ErrMapForOrderedArgument{"keys"} + } + + keys, err := marshal(model.Keys, iv.coll.bsonOpts, iv.coll.registry) if err != nil { return nil, err } @@ -250,7 +257,7 @@ func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts .. Deployment(iv.coll.client.deployment).ServerSelector(selector).ServerAPI(iv.coll.client.serverAPI). Timeout(iv.coll.client.timeout).MaxTime(option.MaxTime) if option.CommitQuorum != nil { - commitQuorum, err := transformValue(iv.coll.registry, option.CommitQuorum, true, "commitQuorum") + commitQuorum, err := marshalValue(option.CommitQuorum, iv.coll.bsonOpts, iv.coll.registry) if err != nil { return nil, err } @@ -282,7 +289,7 @@ func (iv IndexView) createOptionsDoc(opts *options.IndexOptions) (bsoncore.Docum optsDoc = bsoncore.AppendBooleanElement(optsDoc, "sparse", *opts.Sparse) } if opts.StorageEngine != nil { - doc, err := transformBsoncoreDocument(iv.coll.registry, opts.StorageEngine, true, "storageEngine") + doc, err := marshal(opts.StorageEngine, iv.coll.bsonOpts, iv.coll.registry) if err != nil { return nil, err } @@ -305,7 +312,7 @@ func (iv IndexView) createOptionsDoc(opts *options.IndexOptions) (bsoncore.Docum optsDoc = bsoncore.AppendInt32Element(optsDoc, "textIndexVersion", *opts.TextVersion) } if opts.Weights != nil { - doc, err := transformBsoncoreDocument(iv.coll.registry, opts.Weights, true, "weights") + doc, err := marshal(opts.Weights, iv.coll.bsonOpts, iv.coll.registry) if err != nil { return nil, err } @@ -328,7 +335,7 @@ func (iv IndexView) createOptionsDoc(opts *options.IndexOptions) (bsoncore.Docum optsDoc = bsoncore.AppendInt32Element(optsDoc, "bucketSize", *opts.BucketSize) } if opts.PartialFilterExpression != nil { - doc, err := transformBsoncoreDocument(iv.coll.registry, opts.PartialFilterExpression, true, 
"partialFilterExpression") + doc, err := marshal(opts.PartialFilterExpression, iv.coll.bsonOpts, iv.coll.registry) if err != nil { return nil, err } @@ -339,7 +346,7 @@ func (iv IndexView) createOptionsDoc(opts *options.IndexOptions) (bsoncore.Docum optsDoc = bsoncore.AppendDocumentElement(optsDoc, "collation", bsoncore.Document(opts.Collation.ToDocument())) } if opts.WildcardProjection != nil { - doc, err := transformBsoncoreDocument(iv.coll.registry, opts.WildcardProjection, true, "wildcardProjection") + doc, err := marshal(opts.WildcardProjection, iv.coll.bsonOpts, iv.coll.registry) if err != nil { return nil, err } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go b/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go index 2fa5e54ae..393c5b771 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go @@ -7,20 +7,23 @@ package mongo // import "go.mongodb.org/mongo-driver/mongo" import ( + "bytes" "context" "errors" "fmt" + "io" "net" "reflect" "strconv" "strings" + "go.mongodb.org/mongo-driver/internal/codecutil" "go.mongodb.org/mongo-driver/mongo/options" - "go.mongodb.org/mongo-driver/x/bsonx" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" + "go.mongodb.org/mongo-driver/bson/bsonrw" "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/bson/primitive" ) @@ -34,6 +37,8 @@ type Dialer interface { // provided type into BSON bytes and append those bytes to the provided []byte. // The AppendBSON can return a non-nil error and non-nil []byte. The AppendBSON // method may also write incomplete BSON to the []byte. +// +// Deprecated: BSONAppender is unused and will be removed in Go Driver 2.0. 
type BSONAppender interface { AppendBSON([]byte, interface{}) ([]byte, error) } @@ -41,14 +46,18 @@ type BSONAppender interface { // BSONAppenderFunc is an adapter function that allows any function that // satisfies the AppendBSON method signature to be used where a BSONAppender is // used. +// +// Deprecated: BSONAppenderFunc is unused and will be removed in Go Driver 2.0. type BSONAppenderFunc func([]byte, interface{}) ([]byte, error) // AppendBSON implements the BSONAppender interface +// +// Deprecated: BSONAppenderFunc is unused and will be removed in Go Driver 2.0. func (baf BSONAppenderFunc) AppendBSON(dst []byte, val interface{}) ([]byte, error) { return baf(dst, val) } -// MarshalError is returned when attempting to transform a value into a document +// MarshalError is returned when attempting to marshal a value into a document // results in an error. type MarshalError struct { Value interface{} @@ -57,7 +66,7 @@ type MarshalError struct { // Error implements the error interface. func (me MarshalError) Error() string { - return fmt.Sprintf("cannot transform type %s to a BSON Document: %v", reflect.TypeOf(me.Value), me.Err) + return fmt.Sprintf("cannot marshal type %s to a BSON Document: %v", reflect.TypeOf(me.Value), me.Err) } // Pipeline is a type that makes creating aggregation pipelines easier. It is a @@ -71,58 +80,77 @@ func (me MarshalError) Error() string { // } type Pipeline []bson.D -// transformAndEnsureID is a hack that makes it easy to get a RawValue as the _id value. -// It will also add an ObjectID _id as the first key if it not already present in the passed-in val. 
-func transformAndEnsureID(registry *bsoncodec.Registry, val interface{}) (bsoncore.Document, interface{}, error) { - if registry == nil { - registry = bson.NewRegistryBuilder().Build() - } - switch tt := val.(type) { - case nil: - return nil, nil, ErrNilDocument - case bsonx.Doc: - val = tt.Copy() - case []byte: - // Slight optimization so we'll just use MarshalBSON and not go through the codec machinery. - val = bson.Raw(tt) - } - - // TODO(skriptble): Use a pool of these instead. - doc := make(bsoncore.Document, 0, 256) - doc, err := bson.MarshalAppendWithRegistry(registry, doc, val) +// bvwPool is a pool of BSON value writers. BSON value writers +var bvwPool = bsonrw.NewBSONValueWriterPool() + +// getEncoder takes a writer, BSON options, and a BSON registry and returns a properly configured +// bson.Encoder that writes to the given writer. +func getEncoder( + w io.Writer, + opts *options.BSONOptions, + reg *bsoncodec.Registry, +) (*bson.Encoder, error) { + vw := bvwPool.Get(w) + enc, err := bson.NewEncoder(vw) if err != nil { - return nil, nil, MarshalError{Value: val, Err: err} + return nil, err } - var id interface{} - - value := doc.Lookup("_id") - switch value.Type { - case bsontype.Type(0): - value = bsoncore.Value{Type: bsontype.ObjectID, Data: bsoncore.AppendObjectID(nil, primitive.NewObjectID())} - olddoc := doc - doc = make(bsoncore.Document, 0, len(olddoc)+17) // type byte + _id + null byte + object ID - _, doc = bsoncore.ReserveLength(doc) - doc = bsoncore.AppendValueElement(doc, "_id", value) - doc = append(doc, olddoc[4:]...) // remove the length - doc = bsoncore.UpdateLength(doc, 0, int32(len(doc))) - default: - // We copy the bytes here to ensure that any bytes returned to the user aren't modified - // later. 
- buf := make([]byte, len(value.Data)) - copy(buf, value.Data) - value.Data = buf + if opts != nil { + if opts.ErrorOnInlineDuplicates { + enc.ErrorOnInlineDuplicates() + } + if opts.IntMinSize { + enc.IntMinSize() + } + if opts.NilByteSliceAsEmpty { + enc.NilByteSliceAsEmpty() + } + if opts.NilMapAsEmpty { + enc.NilMapAsEmpty() + } + if opts.NilSliceAsEmpty { + enc.NilSliceAsEmpty() + } + if opts.OmitZeroStruct { + enc.OmitZeroStruct() + } + if opts.StringifyMapKeysWithFmt { + enc.StringifyMapKeysWithFmt() + } + if opts.UseJSONStructTags { + enc.UseJSONStructTags() + } } - err = bson.RawValue{Type: value.Type, Value: value.Data}.UnmarshalWithRegistry(registry, &id) - if err != nil { - return nil, nil, err + if reg != nil { + // TODO:(GODRIVER-2719): Remove error handling. + if err := enc.SetRegistry(reg); err != nil { + return nil, err + } } - return doc, id, nil + return enc, nil } -func transformBsoncoreDocument(registry *bsoncodec.Registry, val interface{}, mapAllowed bool, paramName string) (bsoncore.Document, error) { +// newEncoderFn will return a function for constructing an encoder based on the +// provided codec options. +func newEncoderFn(opts *options.BSONOptions, registry *bsoncodec.Registry) codecutil.EncoderFn { + return func(w io.Writer) (*bson.Encoder, error) { + return getEncoder(w, opts, registry) + } +} + +// marshal marshals the given value as a BSON document. Byte slices are always converted to a +// bson.Raw before marshaling. +// +// If bsonOpts and registry are specified, the encoder is configured with the requested behaviors. +// If they are nil, the default behaviors are used. 
+func marshal( + val interface{}, + bsonOpts *options.BSONOptions, + registry *bsoncodec.Registry, +) (bsoncore.Document, error) { if registry == nil { registry = bson.DefaultRegistry } @@ -133,20 +161,72 @@ func transformBsoncoreDocument(registry *bsoncodec.Registry, val interface{}, ma // Slight optimization so we'll just use MarshalBSON and not go through the codec machinery. val = bson.Raw(bs) } - if !mapAllowed { - refValue := reflect.ValueOf(val) - if refValue.Kind() == reflect.Map && refValue.Len() > 1 { - return nil, ErrMapForOrderedArgument{paramName} - } + + buf := new(bytes.Buffer) + enc, err := getEncoder(buf, bsonOpts, registry) + if err != nil { + return nil, fmt.Errorf("error configuring BSON encoder: %w", err) } - // TODO(skriptble): Use a pool of these instead. - buf := make([]byte, 0, 256) - b, err := bson.MarshalAppendWithRegistry(registry, buf[:0], val) + err = enc.Encode(val) if err != nil { return nil, MarshalError{Value: val, Err: err} } - return b, nil + + return buf.Bytes(), nil +} + +// ensureID inserts the given ObjectID as an element named "_id" at the +// beginning of the given BSON document if there is not an "_id" already. If +// there is already an element named "_id", the document is not modified. It +// returns the resulting document and the decoded Go value of the "_id" element. +func ensureID( + doc bsoncore.Document, + oid primitive.ObjectID, + bsonOpts *options.BSONOptions, + reg *bsoncodec.Registry, +) (bsoncore.Document, interface{}, error) { + if reg == nil { + reg = bson.DefaultRegistry + } + + // Try to find the "_id" element. If it exists, try to unmarshal just the + // "_id" field as an interface{} and return it along with the unmodified + // BSON document. 
+ if _, err := doc.LookupErr("_id"); err == nil { + var id struct { + ID interface{} `bson:"_id"` + } + dec, err := getDecoder(doc, bsonOpts, reg) + if err != nil { + return nil, nil, fmt.Errorf("error configuring BSON decoder: %w", err) + } + err = dec.Decode(&id) + if err != nil { + return nil, nil, fmt.Errorf("error unmarshaling BSON document: %w", err) + } + + return doc, id.ID, nil + } + + // We couldn't find an "_id" element, so add one with the value of the + // provided ObjectID. + + olddoc := doc + + // Reserve an extra 17 bytes for the "_id" field we're about to add: + // type (1) + "_id" (3) + terminator (1) + object ID (12) + const extraSpace = 17 + doc = make(bsoncore.Document, 0, len(olddoc)+extraSpace) + _, doc = bsoncore.ReserveLength(doc) + doc = bsoncore.AppendObjectIDElement(doc, "_id", oid) + + // Remove and re-write the BSON document length header. + const int32Len = 4 + doc = append(doc, olddoc[int32Len:]...) + doc = bsoncore.UpdateLength(doc, 0, int32(len(doc))) + + return doc, oid, nil } func ensureDollarKey(doc bsoncore.Document) error { @@ -169,7 +249,11 @@ func ensureNoDollarKey(doc bsoncore.Document) error { return nil } -func transformAggregatePipeline(registry *bsoncodec.Registry, pipeline interface{}) (bsoncore.Document, bool, error) { +func marshalAggregatePipeline( + pipeline interface{}, + bsonOpts *options.BSONOptions, + registry *bsoncodec.Registry, +) (bsoncore.Document, bool, error) { switch t := pipeline.(type) { case bsoncodec.ValueMarshaler: btype, val, err := t.MarshalBSONValue() @@ -195,7 +279,7 @@ func transformAggregatePipeline(registry *bsoncodec.Registry, pipeline interface default: val := reflect.ValueOf(t) if !val.IsValid() || (val.Kind() != reflect.Slice && val.Kind() != reflect.Array) { - return nil, false, fmt.Errorf("can only transform slices and arrays into aggregation pipelines, but got %v", val.Kind()) + return nil, false, fmt.Errorf("can only marshal slices and arrays into aggregation pipelines, but got %v", 
val.Kind()) } var hasOutputStage bool @@ -209,7 +293,7 @@ func transformAggregatePipeline(registry *bsoncodec.Registry, pipeline interface return nil, false, fmt.Errorf("%T is not an allowed pipeline type as it represents a single document. Use bson.A or mongo.Pipeline instead", t) } - // bsoncore.Arrays do not need to be transformed. Only check validity and presence of output stage. + // bsoncore.Arrays do not need to be marshaled. Only check validity and presence of output stage. case bsoncore.Array: if err := t.Validate(); err != nil { return nil, false, err @@ -236,7 +320,7 @@ func transformAggregatePipeline(registry *bsoncodec.Registry, pipeline interface aidx, arr := bsoncore.AppendArrayStart(nil) for idx := 0; idx < valLen; idx++ { - doc, err := transformBsoncoreDocument(registry, val.Index(idx).Interface(), true, fmt.Sprintf("pipeline stage :%v", idx)) + doc, err := marshal(val.Index(idx).Interface(), bsonOpts, registry) if err != nil { return nil, false, err } @@ -253,7 +337,12 @@ func transformAggregatePipeline(registry *bsoncodec.Registry, pipeline interface } } -func transformUpdateValue(registry *bsoncodec.Registry, update interface{}, dollarKeysAllowed bool) (bsoncore.Value, error) { +func marshalUpdateValue( + update interface{}, + bsonOpts *options.BSONOptions, + registry *bsoncodec.Registry, + dollarKeysAllowed bool, +) (bsoncore.Value, error) { documentCheckerFunc := ensureDollarKey if !dollarKeysAllowed { documentCheckerFunc = ensureNoDollarKey @@ -264,9 +353,9 @@ func transformUpdateValue(registry *bsoncodec.Registry, update interface{}, doll switch t := update.(type) { case nil: return u, ErrNilDocument - case primitive.D, bsonx.Doc: + case primitive.D: u.Type = bsontype.EmbeddedDocument - u.Data, err = transformBsoncoreDocument(registry, update, true, "update") + u.Data, err = marshal(update, bsonOpts, registry) if err != nil { return u, err } @@ -304,11 +393,11 @@ func transformUpdateValue(registry *bsoncodec.Registry, update interface{}, 
doll default: val := reflect.ValueOf(t) if !val.IsValid() { - return u, fmt.Errorf("can only transform slices and arrays into update pipelines, but got %v", val.Kind()) + return u, fmt.Errorf("can only marshal slices and arrays into update pipelines, but got %v", val.Kind()) } if val.Kind() != reflect.Slice && val.Kind() != reflect.Array { u.Type = bsontype.EmbeddedDocument - u.Data, err = transformBsoncoreDocument(registry, update, true, "update") + u.Data, err = marshal(update, bsonOpts, registry) if err != nil { return u, err } @@ -320,7 +409,7 @@ func transformUpdateValue(registry *bsoncodec.Registry, update interface{}, doll aidx, arr := bsoncore.AppendArrayStart(nil) valLen := val.Len() for idx := 0; idx < valLen; idx++ { - doc, err := transformBsoncoreDocument(registry, val.Index(idx).Interface(), true, "update") + doc, err := marshal(val.Index(idx).Interface(), bsonOpts, registry) if err != nil { return u, err } @@ -336,33 +425,22 @@ func transformUpdateValue(registry *bsoncodec.Registry, update interface{}, doll } } -func transformValue(registry *bsoncodec.Registry, val interface{}, mapAllowed bool, paramName string) (bsoncore.Value, error) { - if registry == nil { - registry = bson.DefaultRegistry - } - if val == nil { - return bsoncore.Value{}, ErrNilValue - } - - if !mapAllowed { - refValue := reflect.ValueOf(val) - if refValue.Kind() == reflect.Map && refValue.Len() > 1 { - return bsoncore.Value{}, ErrMapForOrderedArgument{paramName} - } - } - - buf := make([]byte, 0, 256) - bsonType, bsonValue, err := bson.MarshalValueAppendWithRegistry(registry, buf[:0], val) - if err != nil { - return bsoncore.Value{}, MarshalError{Value: val, Err: err} - } - - return bsoncore.Value{Type: bsonType, Data: bsonValue}, nil +func marshalValue( + val interface{}, + bsonOpts *options.BSONOptions, + registry *bsoncodec.Registry, +) (bsoncore.Value, error) { + return codecutil.MarshalValue(val, newEncoderFn(bsonOpts, registry)) } // Build the aggregation pipeline for the 
CountDocument command. -func countDocumentsAggregatePipeline(registry *bsoncodec.Registry, filter interface{}, opts *options.CountOptions) (bsoncore.Document, error) { - filterDoc, err := transformBsoncoreDocument(registry, filter, true, "filter") +func countDocumentsAggregatePipeline( + filter interface{}, + encOpts *options.BSONOptions, + registry *bsoncodec.Registry, + opts *options.CountOptions, +) (bsoncore.Document, error) { + filterDoc, err := marshal(filter, encOpts, registry) if err != nil { return nil, err } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go b/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go index 016ccef62..2603a3918 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go @@ -35,7 +35,10 @@ type mongocryptdClient struct { spawnArgs []string } -func newMongocryptdClient(cryptSharedLibAvailable bool, opts *options.AutoEncryptionOptions) (*mongocryptdClient, error) { +// newMongocryptdClient creates a client to mongocryptd. +// newMongocryptdClient is expected to not be called if the crypt shared library is available. +// The crypt shared library replaces all mongocryptd functionality. +func newMongocryptdClient(opts *options.AutoEncryptionOptions) (*mongocryptdClient, error) { // create mcryptClient instance and spawn process if necessary var bypassSpawn bool var bypassAutoEncryption bool @@ -54,8 +57,7 @@ func newMongocryptdClient(cryptSharedLibAvailable bool, opts *options.AutoEncryp // - mongocryptdBypassSpawn is passed // - bypassAutoEncryption is true because mongocryptd is not used during decryption // - bypassQueryAnalysis is true because mongocryptd is not used during decryption - // - the crypt_shared library is available because it replaces all mongocryptd functionality. 
- bypassSpawn: bypassSpawn || bypassAutoEncryption || bypassQueryAnalysis || cryptSharedLibAvailable, + bypassSpawn: bypassSpawn || bypassAutoEncryption || bypassQueryAnalysis, } if !mc.bypassSpawn { @@ -89,7 +91,7 @@ func (mc *mongocryptdClient) markCommand(ctx context.Context, dbName string, cmd ctx = NewSessionContext(ctx, nil) db := mc.client.Database(dbName, databaseOpts) - res, err := db.RunCommand(ctx, cmd).DecodeBytes() + res, err := db.RunCommand(ctx, cmd).Raw() // propagate original result if err == nil { return bsoncore.Document(res), nil @@ -103,7 +105,7 @@ func (mc *mongocryptdClient) markCommand(ctx context.Context, dbName string, cmd if err = mc.spawnProcess(); err != nil { return nil, err } - res, err = db.RunCommand(ctx, cmd).DecodeBytes() + res, err = db.RunCommand(ctx, cmd).Raw() if err != nil { return nil, MongocryptdError{Wrapped: err} } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go index 38ed24908..20e1c7043 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go @@ -139,6 +139,9 @@ func (ao *AggregateOptions) SetCustom(c bson.M) *AggregateOptions { // MergeAggregateOptions combines the given AggregateOptions instances into a single AggregateOptions in a last-one-wins // fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeAggregateOptions(opts ...*AggregateOptions) *AggregateOptions { aggOpts := Aggregate() for _, ao := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go index f42714b3d..15d513862 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go @@ -10,7 +10,7 @@ import ( "crypto/tls" "net/http" - "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/internal/httputil" ) // AutoEncryptionOptions represents options used to configure auto encryption/decryption behavior for a mongo.Client @@ -43,7 +43,7 @@ type AutoEncryptionOptions struct { // AutoEncryption creates a new AutoEncryptionOptions configured with default values. func AutoEncryption() *AutoEncryptionOptions { return &AutoEncryptionOptions{ - HTTPClient: internal.DefaultHTTPClient, + HTTPClient: httputil.DefaultHTTPClient, } } @@ -151,7 +151,6 @@ func (a *AutoEncryptionOptions) SetTLSConfig(tlsOpts map[string]*tls.Config) *Au // SetEncryptedFieldsMap specifies a map from namespace to local EncryptedFieldsMap document. // EncryptedFieldsMap is used for Queryable Encryption. -// Queryable Encryption is in Public Technical Preview. Queryable Encryption should not be used in production and is subject to backwards breaking changes. func (a *AutoEncryptionOptions) SetEncryptedFieldsMap(ef map[string]interface{}) *AutoEncryptionOptions { a.EncryptedFieldsMap = ef return a @@ -159,13 +158,15 @@ func (a *AutoEncryptionOptions) SetEncryptedFieldsMap(ef map[string]interface{}) // SetBypassQueryAnalysis specifies whether or not query analysis should be used for automatic encryption. // Use this option when using explicit encryption with Queryable Encryption. -// Queryable Encryption is in Public Technical Preview. 
Queryable Encryption should not be used in production and is subject to backwards breaking changes. func (a *AutoEncryptionOptions) SetBypassQueryAnalysis(bypass bool) *AutoEncryptionOptions { a.BypassQueryAnalysis = &bypass return a } // MergeAutoEncryptionOptions combines the argued AutoEncryptionOptions in a last-one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeAutoEncryptionOptions(opts ...*AutoEncryptionOptions) *AutoEncryptionOptions { aeo := AutoEncryption() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go index 0c36d0b7b..153de0c73 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go @@ -67,6 +67,9 @@ func (b *BulkWriteOptions) SetLet(let interface{}) *BulkWriteOptions { // MergeBulkWriteOptions combines the given BulkWriteOptions instances into a single BulkWriteOptions in a last-one-wins // fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeBulkWriteOptions(opts ...*BulkWriteOptions) *BulkWriteOptions { b := BulkWrite() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go index 54d3a782e..3d06a668e 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go @@ -156,6 +156,9 @@ func (cso *ChangeStreamOptions) SetCustomPipeline(cp bson.M) *ChangeStreamOption // MergeChangeStreamOptions combines the given ChangeStreamOptions instances into a single ChangeStreamOptions in a // last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeChangeStreamOptions(opts ...*ChangeStreamOptions) *ChangeStreamOptions { csOpts := ChangeStream() for _, cso := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go index 81ea42d42..2457f682b 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientencryptionoptions.go @@ -11,7 +11,7 @@ import ( "fmt" "net/http" - "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/internal/httputil" ) // ClientEncryptionOptions represents all possible options used to configure a ClientEncryption instance. @@ -25,7 +25,7 @@ type ClientEncryptionOptions struct { // ClientEncryption creates a new ClientEncryptionOptions instance. 
func ClientEncryption() *ClientEncryptionOptions { return &ClientEncryptionOptions{ - HTTPClient: internal.DefaultHTTPClient, + HTTPClient: httputil.DefaultHTTPClient, } } @@ -122,6 +122,9 @@ func BuildTLSConfig(tlsOpts map[string]interface{}) (*tls.Config, error) { } // MergeClientEncryptionOptions combines the argued ClientEncryptionOptions in a last-one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeClientEncryptionOptions(opts ...*ClientEncryptionOptions) *ClientEncryptionOptions { ceo := ClientEncryption() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go index 4355b2f30..42664be03 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go @@ -23,7 +23,7 @@ import ( "github.com/youmark/pkcs8" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/event" - "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/internal/httputil" "go.mongodb.org/mongo-driver/mongo/readconcern" "go.mongodb.org/mongo-driver/mongo/readpref" "go.mongodb.org/mongo-driver/mongo/writeconcern" @@ -33,6 +33,26 @@ import ( "go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage" ) +const ( + // ServerMonitoringModeAuto indicates that the client will behave like "poll" + // mode when running on a FaaS (Function as a Service) platform, or like + // "stream" mode otherwise. The client detects its execution environment by + // following the rules for generating the "client.env" handshake metadata field + // as specified in the MongoDB Handshake specification. This is the default + // mode. 
+ ServerMonitoringModeAuto = connstring.ServerMonitoringModeAuto + + // ServerMonitoringModePoll indicates that the client will periodically check + // the server using a hello or legacy hello command and then sleep for + // heartbeatFrequencyMS milliseconds before running another check. + ServerMonitoringModePoll = connstring.ServerMonitoringModePoll + + // ServerMonitoringModeStream indicates that the client will use a streaming + // protocol when the server supports it. The streaming protocol optimally + // reduces the time it takes for a client to discover server state changes. + ServerMonitoringModeStream = connstring.ServerMonitoringModeStream +) + // ContextDialer is an interface that can be implemented by types that can create connections. It should be used to // provide a custom dialer when configuring a Client. // @@ -92,6 +112,88 @@ type Credential struct { PasswordSet bool } +// BSONOptions are optional BSON marshaling and unmarshaling behaviors. +type BSONOptions struct { + // UseJSONStructTags causes the driver to fall back to using the "json" + // struct tag if a "bson" struct tag is not specified. + UseJSONStructTags bool + + // ErrorOnInlineDuplicates causes the driver to return an error if there is + // a duplicate field in the marshaled BSON when the "inline" struct tag + // option is set. + ErrorOnInlineDuplicates bool + + // IntMinSize causes the driver to marshal Go integer values (int, int8, + // int16, int32, int64, uint, uint8, uint16, uint32, or uint64) as the + // minimum BSON int size (either 32 or 64 bits) that can represent the + // integer value. + IntMinSize bool + + // NilMapAsEmpty causes the driver to marshal nil Go maps as empty BSON + // documents instead of BSON null. + // + // Empty BSON documents take up slightly more space than BSON null, but + // preserve the ability to use document update operations like "$set" that + // do not work on BSON null. 
+ NilMapAsEmpty bool + + // NilSliceAsEmpty causes the driver to marshal nil Go slices as empty BSON + // arrays instead of BSON null. + // + // Empty BSON arrays take up slightly more space than BSON null, but + // preserve the ability to use array update operations like "$push" or + // "$addToSet" that do not work on BSON null. + NilSliceAsEmpty bool + + // NilByteSliceAsEmpty causes the driver to marshal nil Go byte slices as + // empty BSON binary values instead of BSON null. + NilByteSliceAsEmpty bool + + // OmitZeroStruct causes the driver to consider the zero value for a struct + // (e.g. MyStruct{}) as empty and omit it from the marshaled BSON when the + // "omitempty" struct tag option is set. + OmitZeroStruct bool + + // StringifyMapKeysWithFmt causes the driver to convert Go map keys to BSON + // document field name strings using fmt.Sprint instead of the default + // string conversion logic. + StringifyMapKeysWithFmt bool + + // AllowTruncatingDoubles causes the driver to truncate the fractional part + // of BSON "double" values when attempting to unmarshal them into a Go + // integer (int, int8, int16, int32, or int64) struct field. The truncation + // logic does not apply to BSON "decimal128" values. + AllowTruncatingDoubles bool + + // BinaryAsSlice causes the driver to unmarshal BSON binary field values + // that are the "Generic" or "Old" BSON binary subtype as a Go byte slice + // instead of a primitive.Binary. + BinaryAsSlice bool + + // DefaultDocumentD causes the driver to always unmarshal documents into the + // primitive.D type. This behavior is restricted to data typed as + // "interface{}" or "map[string]interface{}". + DefaultDocumentD bool + + // DefaultDocumentM causes the driver to always unmarshal documents into the + // primitive.M type. This behavior is restricted to data typed as + // "interface{}" or "map[string]interface{}". 
+ DefaultDocumentM bool + + // UseLocalTimeZone causes the driver to unmarshal time.Time values in the + // local timezone instead of the UTC timezone. + UseLocalTimeZone bool + + // ZeroMaps causes the driver to delete any existing values from Go maps in + // the destination value before unmarshaling BSON documents into them. + ZeroMaps bool + + // ZeroStructs causes the driver to delete any existing values from Go + // structs in the destination value before unmarshaling BSON documents into + // them. + ZeroStructs bool +} + // ClientOptions contains options to configure a Client instance. Each option can be set through setter functions. See // documentation for each setter function for an explanation of the option. type ClientOptions struct { @@ -108,6 +210,7 @@ type ClientOptions struct { HTTPClient *http.Client LoadBalanced *bool LocalThreshold *time.Duration + LoggerOptions *LoggerOptions MaxConnIdleTime *time.Duration MaxPoolSize *uint64 MinPoolSize *uint64 @@ -117,11 +220,13 @@ type ClientOptions struct { ServerMonitor *event.ServerMonitor ReadConcern *readconcern.ReadConcern ReadPreference *readpref.ReadPref + BSONOptions *BSONOptions Registry *bsoncodec.Registry ReplicaSet *string RetryReads *bool RetryWrites *bool ServerAPIOptions *ServerAPIOptions + ServerMonitoringMode *string ServerSelectionTimeout *time.Duration SRVMaxHosts *int SRVServiceName *string @@ -165,7 +270,7 @@ type ClientOptions struct { // Client creates a new ClientOptions instance. func Client() *ClientOptions { return &ClientOptions{ - HTTPClient: internal.DefaultHTTPClient, + HTTPClient: httputil.DefaultHTTPClient, } } @@ -203,25 +308,30 @@ func (c *ClientOptions) validate() error { // Validation for load-balanced mode. 
if c.LoadBalanced != nil && *c.LoadBalanced { if len(c.Hosts) > 1 { - return internal.ErrLoadBalancedWithMultipleHosts + return connstring.ErrLoadBalancedWithMultipleHosts } if c.ReplicaSet != nil { - return internal.ErrLoadBalancedWithReplicaSet + return connstring.ErrLoadBalancedWithReplicaSet } if c.Direct != nil && *c.Direct { - return internal.ErrLoadBalancedWithDirectConnection + return connstring.ErrLoadBalancedWithDirectConnection } } // Validation for srvMaxHosts. if c.SRVMaxHosts != nil && *c.SRVMaxHosts > 0 { if c.ReplicaSet != nil { - return internal.ErrSRVMaxHostsWithReplicaSet + return connstring.ErrSRVMaxHostsWithReplicaSet } if c.LoadBalanced != nil && *c.LoadBalanced { - return internal.ErrSRVMaxHostsWithLoadBalanced + return connstring.ErrSRVMaxHostsWithLoadBalanced } } + + if mode := c.ServerMonitoringMode; mode != nil && !connstring.IsValidServerMonitoringMode(*mode) { + return fmt.Errorf("invalid server monitoring mode: %q", *mode) + } + return nil } @@ -233,7 +343,7 @@ func (c *ClientOptions) GetURI() string { // ApplyURI parses the given URI and sets options accordingly. The URI can contain host names, IPv4/IPv6 literals, or // an SRV record that will be resolved when the Client is created. When using an SRV record, TLS support is -// implictly enabled. Specify the "tls=false" URI option to override this. +// implicitly enabled. Specify the "tls=false" URI option to override this. // // If the connection string contains any options that have previously been set, it will overwrite them. Options that // correspond to multiple URI parameters, such as WriteConcern, will be completely overwritten if any of the query @@ -489,7 +599,7 @@ func (c *ClientOptions) SetAuth(auth Credential) *ClientOptions { // 3. "zstd" - requires server version >= 4.2, and driver version >= 1.2.0 with cgo support enabled or driver // version >= 1.3.0 without cgo. 
// -// If this option is specified, the driver will perform a negotiation with the server to determine a common list of of +// If this option is specified, the driver will perform a negotiation with the server to determine a common list of // compressors and will use the first one in that list when performing operations. See // https://www.mongodb.com/docs/manual/reference/program/mongod/#cmdoption-mongod-networkmessagecompressors for more // information about configuring compression on the server and the server-side defaults. @@ -502,18 +612,17 @@ func (c *ClientOptions) SetCompressors(comps []string) *ClientOptions { return c } -// SetConnectTimeout specifies a timeout that is used for creating connections to the server. If a custom Dialer is -// specified through SetDialer, this option must not be used. This can be set through ApplyURI with the -// "connectTimeoutMS" (e.g "connectTimeoutMS=30") option. If set to 0, no timeout will be used. The default is 30 -// seconds. +// SetConnectTimeout specifies a timeout that is used for creating connections to the server. This can be set through +// ApplyURI with the "connectTimeoutMS" (e.g "connectTimeoutMS=30") option. If set to 0, no timeout will be used. The +// default is 30 seconds. func (c *ClientOptions) SetConnectTimeout(d time.Duration) *ClientOptions { c.ConnectTimeout = &d return c } -// SetDialer specifies a custom ContextDialer to be used to create new connections to the server. The default is a -// net.Dialer with the Timeout field set to ConnectTimeout. See https://golang.org/pkg/net/#Dialer for more information -// about the net.Dialer type. +// SetDialer specifies a custom ContextDialer to be used to create new connections to the server. This method overrides +// the default net.Dialer, so dialer options such as Timeout, KeepAlive, Resolver, etc can be set. +// See https://golang.org/pkg/net/#Dialer for more information about the net.Dialer type. 
func (c *ClientOptions) SetDialer(d ContextDialer) *ClientOptions { c.Dialer = d return c @@ -580,6 +689,14 @@ func (c *ClientOptions) SetLocalThreshold(d time.Duration) *ClientOptions { return c } +// SetLoggerOptions specifies a LoggerOptions containing options for +// configuring a logger. +func (c *ClientOptions) SetLoggerOptions(opts *LoggerOptions) *ClientOptions { + c.LoggerOptions = opts + + return c +} + // SetMaxConnIdleTime specifies the maximum amount of time that a connection will remain idle in a connection pool // before it is removed from the pool and closed. This can also be set through the "maxIdleTimeMS" URI option (e.g. // "maxIdleTimeMS=10000"). The default is 0, meaning a connection can remain unused indefinitely. @@ -660,6 +777,12 @@ func (c *ClientOptions) SetReadPreference(rp *readpref.ReadPref) *ClientOptions return c } +// SetBSONOptions configures optional BSON marshaling and unmarshaling behavior. +func (c *ClientOptions) SetBSONOptions(opts *BSONOptions) *ClientOptions { + c.BSONOptions = opts + return c +} + // SetRegistry specifies the BSON registry to use for BSON marshalling/unmarshalling operations. The default is // bson.DefaultRegistry. func (c *ClientOptions) SetRegistry(registry *bsoncodec.Registry) *ClientOptions { @@ -752,7 +875,8 @@ func (c *ClientOptions) SetTimeout(d time.Duration) *ClientOptions { // "tlsPrivateKeyFile". The "tlsCertificateKeyFile" option specifies a path to the client certificate and private key, // which must be concatenated into one file. The "tlsCertificateFile" and "tlsPrivateKey" combination specifies separate // paths to the client certificate and private key, respectively. Note that if "tlsCertificateKeyFile" is used, the -// other two options must not be specified. +// other two options must not be specified. Only the subject name of the first certificate is honored as the username +// for X509 auth in a file with multiple certs. // // 3. 
"tlsCertificateKeyFilePassword" (or "sslClientCertificateKeyPassword"): Specify the password to decrypt the client // private key file (e.g. "tlsCertificateKeyFilePassword=password"). @@ -773,7 +897,7 @@ func (c *ClientOptions) SetTLSConfig(cfg *tls.Config) *ClientOptions { // SetHTTPClient specifies the http.Client to be used for any HTTP requests. // -// This should only be used to set custom HTTP client configurations. By default, the connection will use an internal.DefaultHTTPClient. +// This should only be used to set custom HTTP client configurations. By default, the connection will use an httputil.DefaultHTTPClient. func (c *ClientOptions) SetHTTPClient(client *http.Client) *ClientOptions { c.HTTPClient = client return c @@ -847,6 +971,16 @@ func (c *ClientOptions) SetServerAPIOptions(opts *ServerAPIOptions) *ClientOptio return c } +// SetServerMonitoringMode specifies the server monitoring protocol to use. See +// the helper constants ServerMonitoringModeAuto, ServerMonitoringModePoll, and +// ServerMonitoringModeStream for more information about valid server +// monitoring modes. +func (c *ClientOptions) SetServerMonitoringMode(mode string) *ClientOptions { + c.ServerMonitoringMode = &mode + + return c +} + // SetSRVMaxHosts specifies the maximum number of SRV results to randomly select during polling. To limit the number // of hosts selected in SRV discovery, this function must be called before ApplyURI. This can also be set through // the "srvMaxHosts" URI option. @@ -866,6 +1000,9 @@ func (c *ClientOptions) SetSRVServiceName(srvName string) *ClientOptions { // MergeClientOptions combines the given *ClientOptions into a single *ClientOptions in a last one wins fashion. // The specified options are merged with the existing options on the client, with the specified options taking // precedence. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeClientOptions(opts ...*ClientOptions) *ClientOptions { c := Client() @@ -940,6 +1077,9 @@ func MergeClientOptions(opts ...*ClientOptions) *ClientOptions { if opt.ReadPreference != nil { c.ReadPreference = opt.ReadPreference } + if opt.BSONOptions != nil { + c.BSONOptions = opt.BSONOptions + } if opt.Registry != nil { c.Registry = opt.Registry } @@ -1000,6 +1140,12 @@ func MergeClientOptions(opts ...*ClientOptions) *ClientOptions { if opt.cs != nil { c.cs = opt.cs } + if opt.LoggerOptions != nil { + c.LoggerOptions = opt.LoggerOptions + } + if opt.ServerMonitoringMode != nil { + c.ServerMonitoringMode = opt.ServerMonitoringMode + } } return c @@ -1049,8 +1195,8 @@ func addClientCertFromConcatenatedFile(cfg *tls.Config, certKeyFile, keyPassword return addClientCertFromBytes(cfg, data, keyPassword) } -// addClientCertFromBytes adds a client certificate to the configuration given a path to the -// containing file and returns the certificate's subject name. +// addClientCertFromBytes adds client certificates to the configuration given a path to the +// containing file and returns the subject name in the first certificate. func addClientCertFromBytes(cfg *tls.Config, data []byte, keyPasswd string) (string, error) { var currentBlock *pem.Block var certDecodedBlock []byte @@ -1067,7 +1213,11 @@ func addClientCertFromBytes(cfg *tls.Config, data []byte, keyPasswd string) (str if currentBlock.Type == "CERTIFICATE" { certBlock := data[start : len(data)-len(remaining)] certBlocks = append(certBlocks, certBlock) - certDecodedBlock = currentBlock.Bytes + // Assign the certDecodedBlock when it is never set, + // so only the first certificate is honored in a file with multiple certs. 
+ if certDecodedBlock == nil { + certDecodedBlock = currentBlock.Bytes + } start += len(certBlock) } else if strings.HasSuffix(currentBlock.Type, "PRIVATE KEY") { isEncrypted := x509.IsEncryptedPEMBlock(currentBlock) || strings.Contains(currentBlock.Type, "ENCRYPTED PRIVATE KEY") diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go index e8b68a270..04fda6d77 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go @@ -27,6 +27,10 @@ type CollectionOptions struct { // the read preference of the Database used to configure the Collection will be used. ReadPreference *readpref.ReadPref + // BSONOptions configures optional BSON marshaling and unmarshaling + // behavior. + BSONOptions *BSONOptions + // Registry is the BSON registry to marshal and unmarshal documents for operations executed on the Collection. The default value // is nil, which means that the registry of the Database used to configure the Collection will be used. Registry *bsoncodec.Registry @@ -55,6 +59,12 @@ func (c *CollectionOptions) SetReadPreference(rp *readpref.ReadPref) *Collection return c } +// SetBSONOptions configures optional BSON marshaling and unmarshaling behavior. +func (c *CollectionOptions) SetBSONOptions(opts *BSONOptions) *CollectionOptions { + c.BSONOptions = opts + return c +} + // SetRegistry sets the value for the Registry field. func (c *CollectionOptions) SetRegistry(r *bsoncodec.Registry) *CollectionOptions { c.Registry = r @@ -63,6 +73,9 @@ func (c *CollectionOptions) SetRegistry(r *bsoncodec.Registry) *CollectionOption // MergeCollectionOptions combines the given CollectionOptions instances into a single *CollectionOptions in a // last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. 
Users should create a +// single options struct instead. func MergeCollectionOptions(opts ...*CollectionOptions) *CollectionOptions { c := Collection() diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go index f772ec4a3..bb765d950 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go @@ -89,6 +89,9 @@ func (co *CountOptions) SetSkip(i int64) *CountOptions { } // MergeCountOptions combines the given CountOptions instances into a single CountOptions in a last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeCountOptions(opts ...*CountOptions) *CountOptions { countOpts := Count() for _, co := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go index 6fc7d066a..d8ffaaf33 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go @@ -6,6 +6,8 @@ package options +import "time" + // DefaultIndexOptions represents the default options for a collection to apply on new indexes. This type can be used // when creating a new collection through the CreateCollectionOptions.SetDefaultIndexOptions method. type DefaultIndexOptions struct { @@ -28,18 +30,30 @@ func (d *DefaultIndexOptions) SetStorageEngine(storageEngine interface{}) *Defau // TimeSeriesOptions specifies options on a time-series collection. type TimeSeriesOptions struct { - // Name of the top-level field to be used for time. Inserted documents must have this field, + // TimeField is the top-level field to be used for time. 
Inserted documents must have this field, // and the field must be of the BSON UTC datetime type (0x9). TimeField string - // Optional name of the top-level field describing the series. This field is used to group + // MetaField is the name of the top-level field describing the series. This field is used to group // related data and may be of any BSON type, except for array. This name may not be the same - // as the TimeField or _id. + // as the TimeField or _id. This field is optional. MetaField *string - // Optional string specifying granularity of time-series data. Allowed granularity options are - // "seconds", "minutes" and "hours". + // Granularity is the granularity of time-series data. Allowed granularity options are + // "seconds", "minutes" and "hours". This field is optional. Granularity *string + + // BucketMaxSpan is the maximum range of time values for a bucket. The + // time.Duration is rounded down to the nearest second and applied as + // the command option: "bucketMaxSpanSeconds". This field is optional. + BucketMaxSpan *time.Duration + + // BucketRounding is used to determine the minimum time boundary when + // opening a new bucket by rounding the first timestamp down to the next + // multiple of this value. The time.Duration is rounded down to the + // nearest second and applied as the command option: + // "bucketRoundingSeconds". This field is optional. + BucketRounding *time.Duration } // TimeSeries creates a new TimeSeriesOptions instance. @@ -65,6 +79,20 @@ func (tso *TimeSeriesOptions) SetGranularity(granularity string) *TimeSeriesOpti return tso } +// SetBucketMaxSpan sets the value for BucketMaxSpan. +func (tso *TimeSeriesOptions) SetBucketMaxSpan(dur time.Duration) *TimeSeriesOptions { + tso.BucketMaxSpan = &dur + + return tso +} + +// SetBucketRounding sets the value for BucketRounding.
+func (tso *TimeSeriesOptions) SetBucketRounding(dur time.Duration) *TimeSeriesOptions { + tso.BucketRounding = &dur + + return tso +} + // CreateCollectionOptions represents options that can be used to configure a CreateCollection operation. type CreateCollectionOptions struct { // Specifies if the collection is capped (see https://www.mongodb.com/docs/manual/core/capped-collections/). If true, @@ -234,6 +262,9 @@ func (c *CreateCollectionOptions) SetClusteredIndex(clusteredIndex interface{}) // MergeCreateCollectionOptions combines the given CreateCollectionOptions instances into a single // CreateCollectionOptions in a last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeCreateCollectionOptions(opts ...*CreateCollectionOptions) *CreateCollectionOptions { cc := CreateCollection() @@ -309,6 +340,9 @@ func (c *CreateViewOptions) SetCollation(collation *Collation) *CreateViewOption // MergeCreateViewOptions combines the given CreateViewOptions instances into a single CreateViewOptions in a // last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeCreateViewOptions(opts ...*CreateViewOptions) *CreateViewOptions { cv := CreateView() diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/datakeyoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/datakeyoptions.go index 059805a6a..5afe8a248 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/datakeyoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/datakeyoptions.go @@ -79,6 +79,9 @@ func (dk *DataKeyOptions) SetKeyMaterial(keyMaterial []byte) *DataKeyOptions { } // MergeDataKeyOptions combines the argued DataKeyOptions in a last-one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. 
Users should create a +// single options struct instead. func MergeDataKeyOptions(opts ...*DataKeyOptions) *DataKeyOptions { dko := DataKey() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go index 86e5cc80c..8a380d216 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go @@ -27,6 +27,10 @@ type DatabaseOptions struct { // the read preference of the Client used to configure the Database will be used. ReadPreference *readpref.ReadPref + // BSONOptions configures optional BSON marshaling and unmarshaling + // behavior. + BSONOptions *BSONOptions + // Registry is the BSON registry to marshal and unmarshal documents for operations executed on the Database. The default value // is nil, which means that the registry of the Client used to configure the Database will be used. Registry *bsoncodec.Registry @@ -55,6 +59,12 @@ func (d *DatabaseOptions) SetReadPreference(rp *readpref.ReadPref) *DatabaseOpti return d } +// SetBSONOptions configures optional BSON marshaling and unmarshaling behavior. +func (d *DatabaseOptions) SetBSONOptions(opts *BSONOptions) *DatabaseOptions { + d.BSONOptions = opts + return d +} + // SetRegistry sets the value for the Registry field. func (d *DatabaseOptions) SetRegistry(r *bsoncodec.Registry) *DatabaseOptions { d.Registry = r @@ -63,6 +73,9 @@ func (d *DatabaseOptions) SetRegistry(r *bsoncodec.Registry) *DatabaseOptions { // MergeDatabaseOptions combines the given DatabaseOptions instances into a single DatabaseOptions in a last-one-wins // fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeDatabaseOptions(opts ...*DatabaseOptions) *DatabaseOptions { d := Database() diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/deleteoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/deleteoptions.go index 77e5d45cd..59aaef915 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/deleteoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/deleteoptions.go @@ -62,6 +62,9 @@ func (do *DeleteOptions) SetLet(let interface{}) *DeleteOptions { } // MergeDeleteOptions combines the given DeleteOptions instances into a single DeleteOptions in a last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeDeleteOptions(opts ...*DeleteOptions) *DeleteOptions { dOpts := Delete() for _, do := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/distinctoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/distinctoptions.go index fdd005c14..819f2a9a8 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/distinctoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/distinctoptions.go @@ -57,6 +57,9 @@ func (do *DistinctOptions) SetMaxTime(d time.Duration) *DistinctOptions { // MergeDistinctOptions combines the given DistinctOptions instances into a single DistinctOptions in a last-one-wins // fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeDistinctOptions(opts ...*DistinctOptions) *DistinctOptions { distinctOpts := Distinct() for _, do := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/encryptoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/encryptoptions.go index 31528b38e..88517d0c8 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/encryptoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/encryptoptions.go @@ -7,16 +7,25 @@ package options import ( + "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" ) // These constants specify valid values for QueryType // QueryType is used for Queryable Encryption. -// Queryable Encryption is in Public Technical Preview. Queryable Encryption should not be used in production and is subject to backwards breaking changes. const ( QueryTypeEquality string = "equality" ) +// RangeOptions specifies index options for a Queryable Encryption field supporting "rangePreview" queries. +// Beta: The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. +type RangeOptions struct { + Min *bson.RawValue + Max *bson.RawValue + Sparsity int64 + Precision *int32 +} + // EncryptOptions represents options to explicitly encrypt a value. type EncryptOptions struct { KeyID *primitive.Binary @@ -24,6 +33,7 @@ type EncryptOptions struct { Algorithm string QueryType string ContentionFactor *int64 + RangeOptions *RangeOptions } // Encrypt creates a new EncryptOptions instance. @@ -50,7 +60,6 @@ func (e *EncryptOptions) SetKeyAltName(keyAltName string) *EncryptOptions { // - Unindexed // This is required. // Indexed and Unindexed are used for Queryable Encryption. -// Queryable Encryption is in Public Technical Preview. Queryable Encryption should not be used in production and is subject to backwards breaking changes. 
func (e *EncryptOptions) SetAlgorithm(algorithm string) *EncryptOptions { e.Algorithm = algorithm return e @@ -60,7 +69,6 @@ func (e *EncryptOptions) SetAlgorithm(algorithm string) *EncryptOptions { // This should be one of the following: // - equality // QueryType is used for Queryable Encryption. -// Queryable Encryption is in Public Technical Preview. Queryable Encryption should not be used in production and is subject to backwards breaking changes. func (e *EncryptOptions) SetQueryType(queryType string) *EncryptOptions { e.QueryType = queryType return e @@ -68,13 +76,50 @@ func (e *EncryptOptions) SetQueryType(queryType string) *EncryptOptions { // SetContentionFactor specifies the contention factor. It is only valid to set if algorithm is "Indexed". // ContentionFactor is used for Queryable Encryption. -// Queryable Encryption is in Public Technical Preview. Queryable Encryption should not be used in production and is subject to backwards breaking changes. func (e *EncryptOptions) SetContentionFactor(contentionFactor int64) *EncryptOptions { e.ContentionFactor = &contentionFactor return e } +// SetRangeOptions specifies the options to use for explicit encryption with range. It is only valid to set if algorithm is "rangePreview". +// Beta: The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. +func (e *EncryptOptions) SetRangeOptions(ro RangeOptions) *EncryptOptions { + e.RangeOptions = &ro + return e +} + +// SetMin sets the range index minimum value. +// Beta: The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. +func (ro *RangeOptions) SetMin(min bson.RawValue) *RangeOptions { + ro.Min = &min + return ro +} + +// SetMax sets the range index maximum value. +// Beta: The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. 
+func (ro *RangeOptions) SetMax(max bson.RawValue) *RangeOptions { + ro.Max = &max + return ro +} + +// SetSparsity sets the range index sparsity. +// Beta: The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. +func (ro *RangeOptions) SetSparsity(sparsity int64) *RangeOptions { + ro.Sparsity = sparsity + return ro +} + +// SetPrecision sets the range index precision. +// Beta: The Range algorithm is experimental only. It is not intended for public use. It is subject to breaking changes. +func (ro *RangeOptions) SetPrecision(precision int32) *RangeOptions { + ro.Precision = &precision + return ro +} + // MergeEncryptOptions combines the argued EncryptOptions in a last-one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeEncryptOptions(opts ...*EncryptOptions) *EncryptOptions { eo := Encrypt() for _, opt := range opts { @@ -97,6 +142,9 @@ func MergeEncryptOptions(opts ...*EncryptOptions) *EncryptOptions { if opt.ContentionFactor != nil { eo.ContentionFactor = opt.ContentionFactor } + if opt.RangeOptions != nil { + eo.RangeOptions = opt.RangeOptions + } } return eo diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/estimatedcountoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/estimatedcountoptions.go index 6ac53809a..d088af9c9 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/estimatedcountoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/estimatedcountoptions.go @@ -46,6 +46,9 @@ func (eco *EstimatedDocumentCountOptions) SetMaxTime(d time.Duration) *Estimated // MergeEstimatedDocumentCountOptions combines the given EstimatedDocumentCountOptions instances into a single // EstimatedDocumentCountOptions in a last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. 
Users should create a +// single options struct instead. func MergeEstimatedDocumentCountOptions(opts ...*EstimatedDocumentCountOptions) *EstimatedDocumentCountOptions { e := EstimatedDocumentCount() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/findoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/findoptions.go index 219a95940..fa3bf1197 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/findoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/findoptions.go @@ -251,6 +251,9 @@ func (f *FindOptions) SetSort(sort interface{}) *FindOptions { } // MergeFindOptions combines the given FindOptions instances into a single FindOptions in a last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeFindOptions(opts ...*FindOptions) *FindOptions { fo := Find() for _, opt := range opts { @@ -549,6 +552,9 @@ func (f *FindOneOptions) SetSort(sort interface{}) *FindOneOptions { // MergeFindOneOptions combines the given FindOneOptions instances into a single FindOneOptions in a last-one-wins // fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeFindOneOptions(opts ...*FindOneOptions) *FindOneOptions { fo := FindOne() for _, opt := range opts { @@ -742,6 +748,9 @@ func (f *FindOneAndReplaceOptions) SetLet(let interface{}) *FindOneAndReplaceOpt // MergeFindOneAndReplaceOptions combines the given FindOneAndReplaceOptions instances into a single // FindOneAndReplaceOptions in a last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeFindOneAndReplaceOptions(opts ...*FindOneAndReplaceOptions) *FindOneAndReplaceOptions { fo := FindOneAndReplace() for _, opt := range opts { @@ -922,6 +931,9 @@ func (f *FindOneAndUpdateOptions) SetLet(let interface{}) *FindOneAndUpdateOptio // MergeFindOneAndUpdateOptions combines the given FindOneAndUpdateOptions instances into a single // FindOneAndUpdateOptions in a last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeFindOneAndUpdateOptions(opts ...*FindOneAndUpdateOptions) *FindOneAndUpdateOptions { fo := FindOneAndUpdate() for _, opt := range opts { @@ -1062,6 +1074,9 @@ func (f *FindOneAndDeleteOptions) SetLet(let interface{}) *FindOneAndDeleteOptio // MergeFindOneAndDeleteOptions combines the given FindOneAndDeleteOptions instances into a single // FindOneAndDeleteOptions in a last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeFindOneAndDeleteOptions(opts ...*FindOneAndDeleteOptions) *FindOneAndDeleteOptions { fo := FindOneAndDelete() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/gridfsoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/gridfsoptions.go index 9221585ba..c8d347f4e 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/gridfsoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/gridfsoptions.go @@ -85,6 +85,9 @@ func (b *BucketOptions) SetReadPreference(rp *readpref.ReadPref) *BucketOptions } // MergeBucketOptions combines the given BucketOptions instances into a single BucketOptions in a last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeBucketOptions(opts ...*BucketOptions) *BucketOptions { b := GridFSBucket() @@ -144,6 +147,9 @@ func (u *UploadOptions) SetMetadata(doc interface{}) *UploadOptions { } // MergeUploadOptions combines the given UploadOptions instances into a single UploadOptions in a last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeUploadOptions(opts ...*UploadOptions) *UploadOptions { u := GridFSUpload() @@ -192,6 +198,9 @@ func (n *NameOptions) SetRevision(r int32) *NameOptions { } // MergeNameOptions combines the given NameOptions instances into a single *NameOptions in a last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeNameOptions(opts ...*NameOptions) *NameOptions { n := GridFSName() n.Revision = &DefaultRevision @@ -296,6 +305,9 @@ func (f *GridFSFindOptions) SetSort(sort interface{}) *GridFSFindOptions { // MergeGridFSFindOptions combines the given GridFSFindOptions instances into a single GridFSFindOptions in a // last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeGridFSFindOptions(opts ...*GridFSFindOptions) *GridFSFindOptions { fo := GridFSFind() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/indexoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/indexoptions.go index 4c2d6920a..ab7e2b3f6 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/indexoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/indexoptions.go @@ -77,6 +77,9 @@ func (c *CreateIndexesOptions) SetCommitQuorumVotingMembers() *CreateIndexesOpti // MergeCreateIndexesOptions combines the given CreateIndexesOptions into a single CreateIndexesOptions in a last one // wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeCreateIndexesOptions(opts ...*CreateIndexesOptions) *CreateIndexesOptions { c := CreateIndexes() for _, opt := range opts { @@ -123,6 +126,9 @@ func (d *DropIndexesOptions) SetMaxTime(duration time.Duration) *DropIndexesOpti // MergeDropIndexesOptions combines the given DropIndexesOptions into a single DropIndexesOptions in a last-one-wins // fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeDropIndexesOptions(opts ...*DropIndexesOptions) *DropIndexesOptions { c := DropIndexes() for _, opt := range opts { @@ -174,6 +180,9 @@ func (l *ListIndexesOptions) SetMaxTime(d time.Duration) *ListIndexesOptions { // MergeListIndexesOptions combines the given ListIndexesOptions instances into a single *ListIndexesOptions in a // last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeListIndexesOptions(opts ...*ListIndexesOptions) *ListIndexesOptions { c := ListIndexes() for _, opt := range opts { @@ -409,6 +418,9 @@ func (i *IndexOptions) SetHidden(hidden bool) *IndexOptions { } // MergeIndexOptions combines the given IndexOptions into a single IndexOptions in a last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeIndexOptions(opts ...*IndexOptions) *IndexOptions { i := Index() diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/insertoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/insertoptions.go index 08c46b281..82137c60a 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/insertoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/insertoptions.go @@ -38,6 +38,9 @@ func (ioo *InsertOneOptions) SetComment(comment interface{}) *InsertOneOptions { // MergeInsertOneOptions combines the given InsertOneOptions instances into a single InsertOneOptions in a last-one-wins // fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeInsertOneOptions(opts ...*InsertOneOptions) *InsertOneOptions { ioOpts := InsertOne() for _, ioo := range opts { @@ -98,6 +101,9 @@ func (imo *InsertManyOptions) SetOrdered(b bool) *InsertManyOptions { // MergeInsertManyOptions combines the given InsertManyOptions instances into a single InsertManyOptions in a last one // wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeInsertManyOptions(opts ...*InsertManyOptions) *InsertManyOptions { imOpts := InsertMany() for _, imo := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/listcollectionsoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/listcollectionsoptions.go index 6f4b1cca6..69b8c997e 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/listcollectionsoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/listcollectionsoptions.go @@ -45,6 +45,9 @@ func (lc *ListCollectionsOptions) SetAuthorizedCollections(b bool) *ListCollecti // MergeListCollectionsOptions combines the given ListCollectionsOptions instances into a single *ListCollectionsOptions // in a last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeListCollectionsOptions(opts ...*ListCollectionsOptions) *ListCollectionsOptions { lc := ListCollections() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/listdatabasesoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/listdatabasesoptions.go index 496763667..fbd3df60d 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/listdatabasesoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/listdatabasesoptions.go @@ -37,6 +37,9 @@ func (ld *ListDatabasesOptions) SetAuthorizedDatabases(b bool) *ListDatabasesOpt // MergeListDatabasesOptions combines the given ListDatabasesOptions instances into a single *ListDatabasesOptions in a // last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeListDatabasesOptions(opts ...*ListDatabasesOptions) *ListDatabasesOptions { ld := ListDatabases() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/loggeroptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/loggeroptions.go new file mode 100644 index 000000000..b83793581 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/loggeroptions.go @@ -0,0 +1,115 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package options + +import ( + "go.mongodb.org/mongo-driver/internal/logger" +) + +// LogLevel is an enumeration representing the supported log severity levels. +type LogLevel int + +const ( + // LogLevelInfo enables logging of informational messages. These logs + // are high-level information about normal driver behavior. + LogLevelInfo LogLevel = LogLevel(logger.LevelInfo) + + // LogLevelDebug enables logging of debug messages. These logs can be + // voluminous and are intended for detailed information that may be + // helpful when debugging an application. + LogLevelDebug LogLevel = LogLevel(logger.LevelDebug) +) + +// LogComponent is an enumeration representing the "components" which can be +// logged against. A LogLevel can be configured on a per-component basis. +type LogComponent int + +const ( + // LogComponentAll enables logging for all components. + LogComponentAll LogComponent = LogComponent(logger.ComponentAll) + + // LogComponentCommand enables command monitor logging. + LogComponentCommand LogComponent = LogComponent(logger.ComponentCommand) + + // LogComponentTopology enables topology logging. + LogComponentTopology LogComponent = LogComponent(logger.ComponentTopology) + + // LogComponentServerSelection enables server selection logging. 
+ LogComponentServerSelection LogComponent = LogComponent(logger.ComponentServerSelection) + + // LogComponentConnection enables connection services logging. + LogComponentConnection LogComponent = LogComponent(logger.ComponentConnection) +) + +// LogSink is an interface that can be implemented to provide a custom sink for +// the driver's logs. +type LogSink interface { + // Info logs a non-error message with the given key/value pairs. This + // method will only be called if the provided level has been defined + // for a component in the LoggerOptions. + // + // Here are the following level mappings for V = "Verbosity": + // + // - V(0): off + // - V(1): informational + // - V(2): debugging + // + // This level mapping is taken from the go-logr/logr library + // specifications, specifically: + // + // "Level V(0) is the default, and logger.V(0).Info() has the same + // meaning as logger.Info()." + Info(level int, message string, keysAndValues ...interface{}) + + // Error logs an error message with the given key/value pairs + Error(err error, message string, keysAndValues ...interface{}) +} + +// LoggerOptions represent options used to configure Logging in the Go Driver. +type LoggerOptions struct { + // ComponentLevels is a map of LogComponent to LogLevel. The LogLevel + // for a given LogComponent will be used to determine if a log message + // should be logged. + ComponentLevels map[LogComponent]LogLevel + + // Sink is the LogSink that will be used to log messages. If this is + // nil, the driver will use the standard logging library. + Sink LogSink + + // MaxDocumentLength is the maximum length of a document to be logged. + // If the underlying document is larger than this value, it will be + // truncated and appended with an ellipses "...". + MaxDocumentLength uint +} + +// Logger creates a new LoggerOptions instance. 
+func Logger() *LoggerOptions { + return &LoggerOptions{ + ComponentLevels: map[LogComponent]LogLevel{}, + } +} + +// SetComponentLevel sets the LogLevel value for a LogComponent. +func (opts *LoggerOptions) SetComponentLevel(component LogComponent, level LogLevel) *LoggerOptions { + opts.ComponentLevels[component] = level + + return opts +} + +// SetMaxDocumentLength sets the maximum length of a document to be logged. +func (opts *LoggerOptions) SetMaxDocumentLength(maxDocumentLength uint) *LoggerOptions { + opts.MaxDocumentLength = maxDocumentLength + + return opts +} + +// SetSink sets the LogSink to use for logging. +func (opts *LoggerOptions) SetSink(sink LogSink) *LoggerOptions { + opts.Sink = sink + + return opts +} diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go index 25689f521..fd17ce44e 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go @@ -31,6 +31,8 @@ type Collation struct { } // ToDocument converts the Collation to a bson.Raw. +// +// Deprecated: Marshaling a Collation to BSON will not be supported in Go Driver 2.0. func (co *Collation) ToDocument() bson.Raw { idx, doc := bsoncore.AppendDocumentStart(nil) if co.Locale != "" { @@ -107,14 +109,24 @@ const ( WhenAvailable FullDocument = "whenAvailable" ) +// TODO(GODRIVER-2617): Once Registry is removed, ArrayFilters doesn't need to +// TODO be a separate type. Remove the type and update all ArrayFilters fields +// TODO to be type []interface{}. + // ArrayFilters is used to hold filters for the array filters CRUD option. If a registry is nil, bson.DefaultRegistry // will be used when converting the filter interfaces to BSON. type ArrayFilters struct { - Registry *bsoncodec.Registry // The registry to use for converting filters. Defaults to bson.DefaultRegistry. 
- Filters []interface{} // The filters to apply + // Registry is the registry to use for converting filters. Defaults to bson.DefaultRegistry. + // + // Deprecated: Marshaling ArrayFilters to BSON will not be supported in Go Driver 2.0. + Registry *bsoncodec.Registry + + Filters []interface{} // The filters to apply } // ToArray builds a []bson.Raw from the provided ArrayFilters. +// +// Deprecated: Marshaling ArrayFilters to BSON will not be supported in Go Driver 2.0. func (af *ArrayFilters) ToArray() ([]bson.Raw, error) { registry := af.Registry if registry == nil { @@ -133,6 +145,8 @@ func (af *ArrayFilters) ToArray() ([]bson.Raw, error) { // ToArrayDocument builds a BSON array for the array filters CRUD option. If the registry for af is nil, // bson.DefaultRegistry will be used when converting the filter interfaces to BSON. +// +// Deprecated: Marshaling ArrayFilters to BSON will not be supported in Go Driver 2.0. func (af *ArrayFilters) ToArrayDocument() (bson.Raw, error) { registry := af.Registry if registry == nil { @@ -154,12 +168,16 @@ func (af *ArrayFilters) ToArrayDocument() (bson.Raw, error) { // MarshalError is returned when attempting to transform a value into a document // results in an error. +// +// Deprecated: MarshalError is unused and will be removed in Go Driver 2.0. type MarshalError struct { Value interface{} Err error } // Error implements the error interface. +// +// Deprecated: MarshalError is unused and will be removed in Go Driver 2.0. 
func (me MarshalError) Error() string { return fmt.Sprintf("cannot transform type %s to a bson.Raw", reflect.TypeOf(me.Value)) } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/replaceoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/replaceoptions.go index f5bc6b98e..f7d396019 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/replaceoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/replaceoptions.go @@ -85,6 +85,9 @@ func (ro *ReplaceOptions) SetLet(l interface{}) *ReplaceOptions { // MergeReplaceOptions combines the given ReplaceOptions instances into a single ReplaceOptions in a last-one-wins // fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeReplaceOptions(opts ...*ReplaceOptions) *ReplaceOptions { rOpts := Replace() for _, ro := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/rewrapdatakeyoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/rewrapdatakeyoptions.go index 2ab1b3c5b..22ba58604 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/rewrapdatakeyoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/rewrapdatakeyoptions.go @@ -35,6 +35,9 @@ func (rmdko *RewrapManyDataKeyOptions) SetMasterKey(masterKey interface{}) *Rewr // MergeRewrapManyDataKeyOptions combines the given RewrapManyDataKeyOptions instances into a single // RewrapManyDataKeyOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeRewrapManyDataKeyOptions(opts ...*RewrapManyDataKeyOptions) *RewrapManyDataKeyOptions { rmdkOpts := RewrapManyDataKey() for _, rmdko := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/runcmdoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/runcmdoptions.go index ce2ec728d..b0cdec32c 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/runcmdoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/runcmdoptions.go @@ -6,7 +6,9 @@ package options -import "go.mongodb.org/mongo-driver/mongo/readpref" +import ( + "go.mongodb.org/mongo-driver/mongo/readpref" +) // RunCmdOptions represents options that can be used to configure a RunCommand operation. type RunCmdOptions struct { @@ -27,6 +29,9 @@ func (rc *RunCmdOptions) SetReadPreference(rp *readpref.ReadPref) *RunCmdOptions } // MergeRunCmdOptions combines the given RunCmdOptions instances into one *RunCmdOptions in a last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeRunCmdOptions(opts ...*RunCmdOptions) *RunCmdOptions { rc := RunCmd() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go new file mode 100644 index 000000000..9774d615b --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go @@ -0,0 +1,41 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package options + +// SearchIndexesOptions represents options that can be used to configure a SearchIndexView. 
+type SearchIndexesOptions struct { + Name *string +} + +// SearchIndexes creates a new SearchIndexesOptions instance. +func SearchIndexes() *SearchIndexesOptions { + return &SearchIndexesOptions{} +} + +// SetName sets the value for the Name field. +func (sio *SearchIndexesOptions) SetName(name string) *SearchIndexesOptions { + sio.Name = &name + return sio +} + +// CreateSearchIndexesOptions represents options that can be used to configure a SearchIndexView.CreateOne or +// SearchIndexView.CreateMany operation. +type CreateSearchIndexesOptions struct { +} + +// ListSearchIndexesOptions represents options that can be used to configure a SearchIndexView.List operation. +type ListSearchIndexesOptions struct { + AggregateOpts *AggregateOptions +} + +// DropSearchIndexOptions represents options that can be used to configure a SearchIndexView.DropOne operation. +type DropSearchIndexOptions struct { +} + +// UpdateSearchIndexOptions represents options that can be used to configure a SearchIndexView.UpdateOne operation. +type UpdateSearchIndexOptions struct { +} diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/sessionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/sessionoptions.go index ab5ecb5a4..e1eab098b 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/sessionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/sessionoptions.go @@ -98,6 +98,9 @@ func (s *SessionOptions) SetSnapshot(b bool) *SessionOptions { // MergeSessionOptions combines the given SessionOptions instances into a single SessionOptions in a last-one-wins // fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeSessionOptions(opts ...*SessionOptions) *SessionOptions { s := Session() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/transactionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/transactionoptions.go index 0b9d1081b..9270cd20d 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/transactionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/transactionoptions.go @@ -76,6 +76,9 @@ func (t *TransactionOptions) SetMaxCommitTime(mct *time.Duration) *TransactionOp // MergeTransactionOptions combines the given TransactionOptions instances into a single TransactionOptions in a // last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeTransactionOptions(opts ...*TransactionOptions) *TransactionOptions { t := Transaction() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/updateoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/updateoptions.go index 768d243ca..5206f9f01 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/updateoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/updateoptions.go @@ -95,6 +95,9 @@ func (uo *UpdateOptions) SetLet(l interface{}) *UpdateOptions { } // MergeUpdateOptions combines the given UpdateOptions instances into a single UpdateOptions in a last-one-wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeUpdateOptions(opts ...*UpdateOptions) *UpdateOptions { uOpts := Update() for _, uo := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/readconcern/readconcern.go b/vendor/go.mongodb.org/mongo-driver/mongo/readconcern/readconcern.go index 92429007f..51408e142 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/readconcern/readconcern.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/readconcern/readconcern.go @@ -5,57 +5,95 @@ // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 // Package readconcern defines read concerns for MongoDB operations. +// +// For more information about MongoDB read concerns, see +// https://www.mongodb.com/docs/manual/reference/read-concern/ package readconcern // import "go.mongodb.org/mongo-driver/mongo/readconcern" import ( + "errors" + "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" ) -// ReadConcern for replica sets and replica set shards determines which data to return from a query. +// A ReadConcern defines a MongoDB read concern, which allows you to control the consistency and +// isolation properties of the data read from replica sets and replica set shards. +// +// For more information about MongoDB read concerns, see +// https://www.mongodb.com/docs/manual/reference/read-concern/ type ReadConcern struct { - level string + Level string } // Option is an option to provide when creating a ReadConcern. +// +// Deprecated: Use the ReadConcern literal declaration instead. For example: +// +// &readconcern.ReadConcern{Level: "local"} type Option func(concern *ReadConcern) // Level creates an option that sets the level of a ReadConcern. +// +// Deprecated: Use the ReadConcern literal declaration instead. 
For example: +// +// &readconcern.ReadConcern{Level: "local"} func Level(level string) Option { return func(concern *ReadConcern) { - concern.level = level + concern.Level = level } } -// Local specifies that the query should return the instance’s most recent data. +// Local returns a ReadConcern that requests data from the instance with no guarantee that the data +// has been written to a majority of the replica set members (i.e. may be rolled back). +// +// For more information about read concern "local", see +// https://www.mongodb.com/docs/manual/reference/read-concern-local/ func Local() *ReadConcern { return New(Level("local")) } -// Majority specifies that the query should return the instance’s most recent data acknowledged as -// having been written to a majority of members in the replica set. +// Majority returns a ReadConcern that requests data that has been acknowledged by a majority of the +// replica set members (i.e. the documents read are durable and guaranteed not to roll back). +// +// For more information about read concern "majority", see +// https://www.mongodb.com/docs/manual/reference/read-concern-majority/ func Majority() *ReadConcern { return New(Level("majority")) } -// Linearizable specifies that the query should return data that reflects all successful writes -// issued with a write concern of "majority" and acknowledged prior to the start of the read operation. +// Linearizable returns a ReadConcern that requests data that reflects all successful +// majority-acknowledged writes that completed prior to the start of the read operation. +// +// For more information about read concern "linearizable", see +// https://www.mongodb.com/docs/manual/reference/read-concern-linearizable/ func Linearizable() *ReadConcern { return New(Level("linearizable")) } -// Available specifies that the query should return data from the instance with no guarantee -// that the data has been written to a majority of the replica set members (i.e. 
may be rolled back). +// Available returns a ReadConcern that requests data from an instance with no guarantee that the +// data has been written to a majority of the replica set members (i.e. may be rolled back). +// +// For more information about read concern "available", see +// https://www.mongodb.com/docs/manual/reference/read-concern-available/ func Available() *ReadConcern { return New(Level("available")) } -// Snapshot is only available for operations within multi-document transactions. +// Snapshot returns a ReadConcern that requests majority-committed data as it appears across shards +// from a specific single point in time in the recent past. +// +// For more information about read concern "snapshot", see +// https://www.mongodb.com/docs/manual/reference/read-concern-snapshot/ func Snapshot() *ReadConcern { return New(Level("snapshot")) } // New constructs a new read concern from the given string. +// +// Deprecated: Use the ReadConcern literal declaration instead. For example: +// +// &readconcern.ReadConcern{Level: "local"} func New(options ...Option) *ReadConcern { concern := &ReadConcern{} @@ -67,17 +105,25 @@ func New(options ...Option) *ReadConcern { } // MarshalBSONValue implements the bson.ValueMarshaler interface. +// +// Deprecated: Marshaling a ReadConcern to BSON will not be supported in Go Driver 2.0. func (rc *ReadConcern) MarshalBSONValue() (bsontype.Type, []byte, error) { + if rc == nil { + return 0, nil, errors.New("cannot marshal nil ReadConcern") + } + var elems []byte - if len(rc.level) > 0 { - elems = bsoncore.AppendStringElement(elems, "level", rc.level) + if len(rc.Level) > 0 { + elems = bsoncore.AppendStringElement(elems, "level", rc.Level) } return bsontype.EmbeddedDocument, bsoncore.BuildDocument(nil, elems), nil } // GetLevel returns the read concern level. +// +// Deprecated: Use the ReadConcern.Level field instead. 
func (rc *ReadConcern) GetLevel() string { - return rc.level + return rc.Level } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/readpref/options.go b/vendor/go.mongodb.org/mongo-driver/mongo/readpref/options.go index 815419845..c59b0705f 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/readpref/options.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/readpref/options.go @@ -29,9 +29,14 @@ func WithMaxStaleness(ms time.Duration) Option { } } -// WithTags sets a single tag set used to match -// a server. The last call to WithTags or WithTagSets -// overrides all previous calls to either method. +// WithTags specifies a single tag set used to match replica set members. If no members match the +// tag set, read operations will return an error. To avoid errors if no members match the tag set, use +// [WithTagSets] and include an empty tag set as the last tag set in the list. +// +// The last call to [WithTags] or [WithTagSets] overrides all previous calls to either method. +// +// For more information about read preference tags, see +// https://www.mongodb.com/docs/manual/core/read-preference-tags/ func WithTags(tags ...string) Option { return func(rp *ReadPref) error { length := len(tags) @@ -49,9 +54,16 @@ func WithTags(tags ...string) Option { } } -// WithTagSets sets the tag sets used to match -// a server. The last call to WithTags or WithTagSets -// overrides all previous calls to either method. +// WithTagSets specifies a list of tag sets used to match replica set members. If the list contains +// multiple tag sets, members are matched against each tag set in succession until a match is found. +// Once a match is found, the remaining tag sets are ignored. If no members match any of the tag +// sets, the read operation returns with an error. To avoid an error if no members match any of the +// tag sets, include an empty tag set as the last tag set in the list. 
+// +// The last call to [WithTags] or [WithTagSets] overrides all previous calls to either method. +// +// For more information about read preference tags, see +// https://www.mongodb.com/docs/manual/core/read-preference-tags/ func WithTagSets(tagSets ...tag.Set) Option { return func(rp *ReadPref) error { rp.tagSets = tagSets diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/readpref/readpref.go b/vendor/go.mongodb.org/mongo-driver/mongo/readpref/readpref.go index a07e2f8bc..e2a1d7f34 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/readpref/readpref.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/readpref/readpref.go @@ -20,11 +20,9 @@ var ( errInvalidReadPreference = errors.New("can not specify tags, max staleness, or hedge with mode primary") ) -var primary = ReadPref{mode: PrimaryMode} - // Primary constructs a read preference with a PrimaryMode. func Primary() *ReadPref { - return &primary + return &ReadPref{mode: PrimaryMode} } // PrimaryPreferred constructs a read preference with a PrimaryPreferredMode. diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/results.go b/vendor/go.mongodb.org/mongo-driver/mongo/results.go index 025823815..2dbaf2af6 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/results.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/results.go @@ -87,7 +87,7 @@ func newListDatabasesResultFromOperation(res operation.ListDatabasesResult) List type DatabaseSpecification struct { Name string // The name of the database. SizeOnDisk int64 // The total size of the database files on disk in bytes. - Empty bool // Specfies whether or not the database is empty. + Empty bool // Specifies whether or not the database is empty. } // UpdateResult is the result type returned from UpdateOne, UpdateMany, and ReplaceOne operations. @@ -201,6 +201,8 @@ type unmarshalIndexSpecification struct { } // UnmarshalBSON implements the bson.Unmarshaler interface. 
+// +// Deprecated: Unmarshaling an IndexSpecification from BSON will not be supported in Go Driver 2.0. func (i *IndexSpecification) UnmarshalBSON(data []byte) error { var temp unmarshalIndexSpecification if err := bson.Unmarshal(data, &temp); err != nil { @@ -258,6 +260,9 @@ type unmarshalCollectionSpecification struct { } // UnmarshalBSON implements the bson.Unmarshaler interface. +// +// Deprecated: Unmarshaling a CollectionSpecification from BSON will not be supported in Go Driver +// 2.0. func (cs *CollectionSpecification) UnmarshalBSON(data []byte) error { var temp unmarshalCollectionSpecification if err := bson.Unmarshal(data, &temp); err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go b/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go new file mode 100644 index 000000000..6a7871531 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go @@ -0,0 +1,279 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package mongo + +import ( + "context" + "fmt" + "strconv" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/mongo/writeconcern" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" + "go.mongodb.org/mongo-driver/x/mongo/driver" + "go.mongodb.org/mongo-driver/x/mongo/driver/operation" + "go.mongodb.org/mongo-driver/x/mongo/driver/session" +) + +// SearchIndexView is a type that can be used to create, drop, list and update search indexes on a collection. A SearchIndexView for +// a collection can be created by a call to Collection.SearchIndexes(). +type SearchIndexView struct { + coll *Collection +} + +// SearchIndexModel represents a new search index to be created. 
+type SearchIndexModel struct { + // A document describing the definition for the search index. It cannot be nil. + // See https://www.mongodb.com/docs/atlas/atlas-search/create-index/ for reference. + Definition interface{} + + // The search index options. + Options *options.SearchIndexesOptions +} + +// List executes a listSearchIndexes command and returns a cursor over the search indexes in the collection. +// +// The name parameter specifies the index name. A nil pointer matches all indexes. +// +// The opts parameter can be used to specify options for this operation (see the options.ListSearchIndexesOptions +// documentation). +func (siv SearchIndexView) List( + ctx context.Context, + searchIdxOpts *options.SearchIndexesOptions, + opts ...*options.ListSearchIndexesOptions, +) (*Cursor, error) { + if ctx == nil { + ctx = context.Background() + } + + index := bson.D{} + if searchIdxOpts != nil && searchIdxOpts.Name != nil { + index = bson.D{{"name", *searchIdxOpts.Name}} + } + + aggregateOpts := make([]*options.AggregateOptions, len(opts)) + for i, opt := range opts { + aggregateOpts[i] = opt.AggregateOpts + } + + return siv.coll.Aggregate(ctx, Pipeline{{{"$listSearchIndexes", index}}}, aggregateOpts...) +} + +// CreateOne executes a createSearchIndexes command to create a search index on the collection and returns the name of the new +// search index. See the SearchIndexView.CreateMany documentation for more information and an example. +func (siv SearchIndexView) CreateOne( + ctx context.Context, + model SearchIndexModel, + opts ...*options.CreateSearchIndexesOptions, +) (string, error) { + names, err := siv.CreateMany(ctx, []SearchIndexModel{model}, opts...) + if err != nil { + return "", err + } + + return names[0], nil +} + +// CreateMany executes a createSearchIndexes command to create multiple search indexes on the collection and returns +// the names of the new search indexes. 
+// +// For each SearchIndexModel in the models parameter, the index name can be specified. +// +// The opts parameter can be used to specify options for this operation (see the options.CreateSearchIndexesOptions +// documentation). +func (siv SearchIndexView) CreateMany( + ctx context.Context, + models []SearchIndexModel, + _ ...*options.CreateSearchIndexesOptions, +) ([]string, error) { + var indexes bsoncore.Document + aidx, indexes := bsoncore.AppendArrayStart(indexes) + + for i, model := range models { + if model.Definition == nil { + return nil, fmt.Errorf("search index model definition cannot be nil") + } + + definition, err := marshal(model.Definition, siv.coll.bsonOpts, siv.coll.registry) + if err != nil { + return nil, err + } + + var iidx int32 + iidx, indexes = bsoncore.AppendDocumentElementStart(indexes, strconv.Itoa(i)) + if model.Options != nil && model.Options.Name != nil { + indexes = bsoncore.AppendStringElement(indexes, "name", *model.Options.Name) + } + indexes = bsoncore.AppendDocumentElement(indexes, "definition", definition) + + indexes, err = bsoncore.AppendDocumentEnd(indexes, iidx) + if err != nil { + return nil, err + } + } + + indexes, err := bsoncore.AppendArrayEnd(indexes, aidx) + if err != nil { + return nil, err + } + + sess := sessionFromContext(ctx) + + if sess == nil && siv.coll.client.sessionPool != nil { + sess = session.NewImplicitClientSession(siv.coll.client.sessionPool, siv.coll.client.id) + defer sess.EndSession() + } + + err = siv.coll.client.validSession(sess) + if err != nil { + return nil, err + } + + wc := siv.coll.writeConcern + if sess.TransactionRunning() { + wc = nil + } + if !writeconcern.AckWrite(wc) { + sess = nil + } + + selector := makePinnedSelector(sess, siv.coll.writeSelector) + + op := operation.NewCreateSearchIndexes(indexes). + Session(sess).WriteConcern(wc).ClusterClock(siv.coll.client.clock). + Database(siv.coll.db.name).Collection(siv.coll.name).CommandMonitor(siv.coll.client.monitor). 
+ Deployment(siv.coll.client.deployment).ServerSelector(selector).ServerAPI(siv.coll.client.serverAPI). + Timeout(siv.coll.client.timeout) + + err = op.Execute(ctx) + if err != nil { + _, err = processWriteError(err) + return nil, err + } + + indexesCreated := op.Result().IndexesCreated + names := make([]string, 0, len(indexesCreated)) + for _, index := range indexesCreated { + names = append(names, index.Name) + } + + return names, nil +} + +// DropOne executes a dropSearchIndexes operation to drop a search index on the collection. +// +// The name parameter should be the name of the search index to drop. If the name is "*", ErrMultipleIndexDrop will be returned +// without running the command because doing so would drop all search indexes. +// +// The opts parameter can be used to specify options for this operation (see the options.DropSearchIndexOptions +// documentation). +func (siv SearchIndexView) DropOne( + ctx context.Context, + name string, + _ ...*options.DropSearchIndexOptions, +) error { + if name == "*" { + return ErrMultipleIndexDrop + } + + if ctx == nil { + ctx = context.Background() + } + + sess := sessionFromContext(ctx) + if sess == nil && siv.coll.client.sessionPool != nil { + sess = session.NewImplicitClientSession(siv.coll.client.sessionPool, siv.coll.client.id) + defer sess.EndSession() + } + + err := siv.coll.client.validSession(sess) + if err != nil { + return err + } + + wc := siv.coll.writeConcern + if sess.TransactionRunning() { + wc = nil + } + if !writeconcern.AckWrite(wc) { + sess = nil + } + + selector := makePinnedSelector(sess, siv.coll.writeSelector) + + op := operation.NewDropSearchIndex(name). + Session(sess).WriteConcern(wc).CommandMonitor(siv.coll.client.monitor). + ServerSelector(selector).ClusterClock(siv.coll.client.clock). + Database(siv.coll.db.name).Collection(siv.coll.name). + Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI). 
+ Timeout(siv.coll.client.timeout) + + err = op.Execute(ctx) + if de, ok := err.(driver.Error); ok && de.NamespaceNotFound() { + return nil + } + return err +} + +// UpdateOne executes a updateSearchIndex operation to update a search index on the collection. +// +// The name parameter should be the name of the search index to update. +// +// The definition parameter is a document describing the definition for the search index. It cannot be nil. +// +// The opts parameter can be used to specify options for this operation (see the options.UpdateSearchIndexOptions +// documentation). +func (siv SearchIndexView) UpdateOne( + ctx context.Context, + name string, + definition interface{}, + _ ...*options.UpdateSearchIndexOptions, +) error { + if definition == nil { + return fmt.Errorf("search index definition cannot be nil") + } + + indexDefinition, err := marshal(definition, siv.coll.bsonOpts, siv.coll.registry) + if err != nil { + return err + } + + if ctx == nil { + ctx = context.Background() + } + + sess := sessionFromContext(ctx) + if sess == nil && siv.coll.client.sessionPool != nil { + sess = session.NewImplicitClientSession(siv.coll.client.sessionPool, siv.coll.client.id) + defer sess.EndSession() + } + + err = siv.coll.client.validSession(sess) + if err != nil { + return err + } + + wc := siv.coll.writeConcern + if sess.TransactionRunning() { + wc = nil + } + if !writeconcern.AckWrite(wc) { + sess = nil + } + + selector := makePinnedSelector(sess, siv.coll.writeSelector) + + op := operation.NewUpdateSearchIndex(name, indexDefinition). + Session(sess).WriteConcern(wc).CommandMonitor(siv.coll.client.monitor). + ServerSelector(selector).ClusterClock(siv.coll.client.clock). + Database(siv.coll.db.name).Collection(siv.coll.name). + Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI). 
+ Timeout(siv.coll.client.timeout) + + return op.Execute(ctx) +} diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/session.go b/vendor/go.mongodb.org/mongo-driver/mongo/session.go index 37d5b7576..8f1e029b9 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/session.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/session.go @@ -13,7 +13,6 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -84,53 +83,58 @@ func SessionFromContext(ctx context.Context) Session { // https://www.mongodb.com/docs/manual/core/transactions/. // // Implementations of Session are not safe for concurrent use by multiple goroutines. -// -// StartTransaction starts a new transaction, configured with the given options, on this session. This method will -// return an error if there is already a transaction in-progress for this session. -// -// CommitTransaction commits the active transaction for this session. This method will return an error if there is no -// active transaction for this session or the transaction has been aborted. -// -// AbortTransaction aborts the active transaction for this session. This method will return an error if there is no -// active transaction for this session or the transaction has been committed or aborted. -// -// WithTransaction starts a transaction on this session and runs the fn callback. Errors with the -// TransientTransactionError and UnknownTransactionCommitResult labels are retried for up to 120 seconds. Inside the -// callback, sessCtx must be used as the Context parameter for any operations that should be part of the transaction. If -// the ctx parameter already has a Session attached to it, it will be replaced by this session. 
The fn callback may be -// run multiple times during WithTransaction due to retry attempts, so it must be idempotent. Non-retryable operation -// errors or any operation errors that occur after the timeout expires will be returned without retrying. If the -// callback fails, the driver will call AbortTransaction. Because this method must succeed to ensure that server-side -// resources are properly cleaned up, context deadlines and cancellations will not be respected during this call. For a -// usage example, see the Client.StartSession method documentation. -// -// ClusterTime, OperationTime, Client, and ID return the session's current cluster time, the session's current operation -// time, the Client associated with the session, and the ID document associated with the session, respectively. The ID -// document for a session is in the form {"id": }. -// -// EndSession method should abort any existing transactions and close the session. -// -// AdvanceClusterTime advances the cluster time for a session. This method will return an error if the session has ended. -// -// AdvanceOperationTime advances the operation time for a session. This method will return an error if the session has -// ended. type Session interface { - // Functions to modify session state. + // StartTransaction starts a new transaction, configured with the given options, on this + // session. This method returns an error if there is already a transaction in-progress for this + // session. StartTransaction(...*options.TransactionOptions) error + + // AbortTransaction aborts the active transaction for this session. This method returns an error + // if there is no active transaction for this session or if the transaction has been committed + // or aborted. AbortTransaction(context.Context) error + + // CommitTransaction commits the active transaction for this session. This method returns an + // error if there is no active transaction for this session or if the transaction has been + // aborted. 
CommitTransaction(context.Context) error - WithTransaction(ctx context.Context, fn func(sessCtx SessionContext) (interface{}, error), + + // WithTransaction starts a transaction on this session and runs the fn callback. Errors with + // the TransientTransactionError and UnknownTransactionCommitResult labels are retried for up to + // 120 seconds. Inside the callback, the SessionContext must be used as the Context parameter + // for any operations that should be part of the transaction. If the ctx parameter already has a + // Session attached to it, it will be replaced by this session. The fn callback may be run + // multiple times during WithTransaction due to retry attempts, so it must be idempotent. + // Non-retryable operation errors or any operation errors that occur after the timeout expires + // will be returned without retrying. If the callback fails, the driver will call + // AbortTransaction. Because this method must succeed to ensure that server-side resources are + // properly cleaned up, context deadlines and cancellations will not be respected during this + // call. For a usage example, see the Client.StartSession method documentation. + WithTransaction(ctx context.Context, fn func(ctx SessionContext) (interface{}, error), opts ...*options.TransactionOptions) (interface{}, error) + + // EndSession aborts any existing transactions and close the session. EndSession(context.Context) - // Functions to retrieve session properties. + // ClusterTime returns the current cluster time document associated with the session. ClusterTime() bson.Raw + + // OperationTime returns the current operation time document associated with the session. OperationTime() *primitive.Timestamp + + // Client the Client associated with the session. Client() *Client + + // ID returns the current ID document associated with the session. The ID document is in the + // form {"id": }. ID() bson.Raw - // Functions to modify mutable session properties. 
+ // AdvanceClusterTime advances the cluster time for a session. This method returns an error if + // the session has ended. AdvanceClusterTime(bson.Raw) error + + // AdvanceOperationTime advances the operation time for a session. This method returns an error + // if the session has ended. AdvanceOperationTime(*primitive.Timestamp) error session() @@ -175,7 +179,7 @@ func (s *sessionImpl) EndSession(ctx context.Context) { } // WithTransaction implements the Session interface. -func (s *sessionImpl) WithTransaction(ctx context.Context, fn func(sessCtx SessionContext) (interface{}, error), +func (s *sessionImpl) WithTransaction(ctx context.Context, fn func(ctx SessionContext) (interface{}, error), opts ...*options.TransactionOptions) (interface{}, error) { timeout := time.NewTimer(withTransactionTimeout) defer timeout.Stop() @@ -191,7 +195,7 @@ func (s *sessionImpl) WithTransaction(ctx context.Context, fn func(sessCtx Sessi if s.clientSession.TransactionRunning() { // Wrap the user-provided Context in a new one that behaves like context.Background() for deadlines and // cancellations, but forwards Value requests to the original one. - _ = s.AbortTransaction(internal.NewBackgroundContext(ctx)) + _ = s.AbortTransaction(newBackgroundContext(ctx)) } select { @@ -223,13 +227,13 @@ func (s *sessionImpl) WithTransaction(ctx context.Context, fn func(sessCtx Sessi if ctx.Err() != nil { // Wrap the user-provided Context in a new one that behaves like context.Background() for deadlines and // cancellations, but forwards Value requests to the original one. - _ = s.AbortTransaction(internal.NewBackgroundContext(ctx)) + _ = s.AbortTransaction(newBackgroundContext(ctx)) return nil, ctx.Err() } CommitLoop: for { - err = s.CommitTransaction(ctx) + err = s.CommitTransaction(newBackgroundContext(ctx)) // End when error is nil, as transaction has been committed. 
if err == nil { return res, nil diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/single_result.go b/vendor/go.mongodb.org/mongo-driver/mongo/single_result.go index 476025021..f6ed4dc88 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/single_result.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/single_result.go @@ -9,9 +9,11 @@ package mongo import ( "context" "errors" + "fmt" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" + "go.mongodb.org/mongo-driver/mongo/options" ) // ErrNoDocuments is returned by SingleResult methods when the operation that created the SingleResult did not return @@ -22,10 +24,12 @@ var ErrNoDocuments = errors.New("mongo: no documents in result") // SingleResult methods will return that error. If the operation did not return any documents, all SingleResult methods // will return ErrNoDocuments. type SingleResult struct { - err error - cur *Cursor - rdr bson.Raw - reg *bsoncodec.Registry + ctx context.Context + err error + cur *Cursor + rdr bson.Raw + bsonOpts *options.BSONOptions + reg *bsoncodec.Registry } // NewSingleResultFromDocument creates a SingleResult with the provided error, registry, and an underlying Cursor pre-loaded with @@ -70,13 +74,20 @@ func (sr *SingleResult) Decode(v interface{}) error { if sr.err = sr.setRdrContents(); sr.err != nil { return sr.err } - return bson.UnmarshalWithRegistry(sr.reg, sr.rdr, v) + + dec, err := getDecoder(sr.rdr, sr.bsonOpts, sr.reg) + if err != nil { + return fmt.Errorf("error configuring BSON decoder: %w", err) + } + + return dec.Decode(v) } -// DecodeBytes will return the document represented by this SingleResult as a bson.Raw. If there was an error from the -// operation that created this SingleResult, both the result and that error will be returned. If the operation returned -// no documents, this will return (nil, ErrNoDocuments). 
-func (sr *SingleResult) DecodeBytes() (bson.Raw, error) { +// Raw returns the document represented by this SingleResult as a bson.Raw. If +// there was an error from the operation that created this SingleResult, both +// the result and that error will be returned. If the operation returned no +// documents, this will return (nil, ErrNoDocuments). +func (sr *SingleResult) Raw() (bson.Raw, error) { if sr.err != nil { return sr.rdr, sr.err } @@ -87,6 +98,15 @@ func (sr *SingleResult) DecodeBytes() (bson.Raw, error) { return sr.rdr, nil } +// DecodeBytes will return the document represented by this SingleResult as a bson.Raw. If there was an error from the +// operation that created this SingleResult, both the result and that error will be returned. If the operation returned +// no documents, this will return (nil, ErrNoDocuments). +// +// Deprecated: Use [SingleResult.Raw] instead. +func (sr *SingleResult) DecodeBytes() (bson.Raw, error) { + return sr.Raw() +} + // setRdrContents will set the contents of rdr by iterating the underlying cursor if necessary. func (sr *SingleResult) setRdrContents() error { switch { @@ -95,9 +115,9 @@ func (sr *SingleResult) setRdrContents() error { case sr.rdr != nil: return nil case sr.cur != nil: - defer sr.cur.Close(context.TODO()) + defer sr.cur.Close(sr.ctx) - if !sr.cur.Next(context.TODO()) { + if !sr.cur.Next(sr.ctx) { if err := sr.cur.Err(); err != nil { return err } @@ -111,9 +131,10 @@ func (sr *SingleResult) setRdrContents() error { return ErrNoDocuments } -// Err returns the error from the operation that created this SingleResult. If the operation was successful but did not -// return any documents, Err will return ErrNoDocuments. If the operation was successful and returned a document, Err -// will return nil. +// Err provides a way to check for query errors without calling Decode. Err returns the error, if +// any, that was encountered while running the operation. 
If the operation was successful but did +// not return any documents, Err returns ErrNoDocuments. If this error is not nil, this error will +// also be returned from Decode. func (sr *SingleResult) Err() error { sr.err = sr.setRdrContents() diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go b/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go index 29c667e33..8e288d10b 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go @@ -5,10 +5,14 @@ // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 // Package writeconcern defines write concerns for MongoDB operations. +// +// For more information about MongoDB write concerns, see +// https://www.mongodb.com/docs/manual/reference/write-concern/ package writeconcern // import "go.mongodb.org/mongo-driver/mongo/writeconcern" import ( "errors" + "fmt" "time" "go.mongodb.org/mongo-driver/bson" @@ -16,35 +20,160 @@ import ( "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" ) +const majority = "majority" + // ErrInconsistent indicates that an inconsistent write concern was specified. +// +// Deprecated: ErrInconsistent will be removed in Go Driver 2.0. var ErrInconsistent = errors.New("a write concern cannot have both w=0 and j=true") // ErrEmptyWriteConcern indicates that a write concern has no fields set. +// +// Deprecated: ErrEmptyWriteConcern will be removed in Go Driver 2.0. var ErrEmptyWriteConcern = errors.New("a write concern must have at least one field set") // ErrNegativeW indicates that a negative integer `w` field was specified. +// +// Deprecated: ErrNegativeW will be removed in Go Driver 2.0. var ErrNegativeW = errors.New("write concern `w` field cannot be a negative number") // ErrNegativeWTimeout indicates that a negative WTimeout was specified. +// +// Deprecated: ErrNegativeWTimeout will be removed in Go Driver 2.0. 
var ErrNegativeWTimeout = errors.New("write concern `wtimeout` field cannot be negative") -// WriteConcern describes the level of acknowledgement requested from MongoDB for write operations -// to a standalone mongod or to replica sets or to sharded clusters. +// A WriteConcern defines a MongoDB read concern, which describes the level of acknowledgment +// requested from MongoDB for write operations to a standalone mongod, to replica sets, or to +// sharded clusters. +// +// For more information about MongoDB write concerns, see +// https://www.mongodb.com/docs/manual/reference/write-concern/ type WriteConcern struct { - w interface{} - j bool + // W requests acknowledgment that the write operation has propagated to a + // specified number of mongod instances or to mongod instances with + // specified tags. It sets the the "w" option in a MongoDB write concern. + // + // W values must be a string or an int. + // + // Common values are: + // - "majority": requests acknowledgment that write operations have been + // durably committed to the calculated majority of the data-bearing + // voting members. + // - 1: requests acknowledgment that write operations have been written + // to 1 node. + // - 0: requests no acknowledgment of write operations + // + // For more information about the "w" option, see + // https://www.mongodb.com/docs/manual/reference/write-concern/#w-option + W interface{} + + // Journal requests acknowledgment from MongoDB that the write operation has + // been written to the on-disk journal. It sets the "j" option in a MongoDB + // write concern. + // + // For more information about the "j" option, see + // https://www.mongodb.com/docs/manual/reference/write-concern/#j-option + Journal *bool + + // WTimeout specifies a time limit for the write concern. It sets the + // "wtimeout" option in a MongoDB write concern. + // + // It is only applicable for "w" values greater than 1. 
Using a WTimeout and + // setting Timeout on the Client at the same time will result in undefined + // behavior. + // + // For more information about the "wtimeout" option, see + // https://www.mongodb.com/docs/manual/reference/write-concern/#wtimeout + WTimeout time.Duration +} + +// Unacknowledged returns a WriteConcern that requests no acknowledgment of +// write operations. +// +// For more information about write concern "w: 0", see +// https://www.mongodb.com/docs/manual/reference/write-concern/#mongodb-writeconcern-writeconcern.-number- +func Unacknowledged() *WriteConcern { + return &WriteConcern{W: 0} +} + +// W1 returns a WriteConcern that requests acknowledgment that write operations +// have been written to memory on one node (e.g. the standalone mongod or the +// primary in a replica set). +// +// For more information about write concern "w: 1", see +// https://www.mongodb.com/docs/manual/reference/write-concern/#mongodb-writeconcern-writeconcern.-number- +func W1() *WriteConcern { + return &WriteConcern{W: 1} +} + +// Journaled returns a WriteConcern that requests acknowledgment that write +// operations have been written to the on-disk journal on MongoDB. +// +// The database's default value for "w" determines how many nodes must write to +// their on-disk journal before the write operation is acknowledged. +// +// For more information about write concern "j: true", see +// https://www.mongodb.com/docs/manual/reference/write-concern/#mongodb-writeconcern-ournal +func Journaled() *WriteConcern { + journal := true + return &WriteConcern{Journal: &journal} +} - // NOTE(benjirewis): wTimeout will be deprecated in a future release. The more general Timeout - // option may be used in its place to control the amount of time that a single operation can run - // before returning an error. Using wTimeout and setting Timeout on the client will result in - // undefined behavior. 
- wTimeout time.Duration +// Majority returns a WriteConcern that requests acknowledgment that write +// operations have been durably committed to the calculated majority of the +// data-bearing voting members. +// +// Write concern "w: majority" typically requires write operations to be written +// to the on-disk journal before they are acknowledged, unless journaling is +// disabled on MongoDB or the "writeConcernMajorityJournalDefault" replica set +// configuration is set to false. +// +// For more information about write concern "w: majority", see +// https://www.mongodb.com/docs/manual/reference/write-concern/#mongodb-writeconcern-writeconcern.-majority- +func Majority() *WriteConcern { + return &WriteConcern{W: majority} +} + +// Custom returns a WriteConcern that requests acknowledgment that write +// operations have propagated to tagged members that satisfy the custom write +// concern defined in "settings.getLastErrorModes". +// +// For more information about custom write concern names, see +// https://www.mongodb.com/docs/manual/reference/write-concern/#mongodb-writeconcern-writeconcern.-custom-write-concern-name- +func Custom(tag string) *WriteConcern { + return &WriteConcern{W: tag} } // Option is an option to provide when creating a WriteConcern. +// +// Deprecated: Use the WriteConcern convenience functions or define a struct literal instead. +// For example: +// +// writeconcern.Majority() +// +// or +// +// journal := true +// &writeconcern.WriteConcern{ +// W: 2, +// Journal: &journal, +// } type Option func(concern *WriteConcern) // New constructs a new WriteConcern. +// +// Deprecated: Use the WriteConcern convenience functions or define a struct literal instead. 
+// For example: +// +// writeconcern.Majority() +// +// or +// +// journal := true +// &writeconcern.WriteConcern{ +// W: 2, +// Journal: &journal, +// } func New(options ...Option) *WriteConcern { concern := &WriteConcern{} @@ -57,89 +186,153 @@ func New(options ...Option) *WriteConcern { // W requests acknowledgement that write operations propagate to the specified number of mongod // instances. +// +// Deprecated: Use the Unacknowledged or W1 functions or define a struct literal instead. +// For example: +// +// writeconcern.Unacknowledged() +// +// or +// +// journal := true +// &writeconcern.WriteConcern{ +// W: 2, +// Journal: &journal, +// } func W(w int) Option { return func(concern *WriteConcern) { - concern.w = w + concern.W = w } } // WMajority requests acknowledgement that write operations propagate to the majority of mongod // instances. +// +// Deprecated: Use [Majority] instead. func WMajority() Option { return func(concern *WriteConcern) { - concern.w = "majority" + concern.W = majority } } // WTagSet requests acknowledgement that write operations propagate to the specified mongod // instance. +// +// Deprecated: Use [Custom] instead. func WTagSet(tag string) Option { return func(concern *WriteConcern) { - concern.w = tag + concern.W = tag } } // J requests acknowledgement from MongoDB that write operations are written to // the journal. +// +// Deprecated: Use the Journaled function or define a struct literal instead. +// For example: +// +// writeconcern.Journaled() +// +// or +// +// journal := true +// &writeconcern.WriteConcern{ +// W: 2, +// Journal: &journal, +// } func J(j bool) Option { return func(concern *WriteConcern) { - concern.j = j + // To maintain backward compatible behavior (now that the J field is a + // bool pointer), only set a value for J if the input is true. If the + // input is false, do not set a value, which omits "j" from the + // marshaled write concern. 
+ if j { + concern.Journal = &j + } } } -// WTimeout specifies specifies a time limit for the write concern. +// WTimeout specifies a time limit for the write concern. +// +// It is only applicable for "w" values greater than 1. Using a WTimeout and setting Timeout on the +// Client at the same time will result in undefined behavior. +// +// Deprecated: Use the WriteConcern convenience functions or define a struct literal instead. +// For example: +// +// wc := writeconcern.W1() +// wc.WTimeout = 30 * time.Second // -// NOTE(benjirewis): wTimeout will be deprecated in a future release. The more general Timeout -// option may be used in its place to control the amount of time that a single operation can run -// before returning an error. Using wTimeout and setting Timeout on the client will result in -// undefined behavior. +// or +// +// journal := true +// &writeconcern.WriteConcern{ +// W: "majority", +// WTimeout: 30 * time.Second, +// } func WTimeout(d time.Duration) Option { return func(concern *WriteConcern) { - concern.wTimeout = d + concern.WTimeout = d } } // MarshalBSONValue implements the bson.ValueMarshaler interface. +// +// Deprecated: Marshaling a WriteConcern to BSON will not be supported in Go +// Driver 2.0. func (wc *WriteConcern) MarshalBSONValue() (bsontype.Type, []byte, error) { - if !wc.IsValid() { - return bsontype.Type(0), nil, ErrInconsistent + if wc == nil { + return 0, nil, ErrEmptyWriteConcern } var elems []byte - - if wc.w != nil { - switch t := wc.w.(type) { + if wc.W != nil { + // Only support string or int values for W. That aligns with the + // documentation and the behavior of other functions, like Acknowledged. + switch w := wc.W.(type) { case int: - if t < 0 { - return bsontype.Type(0), nil, ErrNegativeW + if w < 0 { + return 0, nil, ErrNegativeW } - elems = bsoncore.AppendInt32Element(elems, "w", int32(t)) + // If Journal=true and W=0, return an error because that write + // concern is ambiguous. 
+ if wc.Journal != nil && *wc.Journal && w == 0 { + return 0, nil, ErrInconsistent + } + + elems = bsoncore.AppendInt32Element(elems, "w", int32(w)) case string: - elems = bsoncore.AppendStringElement(elems, "w", t) + elems = bsoncore.AppendStringElement(elems, "w", w) + default: + return 0, + nil, + fmt.Errorf("WriteConcern.W must be a string or int, but is a %T", wc.W) } } - if wc.j { - elems = bsoncore.AppendBooleanElement(elems, "j", wc.j) + if wc.Journal != nil { + elems = bsoncore.AppendBooleanElement(elems, "j", *wc.Journal) } - if wc.wTimeout < 0 { - return bsontype.Type(0), nil, ErrNegativeWTimeout + if wc.WTimeout < 0 { + return 0, nil, ErrNegativeWTimeout } - if wc.wTimeout != 0 { - elems = bsoncore.AppendInt64Element(elems, "wtimeout", int64(wc.wTimeout/time.Millisecond)) + if wc.WTimeout != 0 { + elems = bsoncore.AppendInt64Element(elems, "wtimeout", int64(wc.WTimeout/time.Millisecond)) } if len(elems) == 0 { - return bsontype.Type(0), nil, ErrEmptyWriteConcern + return 0, nil, ErrEmptyWriteConcern } - return bsontype.EmbeddedDocument, bsoncore.BuildDocument(nil, elems), nil + return bson.TypeEmbeddedDocument, bsoncore.BuildDocument(nil, elems), nil } // AcknowledgedValue returns true if a BSON RawValue for a write concern represents an acknowledged write concern. // The element's value must be a document representing a write concern. +// +// Deprecated: AcknowledgedValue will not be supported in Go Driver 2.0. func AcknowledgedValue(rawv bson.RawValue) bool { doc, ok := bsoncore.Value{Type: rawv.Type, Data: rawv.Value}.DocumentOK() if !ok { @@ -161,52 +354,69 @@ func AcknowledgedValue(rawv bson.RawValue) bool { // Acknowledged indicates whether or not a write with the given write concern will be acknowledged. 
func (wc *WriteConcern) Acknowledged() bool { - if wc == nil || wc.j { - return true - } - - switch v := wc.w.(type) { - case int: - if v == 0 { - return false - } - } - - return true + // Only {w: 0} or {w: 0, j: false} are an unacknowledged write concerns. All other values are + // acknowledged. + return wc == nil || wc.W != 0 || (wc.Journal != nil && *wc.Journal) } -// IsValid checks whether the write concern is invalid. +// IsValid returns true if the WriteConcern is valid. func (wc *WriteConcern) IsValid() bool { - if !wc.j { + if wc == nil { return true } - switch v := wc.w.(type) { + switch w := wc.W.(type) { case int: - if v == 0 { - return false - } + // A write concern with {w: int} must have a non-negative value and + // cannot have the combination {w: 0, j: true}. + return w >= 0 && (w > 0 || wc.Journal == nil || !*wc.Journal) + case string, nil: + // A write concern with {w: string} or no w specified is always valid. + return true + default: + // A write concern with an unsupported w type is not valid. + return false } - - return true } // GetW returns the write concern w level. +// +// Deprecated: Use the WriteConcern.W field instead. func (wc *WriteConcern) GetW() interface{} { - return wc.w + return wc.W } // GetJ returns the write concern journaling level. +// +// Deprecated: Use the WriteConcern.Journal field instead. func (wc *WriteConcern) GetJ() bool { - return wc.j + // Treat a nil Journal as false. That maintains backward compatibility with the existing + // behavior of GetJ where unset is false. If users want the real value of Journal, they can + // access the Journal field. + return wc.Journal != nil && *wc.Journal } // GetWTimeout returns the write concern timeout. +// +// Deprecated: Use the WriteConcern.WTimeout field instead. func (wc *WriteConcern) GetWTimeout() time.Duration { - return wc.wTimeout + return wc.WTimeout } // WithOptions returns a copy of this WriteConcern with the options set. 
+// +// Deprecated: Use the WriteConcern convenience functions or define a struct literal instead. +// For example: +// +// writeconcern.Majority() +// +// or +// +// journal := true +// &writeconcern.WriteConcern{ +// W: 2, +// Journal: &journal, +// } func (wc *WriteConcern) WithOptions(options ...Option) *WriteConcern { if wc == nil { return New(options...) @@ -222,6 +432,8 @@ func (wc *WriteConcern) WithOptions(options ...Option) *WriteConcern { } // AckWrite returns true if a write concern represents an acknowledged write +// +// Deprecated: Use [WriteConcern.Acknowledged] instead. func AckWrite(wc *WriteConcern) bool { return wc == nil || wc.Acknowledged() } diff --git a/vendor/go.mongodb.org/mongo-driver/tag/tag.go b/vendor/go.mongodb.org/mongo-driver/tag/tag.go index dc45f1e47..4faff5254 100644 --- a/vendor/go.mongodb.org/mongo-driver/tag/tag.go +++ b/vendor/go.mongodb.org/mongo-driver/tag/tag.go @@ -4,7 +4,10 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Package tag provides a way to define filters for tagged servers. +// Package tag provides types for filtering replica set members using tags in a read preference. +// +// For more information about read preference tags, see +// https://www.mongodb.com/docs/manual/core/read-preference-tags/ package tag // import "go.mongodb.org/mongo-driver/tag" import ( @@ -12,7 +15,7 @@ import ( "fmt" ) -// Tag is a name/vlaue pair. +// Tag is a name/value pair. type Tag struct { Name string Value string @@ -23,7 +26,10 @@ func (tag Tag) String() string { return fmt.Sprintf("%s=%s", tag.Name, tag.Value) } -// NewTagSetFromMap creates a new tag set from a map. +// NewTagSetFromMap creates a tag set from a map. 
+// +// For more information about read preference tags, see +// https://www.mongodb.com/docs/manual/core/read-preference-tags/ func NewTagSetFromMap(m map[string]string) Set { var set Set for k, v := range m { @@ -33,7 +39,10 @@ func NewTagSetFromMap(m map[string]string) Set { return set } -// NewTagSetsFromMaps creates new tag sets from maps. +// NewTagSetsFromMaps creates a list of tag sets from a slice of maps. +// +// For more information about read preference tags, see +// https://www.mongodb.com/docs/manual/core/read-preference-tags/ func NewTagSetsFromMaps(maps []map[string]string) []Set { sets := make([]Set, 0, len(maps)) for _, m := range maps { diff --git a/vendor/go.mongodb.org/mongo-driver/version/version.go b/vendor/go.mongodb.org/mongo-driver/version/version.go index d5f74e605..4bd1f9b5e 100644 --- a/vendor/go.mongodb.org/mongo-driver/version/version.go +++ b/vendor/go.mongodb.org/mongo-driver/version/version.go @@ -8,4 +8,4 @@ package version // import "go.mongodb.org/mongo-driver/version" // Driver is the current version of the driver. -var Driver = "v1.11.7" +var Driver = "v1.13.0" diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/array.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/array.go deleted file mode 100644 index 80359e8c7..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/array.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonx // import "go.mongodb.org/mongo-driver/x/bsonx" - -import ( - "bytes" - "errors" - "fmt" - "strconv" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// ErrNilArray indicates that an operation was attempted on a nil *Array. 
-var ErrNilArray = errors.New("array is nil") - -// Arr represents an array in BSON. -type Arr []Val - -// String implements the fmt.Stringer interface. -func (a Arr) String() string { - var buf bytes.Buffer - buf.Write([]byte("bson.Array[")) - for idx, val := range a { - if idx > 0 { - buf.Write([]byte(", ")) - } - fmt.Fprintf(&buf, "%s", val) - } - buf.WriteByte(']') - - return buf.String() -} - -// MarshalBSONValue implements the bsoncodec.ValueMarshaler interface. -func (a Arr) MarshalBSONValue() (bsontype.Type, []byte, error) { - if a == nil { - // TODO: Should we do this? - return bsontype.Null, nil, nil - } - - idx, dst := bsoncore.ReserveLength(nil) - for idx, value := range a { - t, data, _ := value.MarshalBSONValue() // marshalBSONValue never returns an error. - dst = append(dst, byte(t)) - dst = append(dst, strconv.Itoa(idx)...) - dst = append(dst, 0x00) - dst = append(dst, data...) - } - dst = append(dst, 0x00) - dst = bsoncore.UpdateLength(dst, idx, int32(len(dst[idx:]))) - return bsontype.Array, dst, nil -} - -// UnmarshalBSONValue implements the bsoncodec.ValueUnmarshaler interface. -func (a *Arr) UnmarshalBSONValue(t bsontype.Type, data []byte) error { - if a == nil { - return ErrNilArray - } - *a = (*a)[:0] - - elements, err := bsoncore.Document(data).Elements() - if err != nil { - return err - } - - for _, elem := range elements { - var val Val - rawval := elem.Value() - err = val.UnmarshalBSONValue(rawval.Type, rawval.Data) - if err != nil { - return err - } - *a = append(*a, val) - } - return nil -} - -// Equal compares this document to another, returning true if they are equal. 
-func (a Arr) Equal(a2 Arr) bool { - if len(a) != len(a2) { - return false - } - for idx := range a { - if !a[idx].Equal(a2[idx]) { - return false - } - } - return true -} - -func (Arr) idoc() {} diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go index 8ea60ba3c..6bc0afa70 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go @@ -7,10 +7,10 @@ package bsoncore import ( - "bytes" "fmt" "io" "strconv" + "strings" ) // NewArrayLengthError creates and returns an error for when the length of an array exceeds the @@ -53,7 +53,7 @@ func (a Array) DebugString() string { if len(a) < 5 { return "" } - var buf bytes.Buffer + var buf strings.Builder buf.WriteString("Array") length, rem, _ := ReadLength(a) // We know we have enough bytes to read the length buf.WriteByte('(') @@ -69,7 +69,7 @@ func (a Array) DebugString() string { buf.WriteString(fmt.Sprintf("", length)) break } - fmt.Fprintf(&buf, "%s", elem.Value().DebugString()) + buf.WriteString(elem.Value().DebugString()) if length != 1 { buf.WriteByte(',') } @@ -85,7 +85,7 @@ func (a Array) String() string { if len(a) < 5 { return "" } - var buf bytes.Buffer + var buf strings.Builder buf.WriteByte('[') length, rem, _ := ReadLength(a) // We know we have enough bytes to read the length @@ -100,7 +100,7 @@ func (a Array) String() string { if !ok { return "" } - fmt.Fprintf(&buf, "%s", elem.Value().String()) + buf.WriteString(elem.Value().String()) if length > 1 { buf.WriteByte(',') } diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go index 17aad6d71..88133293e 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go @@ -4,25 +4,6 @@ // not use this file except in compliance 
with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Package bsoncore contains functions that can be used to encode and decode BSON -// elements and values to or from a slice of bytes. These functions are aimed at -// allowing low level manipulation of BSON and can be used to build a higher -// level BSON library. -// -// The Read* functions within this package return the values of the element and -// a boolean indicating if the values are valid. A boolean was used instead of -// an error because any error that would be returned would be the same: not -// enough bytes. This library attempts to do no validation, it will only return -// false if there are not enough bytes for an item to be read. For example, the -// ReadDocument function checks the length, if that length is larger than the -// number of bytes available, it will return false, if there are enough bytes, it -// will return those bytes and true. It is the consumers responsibility to -// validate those bytes. -// -// The Append* functions within this package will append the type value to the -// given dst slice. If the slice has enough capacity, it will not grow the -// slice. The Append*Element functions within this package operate in the same -// way, but additionally append the BSON type and the key before the value. package bsoncore // import "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" import ( @@ -254,7 +235,7 @@ func BuildDocumentValue(elems ...[]byte) Value { return Value{Type: bsontype.EmbeddedDocument, Data: BuildDocument(nil, elems...)} } -// BuildDocumentElement will append a BSON embedded document elemnt using key and the provided +// BuildDocumentElement will append a BSON embedded document element using key and the provided // elements and return the extended buffer. func BuildDocumentElement(dst []byte, key string, elems ...[]byte) []byte { return BuildDocument(AppendHeader(dst, bsontype.EmbeddedDocument, key), elems...) 
@@ -844,6 +825,9 @@ func readLengthBytes(src []byte) ([]byte, []byte, bool) { if !ok { return nil, src, false } + if l < 4 { + return nil, src, false + } if len(src) < int(l) { return nil, src, false } diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go new file mode 100644 index 000000000..6837b53fc --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go @@ -0,0 +1,29 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package bsoncore contains functions that can be used to encode and decode BSON +// elements and values to or from a slice of bytes. These functions are aimed at +// allowing low level manipulation of BSON and can be used to build a higher +// level BSON library. +// +// The Read* functions within this package return the values of the element and +// a boolean indicating if the values are valid. A boolean was used instead of +// an error because any error that would be returned would be the same: not +// enough bytes. This library attempts to do no validation, it will only return +// false if there are not enough bytes for an item to be read. For example, the +// ReadDocument function checks the length, if that length is larger than the +// number of bytes available, it will return false, if there are enough bytes, it +// will return those bytes and true. It is the consumers responsibility to +// validate those bytes. +// +// The Append* functions within this package will append the type value to the +// given dst slice. If the slice has enough capacity, it will not grow the +// slice. 
The Append*Element functions within this package operate in the same +// way, but additionally append the BSON type and the key before the value. +// +// Warning: Package bsoncore is unstable and there is no backward compatibility +// guarantee. It is experimental and subject to change. +package bsoncore diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go index d6e4bb069..3f360f1ae 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go @@ -7,11 +7,11 @@ package bsoncore import ( - "bytes" "errors" "fmt" "io" "strconv" + "strings" "go.mongodb.org/mongo-driver/bson/bsontype" ) @@ -237,7 +237,7 @@ func (d Document) DebugString() string { if len(d) < 5 { return "" } - var buf bytes.Buffer + var buf strings.Builder buf.WriteString("Document") length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length buf.WriteByte('(') @@ -253,7 +253,7 @@ func (d Document) DebugString() string { buf.WriteString(fmt.Sprintf("", length)) break } - fmt.Fprintf(&buf, "%s ", elem.DebugString()) + buf.WriteString(elem.DebugString()) } buf.WriteByte('}') @@ -266,7 +266,7 @@ func (d Document) String() string { if len(d) < 5 { return "" } - var buf bytes.Buffer + var buf strings.Builder buf.WriteByte('{') length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length @@ -285,7 +285,7 @@ func (d Document) String() string { if !ok { return "" } - fmt.Fprintf(&buf, "%s", elem.String()) + buf.WriteString(elem.String()) first = false } buf.WriteByte('}') diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go index 3acb4222b..1fe0897c9 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go @@ -129,7 +129,7 
@@ func (e Element) String() string { if !valid { return "" } - return fmt.Sprintf(`"%s": %v`, key, val) + return "\"" + string(key) + "\": " + val.String() } // DebugString outputs a human readable version of RawElement. It will attempt to stringify the diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go index 9cf87d6d7..69c1f9edb 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go @@ -190,21 +190,14 @@ func (v Value) AsInt64OK() (int64, bool) { // AsFloat64 returns a BSON number as an float64. If the BSON type is not a numeric one, this method // will panic. // -// TODO(skriptble): Add support for Decimal128. -func (v Value) AsFloat64() float64 { return 0 } +// TODO(GODRIVER-2751): Implement AsFloat64. +// func (v Value) AsFloat64() float64 // AsFloat64OK functions the same as AsFloat64 but returns a boolean instead of panicking. False // indicates an error. // -// TODO(skriptble): Add support for Decimal128. -func (v Value) AsFloat64OK() (float64, bool) { return 0, false } - -// Add will add this value to another. This is currently only implemented for strings and numbers. -// If either value is a string, the other type is coerced into a string and added to the other. -// -// This method will alter v and will attempt to reuse the []byte of v. If the []byte is too small, -// it will be expanded. -func (v *Value) Add(v2 Value) error { return nil } +// TODO(GODRIVER-2751): Implement AsFloat64OK. +// func (v Value) AsFloat64OK() (float64, bool) // Equal compaes v to v2 and returns true if they are equal. 
func (v Value) Equal(v2 Value) bool { diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/constructor.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/constructor.go deleted file mode 100644 index a8be859dd..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/constructor.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonx - -import ( - "encoding/binary" - "math" - "time" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// IDoc is the interface implemented by Doc and MDoc. It allows either of these types to be provided -// to the Document function to create a Value. -type IDoc interface { - idoc() -} - -// Double constructs a BSON double Value. -func Double(f64 float64) Val { - v := Val{t: bsontype.Double} - binary.LittleEndian.PutUint64(v.bootstrap[0:8], math.Float64bits(f64)) - return v -} - -// String constructs a BSON string Value. -func String(str string) Val { return Val{t: bsontype.String}.writestring(str) } - -// Document constructs a Value from the given IDoc. If nil is provided, a BSON Null value will be -// returned. -func Document(doc IDoc) Val { - var v Val - switch tt := doc.(type) { - case Doc: - if tt == nil { - v.t = bsontype.Null - break - } - v.t = bsontype.EmbeddedDocument - v.primitive = tt - case MDoc: - if tt == nil { - v.t = bsontype.Null - break - } - v.t = bsontype.EmbeddedDocument - v.primitive = tt - default: - v.t = bsontype.Null - } - return v -} - -// Array constructs a Value from arr. If arr is nil, a BSON Null value is returned. -func Array(arr Arr) Val { - if arr == nil { - return Val{t: bsontype.Null} - } - return Val{t: bsontype.Array, primitive: arr} -} - -// Binary constructs a BSON binary Value. 
-func Binary(subtype byte, data []byte) Val { - return Val{t: bsontype.Binary, primitive: primitive.Binary{Subtype: subtype, Data: data}} -} - -// Undefined constructs a BSON binary Value. -func Undefined() Val { return Val{t: bsontype.Undefined} } - -// ObjectID constructs a BSON objectid Value. -func ObjectID(oid primitive.ObjectID) Val { - v := Val{t: bsontype.ObjectID} - copy(v.bootstrap[0:12], oid[:]) - return v -} - -// Boolean constructs a BSON boolean Value. -func Boolean(b bool) Val { - v := Val{t: bsontype.Boolean} - if b { - v.bootstrap[0] = 0x01 - } - return v -} - -// DateTime constructs a BSON datetime Value. -func DateTime(dt int64) Val { return Val{t: bsontype.DateTime}.writei64(dt) } - -// Time constructs a BSON datetime Value. -func Time(t time.Time) Val { - return Val{t: bsontype.DateTime}.writei64(t.Unix()*1e3 + int64(t.Nanosecond()/1e6)) -} - -// Null constructs a BSON binary Value. -func Null() Val { return Val{t: bsontype.Null} } - -// Regex constructs a BSON regex Value. -func Regex(pattern, options string) Val { - regex := primitive.Regex{Pattern: pattern, Options: options} - return Val{t: bsontype.Regex, primitive: regex} -} - -// DBPointer constructs a BSON dbpointer Value. -func DBPointer(ns string, ptr primitive.ObjectID) Val { - dbptr := primitive.DBPointer{DB: ns, Pointer: ptr} - return Val{t: bsontype.DBPointer, primitive: dbptr} -} - -// JavaScript constructs a BSON javascript Value. -func JavaScript(js string) Val { - return Val{t: bsontype.JavaScript}.writestring(js) -} - -// Symbol constructs a BSON symbol Value. -func Symbol(symbol string) Val { - return Val{t: bsontype.Symbol}.writestring(symbol) -} - -// CodeWithScope constructs a BSON code with scope Value. -func CodeWithScope(code string, scope IDoc) Val { - cws := primitive.CodeWithScope{Code: primitive.JavaScript(code), Scope: scope} - return Val{t: bsontype.CodeWithScope, primitive: cws} -} - -// Int32 constructs a BSON int32 Value. 
-func Int32(i32 int32) Val { - v := Val{t: bsontype.Int32} - v.bootstrap[0] = byte(i32) - v.bootstrap[1] = byte(i32 >> 8) - v.bootstrap[2] = byte(i32 >> 16) - v.bootstrap[3] = byte(i32 >> 24) - return v -} - -// Timestamp constructs a BSON timestamp Value. -func Timestamp(t, i uint32) Val { - v := Val{t: bsontype.Timestamp} - v.bootstrap[0] = byte(i) - v.bootstrap[1] = byte(i >> 8) - v.bootstrap[2] = byte(i >> 16) - v.bootstrap[3] = byte(i >> 24) - v.bootstrap[4] = byte(t) - v.bootstrap[5] = byte(t >> 8) - v.bootstrap[6] = byte(t >> 16) - v.bootstrap[7] = byte(t >> 24) - return v -} - -// Int64 constructs a BSON int64 Value. -func Int64(i64 int64) Val { return Val{t: bsontype.Int64}.writei64(i64) } - -// Decimal128 constructs a BSON decimal128 Value. -func Decimal128(d128 primitive.Decimal128) Val { - return Val{t: bsontype.Decimal128, primitive: d128} -} - -// MinKey constructs a BSON minkey Value. -func MinKey() Val { return Val{t: bsontype.MinKey} } - -// MaxKey constructs a BSON maxkey Value. -func MaxKey() Val { return Val{t: bsontype.MaxKey} } diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/document.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/document.go deleted file mode 100644 index 2d53bc18b..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/document.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonx - -import ( - "bytes" - "errors" - "fmt" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// ErrNilDocument indicates that an operation was attempted on a nil *bson.Document. -var ErrNilDocument = errors.New("document is nil") - -// KeyNotFound is an error type returned from the Lookup methods on Document. 
This type contains -// information about which key was not found and if it was actually not found or if a component of -// the key except the last was not a document nor array. -type KeyNotFound struct { - Key []string // The keys that were searched for. - Depth uint // Which key either was not found or was an incorrect type. - Type bsontype.Type // The type of the key that was found but was an incorrect type. -} - -func (knf KeyNotFound) Error() string { - depth := knf.Depth - if depth >= uint(len(knf.Key)) { - depth = uint(len(knf.Key)) - 1 - } - - if len(knf.Key) == 0 { - return "no keys were provided for lookup" - } - - if knf.Type != bsontype.Type(0) { - return fmt.Sprintf(`key "%s" was found but was not valid to traverse BSON type %s`, knf.Key[depth], knf.Type) - } - - return fmt.Sprintf(`key "%s" was not found`, knf.Key[depth]) -} - -// Doc is a type safe, concise BSON document representation. -type Doc []Elem - -// ReadDoc will create a Document using the provided slice of bytes. If the -// slice of bytes is not a valid BSON document, this method will return an error. -func ReadDoc(b []byte) (Doc, error) { - doc := make(Doc, 0) - err := doc.UnmarshalBSON(b) - if err != nil { - return nil, err - } - return doc, nil -} - -// Copy makes a shallow copy of this document. -func (d Doc) Copy() Doc { - d2 := make(Doc, len(d)) - copy(d2, d) - return d2 -} - -// Append adds an element to the end of the document, creating it from the key and value provided. -func (d Doc) Append(key string, val Val) Doc { - return append(d, Elem{Key: key, Value: val}) -} - -// Prepend adds an element to the beginning of the document, creating it from the key and value provided. -func (d Doc) Prepend(key string, val Val) Doc { - // TODO: should we just modify d itself instead of doing an alloc here? - return append(Doc{{Key: key, Value: val}}, d...) -} - -// Set replaces an element of a document. 
If an element with a matching key is -// found, the element will be replaced with the one provided. If the document -// does not have an element with that key, the element is appended to the -// document instead. -func (d Doc) Set(key string, val Val) Doc { - idx := d.IndexOf(key) - if idx == -1 { - return append(d, Elem{Key: key, Value: val}) - } - d[idx] = Elem{Key: key, Value: val} - return d -} - -// IndexOf returns the index of the first element with a key of key, or -1 if no element with a key -// was found. -func (d Doc) IndexOf(key string) int { - for i, e := range d { - if e.Key == key { - return i - } - } - return -1 -} - -// Delete removes the element with key if it exists and returns the updated Doc. -func (d Doc) Delete(key string) Doc { - idx := d.IndexOf(key) - if idx == -1 { - return d - } - return append(d[:idx], d[idx+1:]...) -} - -// Lookup searches the document and potentially subdocuments or arrays for the -// provided key. Each key provided to this method represents a layer of depth. -// -// This method will return an empty Value if they key does not exist. To know if they key actually -// exists, use LookupErr. -func (d Doc) Lookup(key ...string) Val { - val, _ := d.LookupErr(key...) - return val -} - -// LookupErr searches the document and potentially subdocuments or arrays for the -// provided key. Each key provided to this method represents a layer of depth. -func (d Doc) LookupErr(key ...string) (Val, error) { - elem, err := d.LookupElementErr(key...) - return elem.Value, err -} - -// LookupElement searches the document and potentially subdocuments or arrays for the -// provided key. Each key provided to this method represents a layer of depth. -// -// This method will return an empty Element if they key does not exist. To know if they key actually -// exists, use LookupElementErr. -func (d Doc) LookupElement(key ...string) Elem { - elem, _ := d.LookupElementErr(key...) 
- return elem -} - -// LookupElementErr searches the document and potentially subdocuments for the -// provided key. Each key provided to this method represents a layer of depth. -func (d Doc) LookupElementErr(key ...string) (Elem, error) { - // KeyNotFound operates by being created where the error happens and then the depth is - // incremented by 1 as each function unwinds. Whenever this function returns, it also assigns - // the Key slice to the key slice it has. This ensures that the proper depth is identified and - // the proper keys. - if len(key) == 0 { - return Elem{}, KeyNotFound{Key: key} - } - - var elem Elem - var err error - idx := d.IndexOf(key[0]) - if idx == -1 { - return Elem{}, KeyNotFound{Key: key} - } - - elem = d[idx] - if len(key) == 1 { - return elem, nil - } - - switch elem.Value.Type() { - case bsontype.EmbeddedDocument: - switch tt := elem.Value.primitive.(type) { - case Doc: - elem, err = tt.LookupElementErr(key[1:]...) - case MDoc: - elem, err = tt.LookupElementErr(key[1:]...) - } - default: - return Elem{}, KeyNotFound{Type: elem.Value.Type()} - } - switch tt := err.(type) { - case KeyNotFound: - tt.Depth++ - tt.Key = key - return Elem{}, tt - case nil: - return elem, nil - default: - return Elem{}, err // We can't actually hit this. - } -} - -// MarshalBSONValue implements the bsoncodec.ValueMarshaler interface. -// -// This method will never return an error. -func (d Doc) MarshalBSONValue() (bsontype.Type, []byte, error) { - if d == nil { - // TODO: Should we do this? - return bsontype.Null, nil, nil - } - data, _ := d.MarshalBSON() - return bsontype.EmbeddedDocument, data, nil -} - -// MarshalBSON implements the Marshaler interface. -// -// This method will never return an error. -func (d Doc) MarshalBSON() ([]byte, error) { return d.AppendMarshalBSON(nil) } - -// AppendMarshalBSON marshals Doc to BSON bytes, appending to dst. -// -// This method will never return an error. 
-func (d Doc) AppendMarshalBSON(dst []byte) ([]byte, error) { - idx, dst := bsoncore.ReserveLength(dst) - for _, elem := range d { - t, data, _ := elem.Value.MarshalBSONValue() // Value.MarshalBSONValue never returns an error. - dst = append(dst, byte(t)) - dst = append(dst, elem.Key...) - dst = append(dst, 0x00) - dst = append(dst, data...) - } - dst = append(dst, 0x00) - dst = bsoncore.UpdateLength(dst, idx, int32(len(dst[idx:]))) - return dst, nil -} - -// UnmarshalBSON implements the Unmarshaler interface. -func (d *Doc) UnmarshalBSON(b []byte) error { - if d == nil { - return ErrNilDocument - } - - if err := bsoncore.Document(b).Validate(); err != nil { - return err - } - - elems, err := bsoncore.Document(b).Elements() - if err != nil { - return err - } - var val Val - for _, elem := range elems { - rawv := elem.Value() - err = val.UnmarshalBSONValue(rawv.Type, rawv.Data) - if err != nil { - return err - } - *d = d.Append(elem.Key(), val) - } - return nil -} - -// UnmarshalBSONValue implements the bson.ValueUnmarshaler interface. -func (d *Doc) UnmarshalBSONValue(t bsontype.Type, data []byte) error { - if t != bsontype.EmbeddedDocument { - return fmt.Errorf("cannot unmarshal %s into a bsonx.Doc", t) - } - return d.UnmarshalBSON(data) -} - -// Equal compares this document to another, returning true if they are equal. -func (d Doc) Equal(id IDoc) bool { - switch tt := id.(type) { - case Doc: - d2 := tt - if len(d) != len(d2) { - return false - } - for idx := range d { - if !d[idx].Equal(d2[idx]) { - return false - } - } - case MDoc: - unique := make(map[string]struct{}) - for _, elem := range d { - unique[elem.Key] = struct{}{} - val, ok := tt[elem.Key] - if !ok { - return false - } - if !val.Equal(elem.Value) { - return false - } - } - if len(unique) != len(tt) { - return false - } - case nil: - return d == nil - default: - return false - } - - return true -} - -// String implements the fmt.Stringer interface. 
-func (d Doc) String() string { - var buf bytes.Buffer - buf.Write([]byte("bson.Document{")) - for idx, elem := range d { - if idx > 0 { - buf.Write([]byte(", ")) - } - fmt.Fprintf(&buf, "%v", elem) - } - buf.WriteByte('}') - - return buf.String() -} - -func (Doc) idoc() {} diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/element.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/element.go deleted file mode 100644 index 00d1ba377..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/element.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonx - -import ( - "fmt" - - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// ElementTypeError specifies that a method to obtain a BSON value an incorrect type was called on a bson.Value. -// -// TODO: rename this ValueTypeError. -type ElementTypeError struct { - Method string - Type bsontype.Type -} - -// Error implements the error interface. -func (ete ElementTypeError) Error() string { - return "Call of " + ete.Method + " on " + ete.Type.String() + " type" -} - -// Elem represents a BSON element. -// -// NOTE: Element cannot be the value of a map nor a property of a struct without special handling. -// The default encoders and decoders will not process Element correctly. To do so would require -// information loss since an Element contains a key, but the keys used when encoding a struct are -// the struct field names. Instead of using an Element, use a Value as a value in a map or a -// property of a struct. -type Elem struct { - Key string - Value Val -} - -// Equal compares e and e2 and returns true if they are equal. 
-func (e Elem) Equal(e2 Elem) bool { - if e.Key != e2.Key { - return false - } - return e.Value.Equal(e2.Value) -} - -func (e Elem) String() string { - // TODO(GODRIVER-612): When bsoncore has appenders for extended JSON use that here. - return fmt.Sprintf(`bson.Element{"%s": %v}`, e.Key, e.Value) -} diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/mdocument.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/mdocument.go deleted file mode 100644 index 7877f2240..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/mdocument.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonx - -import ( - "bytes" - "fmt" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// MDoc is an unordered, type safe, concise BSON document representation. This type should not be -// used if you require ordering of values or duplicate keys. -type MDoc map[string]Val - -// ReadMDoc will create a Doc using the provided slice of bytes. If the -// slice of bytes is not a valid BSON document, this method will return an error. -func ReadMDoc(b []byte) (MDoc, error) { - doc := make(MDoc) - err := doc.UnmarshalBSON(b) - if err != nil { - return nil, err - } - return doc, nil -} - -// Copy makes a shallow copy of this document. -func (d MDoc) Copy() MDoc { - d2 := make(MDoc, len(d)) - for k, v := range d { - d2[k] = v - } - return d2 -} - -// Lookup searches the document and potentially subdocuments or arrays for the -// provided key. Each key provided to this method represents a layer of depth. -// -// This method will return an empty Value if they key does not exist. To know if they key actually -// exists, use LookupErr. 
-func (d MDoc) Lookup(key ...string) Val { - val, _ := d.LookupErr(key...) - return val -} - -// LookupErr searches the document and potentially subdocuments or arrays for the -// provided key. Each key provided to this method represents a layer of depth. -func (d MDoc) LookupErr(key ...string) (Val, error) { - elem, err := d.LookupElementErr(key...) - return elem.Value, err -} - -// LookupElement searches the document and potentially subdocuments or arrays for the -// provided key. Each key provided to this method represents a layer of depth. -// -// This method will return an empty Element if they key does not exist. To know if they key actually -// exists, use LookupElementErr. -func (d MDoc) LookupElement(key ...string) Elem { - elem, _ := d.LookupElementErr(key...) - return elem -} - -// LookupElementErr searches the document and potentially subdocuments for the -// provided key. Each key provided to this method represents a layer of depth. -func (d MDoc) LookupElementErr(key ...string) (Elem, error) { - // KeyNotFound operates by being created where the error happens and then the depth is - // incremented by 1 as each function unwinds. Whenever this function returns, it also assigns - // the Key slice to the key slice it has. This ensures that the proper depth is identified and - // the proper keys. - if len(key) == 0 { - return Elem{}, KeyNotFound{Key: key} - } - - var elem Elem - var err error - val, ok := d[key[0]] - if !ok { - return Elem{}, KeyNotFound{Key: key} - } - - if len(key) == 1 { - return Elem{Key: key[0], Value: val}, nil - } - - switch val.Type() { - case bsontype.EmbeddedDocument: - switch tt := val.primitive.(type) { - case Doc: - elem, err = tt.LookupElementErr(key[1:]...) - case MDoc: - elem, err = tt.LookupElementErr(key[1:]...) 
- } - default: - return Elem{}, KeyNotFound{Type: val.Type()} - } - switch tt := err.(type) { - case KeyNotFound: - tt.Depth++ - tt.Key = key - return Elem{}, tt - case nil: - return elem, nil - default: - return Elem{}, err // We can't actually hit this. - } -} - -// MarshalBSONValue implements the bsoncodec.ValueMarshaler interface. -// -// This method will never return an error. -func (d MDoc) MarshalBSONValue() (bsontype.Type, []byte, error) { - if d == nil { - // TODO: Should we do this? - return bsontype.Null, nil, nil - } - data, _ := d.MarshalBSON() - return bsontype.EmbeddedDocument, data, nil -} - -// MarshalBSON implements the Marshaler interface. -// -// This method will never return an error. -func (d MDoc) MarshalBSON() ([]byte, error) { return d.AppendMarshalBSON(nil) } - -// AppendMarshalBSON marshals Doc to BSON bytes, appending to dst. -// -// This method will never return an error. -func (d MDoc) AppendMarshalBSON(dst []byte) ([]byte, error) { - idx, dst := bsoncore.ReserveLength(dst) - for k, v := range d { - t, data, _ := v.MarshalBSONValue() // Value.MarshalBSONValue never returns an error. - dst = append(dst, byte(t)) - dst = append(dst, k...) - dst = append(dst, 0x00) - dst = append(dst, data...) - } - dst = append(dst, 0x00) - dst = bsoncore.UpdateLength(dst, idx, int32(len(dst[idx:]))) - return dst, nil -} - -// UnmarshalBSON implements the Unmarshaler interface. -func (d *MDoc) UnmarshalBSON(b []byte) error { - if d == nil { - return ErrNilDocument - } - - if err := bsoncore.Document(b).Validate(); err != nil { - return err - } - - elems, err := bsoncore.Document(b).Elements() - if err != nil { - return err - } - var val Val - for _, elem := range elems { - rawv := elem.Value() - err = val.UnmarshalBSONValue(rawv.Type, rawv.Data) - if err != nil { - return err - } - (*d)[elem.Key()] = val - } - return nil -} - -// Equal compares this document to another, returning true if they are equal. 
-func (d MDoc) Equal(id IDoc) bool { - switch tt := id.(type) { - case MDoc: - d2 := tt - if len(d) != len(d2) { - return false - } - for key, value := range d { - value2, ok := d2[key] - if !ok { - return false - } - if !value.Equal(value2) { - return false - } - } - case Doc: - unique := make(map[string]struct{}) - for _, elem := range tt { - unique[elem.Key] = struct{}{} - val, ok := d[elem.Key] - if !ok { - return false - } - if !val.Equal(elem.Value) { - return false - } - } - if len(unique) != len(d) { - return false - } - case nil: - return d == nil - default: - return false - } - - return true -} - -// String implements the fmt.Stringer interface. -func (d MDoc) String() string { - var buf bytes.Buffer - buf.Write([]byte("bson.Document{")) - first := true - for key, value := range d { - if !first { - buf.Write([]byte(", ")) - } - fmt.Fprintf(&buf, "%v", Elem{Key: key, Value: value}) - first = false - } - buf.WriteByte('}') - - return buf.String() -} - -func (MDoc) idoc() {} diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/primitive_codecs.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/primitive_codecs.go deleted file mode 100644 index 01bd18267..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/primitive_codecs.go +++ /dev/null @@ -1,637 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonx - -import ( - "errors" - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsoncodec" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -var primitiveCodecs PrimitiveCodecs - -var tDocument = reflect.TypeOf((Doc)(nil)) -var tArray = reflect.TypeOf((Arr)(nil)) -var tValue = reflect.TypeOf(Val{}) -var tElementSlice = reflect.TypeOf(([]Elem)(nil)) - -// PrimitiveCodecs is a namespace for all of the default bsoncodec.Codecs for the primitive types -// defined in this package. -type PrimitiveCodecs struct{} - -// RegisterPrimitiveCodecs will register the encode and decode methods attached to PrimitiveCodecs -// with the provided RegistryBuilder. if rb is nil, a new empty RegistryBuilder will be created. -func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) { - if rb == nil { - panic(errors.New("argument to RegisterPrimitiveCodecs must not be nil")) - } - - rb. - RegisterTypeEncoder(tDocument, bsoncodec.ValueEncoderFunc(pc.DocumentEncodeValue)). - RegisterTypeEncoder(tArray, bsoncodec.ValueEncoderFunc(pc.ArrayEncodeValue)). - RegisterTypeEncoder(tValue, bsoncodec.ValueEncoderFunc(pc.ValueEncodeValue)). - RegisterTypeEncoder(tElementSlice, bsoncodec.ValueEncoderFunc(pc.ElementSliceEncodeValue)). - RegisterTypeDecoder(tDocument, bsoncodec.ValueDecoderFunc(pc.DocumentDecodeValue)). - RegisterTypeDecoder(tArray, bsoncodec.ValueDecoderFunc(pc.ArrayDecodeValue)). - RegisterTypeDecoder(tValue, bsoncodec.ValueDecoderFunc(pc.ValueDecodeValue)). - RegisterTypeDecoder(tElementSlice, bsoncodec.ValueDecoderFunc(pc.ElementSliceDecodeValue)) -} - -// DocumentEncodeValue is the ValueEncoderFunc for *Document. 
-func (pc PrimitiveCodecs) DocumentEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDocument { - return bsoncodec.ValueEncoderError{Name: "DocumentEncodeValue", Types: []reflect.Type{tDocument}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - - doc := val.Interface().(Doc) - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - return pc.encodeDocument(ec, dw, doc) -} - -// DocumentDecodeValue is the ValueDecoderFunc for *Document. -func (pc PrimitiveCodecs) DocumentDecodeValue(dctx bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDocument { - return bsoncodec.ValueDecoderError{Name: "DocumentDecodeValue", Types: []reflect.Type{tDocument}, Received: val} - } - - return pc.documentDecodeValue(dctx, vr, val.Addr().Interface().(*Doc)) -} - -func (pc PrimitiveCodecs) documentDecodeValue(dctx bsoncodec.DecodeContext, vr bsonrw.ValueReader, doc *Doc) error { - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - return pc.decodeDocument(dctx, dr, doc) -} - -// ArrayEncodeValue is the ValueEncoderFunc for *Array. -func (pc PrimitiveCodecs) ArrayEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tArray { - return bsoncodec.ValueEncoderError{Name: "ArrayEncodeValue", Types: []reflect.Type{tArray}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - - arr := val.Interface().(Arr) - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - dvw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - err = pc.encodeValue(ec, dvw, val) - - if err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -// ArrayDecodeValue is the ValueDecoderFunc for *Array. 
-func (pc PrimitiveCodecs) ArrayDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tArray { - return bsoncodec.ValueDecoderError{Name: "ArrayDecodeValue", Types: []reflect.Type{tArray}, Received: val} - } - - ar, err := vr.ReadArray() - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(tArray, 0, 0)) - } - val.SetLen(0) - - for { - vr, err := ar.ReadValue() - if err == bsonrw.ErrEOA { - break - } - if err != nil { - return err - } - - var elem Val - err = pc.valueDecodeValue(dc, vr, &elem) - if err != nil { - return err - } - - val.Set(reflect.Append(val, reflect.ValueOf(elem))) - } - - return nil -} - -// ElementSliceEncodeValue is the ValueEncoderFunc for []*Element. -func (pc PrimitiveCodecs) ElementSliceEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tElementSlice { - return bsoncodec.ValueEncoderError{Name: "ElementSliceEncodeValue", Types: []reflect.Type{tElementSlice}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - - return pc.DocumentEncodeValue(ec, vw, val.Convert(tDocument)) -} - -// ElementSliceDecodeValue is the ValueDecoderFunc for []*Element. 
-func (pc PrimitiveCodecs) ElementSliceDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tElementSlice { - return bsoncodec.ValueDecoderError{Name: "ElementSliceDecodeValue", Types: []reflect.Type{tElementSlice}, Received: val} - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, 0)) - } - - val.SetLen(0) - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - elems := make([]reflect.Value, 0) - for { - key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { - break - } - if err != nil { - return err - } - - var elem Elem - err = pc.elementDecodeValue(dc, vr, key, &elem) - if err != nil { - return err - } - - elems = append(elems, reflect.ValueOf(elem)) - } - - val.Set(reflect.Append(val, elems...)) - return nil -} - -// ValueEncodeValue is the ValueEncoderFunc for *Value. -func (pc PrimitiveCodecs) ValueEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tValue { - return bsoncodec.ValueEncoderError{Name: "ValueEncodeValue", Types: []reflect.Type{tValue}, Received: val} - } - - v := val.Interface().(Val) - - return pc.encodeValue(ec, vw, v) -} - -// ValueDecodeValue is the ValueDecoderFunc for *Value. -func (pc PrimitiveCodecs) ValueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tValue { - return bsoncodec.ValueDecoderError{Name: "ValueDecodeValue", Types: []reflect.Type{tValue}, Received: val} - } - - return pc.valueDecodeValue(dc, vr, val.Addr().Interface().(*Val)) -} - -// encodeDocument is a separate function that we use because CodeWithScope -// returns us a DocumentWriter and we need to do the same logic that we would do -// for a document but cannot use a Codec. 
-func (pc PrimitiveCodecs) encodeDocument(ec bsoncodec.EncodeContext, dw bsonrw.DocumentWriter, doc Doc) error { - for _, elem := range doc { - dvw, err := dw.WriteDocumentElement(elem.Key) - if err != nil { - return err - } - - err = pc.encodeValue(ec, dvw, elem.Value) - - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} - -// DecodeDocument haves decoding into a Doc from a bsonrw.DocumentReader. -func (pc PrimitiveCodecs) DecodeDocument(dctx bsoncodec.DecodeContext, dr bsonrw.DocumentReader, pdoc *Doc) error { - return pc.decodeDocument(dctx, dr, pdoc) -} - -func (pc PrimitiveCodecs) decodeDocument(dctx bsoncodec.DecodeContext, dr bsonrw.DocumentReader, pdoc *Doc) error { - if *pdoc == nil { - *pdoc = make(Doc, 0) - } - *pdoc = (*pdoc)[:0] - for { - key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { - break - } - if err != nil { - return err - } - - var elem Elem - err = pc.elementDecodeValue(dctx, vr, key, &elem) - if err != nil { - return err - } - - *pdoc = append(*pdoc, elem) - } - return nil -} - -func (pc PrimitiveCodecs) elementDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, key string, elem *Elem) error { - var val Val - switch vr.Type() { - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return err - } - val = Double(f64) - case bsontype.String: - str, err := vr.ReadString() - if err != nil { - return err - } - val = String(str) - case bsontype.EmbeddedDocument: - var embeddedDoc Doc - err := pc.documentDecodeValue(dc, vr, &embeddedDoc) - if err != nil { - return err - } - val = Document(embeddedDoc) - case bsontype.Array: - arr := reflect.New(tArray).Elem() - err := pc.ArrayDecodeValue(dc, vr, arr) - if err != nil { - return err - } - val = Array(arr.Interface().(Arr)) - case bsontype.Binary: - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - val = Binary(subtype, data) - case bsontype.Undefined: - err := vr.ReadUndefined() - if err != nil { - return err - 
} - val = Undefined() - case bsontype.ObjectID: - oid, err := vr.ReadObjectID() - if err != nil { - return err - } - val = ObjectID(oid) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return err - } - val = Boolean(b) - case bsontype.DateTime: - dt, err := vr.ReadDateTime() - if err != nil { - return err - } - val = DateTime(dt) - case bsontype.Null: - err := vr.ReadNull() - if err != nil { - return err - } - val = Null() - case bsontype.Regex: - pattern, options, err := vr.ReadRegex() - if err != nil { - return err - } - val = Regex(pattern, options) - case bsontype.DBPointer: - ns, pointer, err := vr.ReadDBPointer() - if err != nil { - return err - } - val = DBPointer(ns, pointer) - case bsontype.JavaScript: - js, err := vr.ReadJavascript() - if err != nil { - return err - } - val = JavaScript(js) - case bsontype.Symbol: - symbol, err := vr.ReadSymbol() - if err != nil { - return err - } - val = Symbol(symbol) - case bsontype.CodeWithScope: - code, scope, err := vr.ReadCodeWithScope() - if err != nil { - return err - } - var doc Doc - err = pc.decodeDocument(dc, scope, &doc) - if err != nil { - return err - } - val = CodeWithScope(code, doc) - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return err - } - val = Int32(i32) - case bsontype.Timestamp: - t, i, err := vr.ReadTimestamp() - if err != nil { - return err - } - val = Timestamp(t, i) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return err - } - val = Int64(i64) - case bsontype.Decimal128: - d128, err := vr.ReadDecimal128() - if err != nil { - return err - } - val = Decimal128(d128) - case bsontype.MinKey: - err := vr.ReadMinKey() - if err != nil { - return err - } - val = MinKey() - case bsontype.MaxKey: - err := vr.ReadMaxKey() - if err != nil { - return err - } - val = MaxKey() - default: - return fmt.Errorf("Cannot read unknown BSON type %s", vr.Type()) - } - - *elem = Elem{Key: key, Value: val} - return nil -} - -// 
encodeValue does not validation, and the callers must perform validation on val before calling -// this method. -func (pc PrimitiveCodecs) encodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val Val) error { - var err error - switch val.Type() { - case bsontype.Double: - err = vw.WriteDouble(val.Double()) - case bsontype.String: - err = vw.WriteString(val.StringValue()) - case bsontype.EmbeddedDocument: - var encoder bsoncodec.ValueEncoder - encoder, err = ec.LookupEncoder(tDocument) - if err != nil { - break - } - err = encoder.EncodeValue(ec, vw, reflect.ValueOf(val.Document())) - case bsontype.Array: - var encoder bsoncodec.ValueEncoder - encoder, err = ec.LookupEncoder(tArray) - if err != nil { - break - } - err = encoder.EncodeValue(ec, vw, reflect.ValueOf(val.Array())) - case bsontype.Binary: - // TODO: FIX THIS (╯°□°)╯︵ ┻━┻ - subtype, data := val.Binary() - err = vw.WriteBinaryWithSubtype(data, subtype) - case bsontype.Undefined: - err = vw.WriteUndefined() - case bsontype.ObjectID: - err = vw.WriteObjectID(val.ObjectID()) - case bsontype.Boolean: - err = vw.WriteBoolean(val.Boolean()) - case bsontype.DateTime: - err = vw.WriteDateTime(val.DateTime()) - case bsontype.Null: - err = vw.WriteNull() - case bsontype.Regex: - err = vw.WriteRegex(val.Regex()) - case bsontype.DBPointer: - err = vw.WriteDBPointer(val.DBPointer()) - case bsontype.JavaScript: - err = vw.WriteJavascript(val.JavaScript()) - case bsontype.Symbol: - err = vw.WriteSymbol(val.Symbol()) - case bsontype.CodeWithScope: - code, scope := val.CodeWithScope() - - var cwsw bsonrw.DocumentWriter - cwsw, err = vw.WriteCodeWithScope(code) - if err != nil { - break - } - - err = pc.encodeDocument(ec, cwsw, scope) - case bsontype.Int32: - err = vw.WriteInt32(val.Int32()) - case bsontype.Timestamp: - err = vw.WriteTimestamp(val.Timestamp()) - case bsontype.Int64: - err = vw.WriteInt64(val.Int64()) - case bsontype.Decimal128: - err = vw.WriteDecimal128(val.Decimal128()) - case bsontype.MinKey: - 
err = vw.WriteMinKey() - case bsontype.MaxKey: - err = vw.WriteMaxKey() - default: - err = fmt.Errorf("%T is not a valid BSON type to encode", val.Type()) - } - - return err -} - -func (pc PrimitiveCodecs) valueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val *Val) error { - switch vr.Type() { - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return err - } - *val = Double(f64) - case bsontype.String: - str, err := vr.ReadString() - if err != nil { - return err - } - *val = String(str) - case bsontype.EmbeddedDocument: - var embeddedDoc Doc - err := pc.documentDecodeValue(dc, vr, &embeddedDoc) - if err != nil { - return err - } - *val = Document(embeddedDoc) - case bsontype.Array: - arr := reflect.New(tArray).Elem() - err := pc.ArrayDecodeValue(dc, vr, arr) - if err != nil { - return err - } - *val = Array(arr.Interface().(Arr)) - case bsontype.Binary: - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - *val = Binary(subtype, data) - case bsontype.Undefined: - err := vr.ReadUndefined() - if err != nil { - return err - } - *val = Undefined() - case bsontype.ObjectID: - oid, err := vr.ReadObjectID() - if err != nil { - return err - } - *val = ObjectID(oid) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return err - } - *val = Boolean(b) - case bsontype.DateTime: - dt, err := vr.ReadDateTime() - if err != nil { - return err - } - *val = DateTime(dt) - case bsontype.Null: - err := vr.ReadNull() - if err != nil { - return err - } - *val = Null() - case bsontype.Regex: - pattern, options, err := vr.ReadRegex() - if err != nil { - return err - } - *val = Regex(pattern, options) - case bsontype.DBPointer: - ns, pointer, err := vr.ReadDBPointer() - if err != nil { - return err - } - *val = DBPointer(ns, pointer) - case bsontype.JavaScript: - js, err := vr.ReadJavascript() - if err != nil { - return err - } - *val = JavaScript(js) - case bsontype.Symbol: - symbol, err := 
vr.ReadSymbol() - if err != nil { - return err - } - *val = Symbol(symbol) - case bsontype.CodeWithScope: - code, scope, err := vr.ReadCodeWithScope() - if err != nil { - return err - } - var scopeDoc Doc - err = pc.decodeDocument(dc, scope, &scopeDoc) - if err != nil { - return err - } - *val = CodeWithScope(code, scopeDoc) - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return err - } - *val = Int32(i32) - case bsontype.Timestamp: - t, i, err := vr.ReadTimestamp() - if err != nil { - return err - } - *val = Timestamp(t, i) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return err - } - *val = Int64(i64) - case bsontype.Decimal128: - d128, err := vr.ReadDecimal128() - if err != nil { - return err - } - *val = Decimal128(d128) - case bsontype.MinKey: - err := vr.ReadMinKey() - if err != nil { - return err - } - *val = MinKey() - case bsontype.MaxKey: - err := vr.ReadMaxKey() - if err != nil { - return err - } - *val = MaxKey() - default: - return fmt.Errorf("Cannot read unknown BSON type %s", vr.Type()) - } - - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/reflectionfree_d_codec.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/reflectionfree_d_codec.go deleted file mode 100644 index 7e68e55c1..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/reflectionfree_d_codec.go +++ /dev/null @@ -1,1025 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonx - -import ( - "fmt" - "math" - "reflect" - "time" - - "go.mongodb.org/mongo-driver/bson/bsoncodec" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -var ( - tPrimitiveD = reflect.TypeOf(primitive.D{}) - tPrimitiveCWS = reflect.TypeOf(primitive.CodeWithScope{}) - defaultValueEncoders = bsoncodec.DefaultValueEncoders{} - defaultValueDecoders = bsoncodec.DefaultValueDecoders{} -) - -type reflectionFreeDCodec struct{} - -// ReflectionFreeDCodec is a ValueEncoder for the primitive.D type that does not use reflection. -var ReflectionFreeDCodec bsoncodec.ValueCodec = &reflectionFreeDCodec{} - -func (r *reflectionFreeDCodec) EncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tPrimitiveD { - return bsoncodec.ValueEncoderError{Name: "DEncodeValue", Types: []reflect.Type{tPrimitiveD}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - - doc := val.Interface().(primitive.D) - return r.encodeDocument(ec, vw, doc) -} - -func (r *reflectionFreeDCodec) DecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || !val.CanSet() || val.Type() != tPrimitiveD { - return bsoncodec.ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return fmt.Errorf("cannot decode %v into a primitive.D", vrType) - } - - doc, err := r.decodeDocument(dc, vr) - if err != nil { - return err - } - - val.Set(reflect.ValueOf(doc)) - return nil -} - -func (r *reflectionFreeDCodec) decodeDocument(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) 
(primitive.D, error) { - dr, err := vr.ReadDocument() - if err != nil { - return nil, err - } - - doc := primitive.D{} - for { - key, elemVr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { - break - } - if err != nil { - return nil, err - } - - val, err := r.decodeValue(dc, elemVr) - if err != nil { - return nil, err - } - doc = append(doc, primitive.E{Key: key, Value: val}) - } - - return doc, nil -} - -func (r *reflectionFreeDCodec) decodeArray(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) (primitive.A, error) { - ar, err := vr.ReadArray() - if err != nil { - return nil, err - } - - array := primitive.A{} - for { - arrayValReader, err := ar.ReadValue() - if err == bsonrw.ErrEOA { - break - } - if err != nil { - return nil, err - } - - val, err := r.decodeValue(dc, arrayValReader) - if err != nil { - return nil, err - } - array = append(array, val) - } - - return array, nil -} - -func (r *reflectionFreeDCodec) decodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) (interface{}, error) { - switch vrType := vr.Type(); vrType { - case bsontype.Null: - return nil, vr.ReadNull() - case bsontype.Double: - return vr.ReadDouble() - case bsontype.String: - return vr.ReadString() - case bsontype.Binary: - data, subtype, err := vr.ReadBinary() - if err != nil { - return nil, err - } - - return primitive.Binary{ - Data: data, - Subtype: subtype, - }, nil - case bsontype.Undefined: - return primitive.Undefined{}, vr.ReadUndefined() - case bsontype.ObjectID: - return vr.ReadObjectID() - case bsontype.Boolean: - return vr.ReadBoolean() - case bsontype.DateTime: - dt, err := vr.ReadDateTime() - if err != nil { - return nil, err - } - - return primitive.DateTime(dt), nil - case bsontype.Regex: - pattern, options, err := vr.ReadRegex() - if err != nil { - return nil, err - } - - return primitive.Regex{ - Pattern: pattern, - Options: options, - }, nil - case bsontype.DBPointer: - ns, oid, err := vr.ReadDBPointer() - if err != nil { - return nil, err - } - - return 
primitive.DBPointer{ - DB: ns, - Pointer: oid, - }, nil - case bsontype.JavaScript: - js, err := vr.ReadJavascript() - if err != nil { - return nil, err - } - - return primitive.JavaScript(js), nil - case bsontype.Symbol: - sym, err := vr.ReadSymbol() - if err != nil { - return nil, err - } - - return primitive.Symbol(sym), nil - case bsontype.CodeWithScope: - cws := reflect.New(tPrimitiveCWS).Elem() - err := defaultValueDecoders.CodeWithScopeDecodeValue(dc, vr, cws) - if err != nil { - return nil, err - } - - return cws.Interface().(primitive.CodeWithScope), nil - case bsontype.Int32: - return vr.ReadInt32() - case bsontype.Int64: - return vr.ReadInt64() - case bsontype.Timestamp: - t, i, err := vr.ReadTimestamp() - if err != nil { - return nil, err - } - - return primitive.Timestamp{ - T: t, - I: i, - }, nil - case bsontype.Decimal128: - return vr.ReadDecimal128() - case bsontype.MinKey: - return primitive.MinKey{}, vr.ReadMinKey() - case bsontype.MaxKey: - return primitive.MaxKey{}, vr.ReadMaxKey() - case bsontype.Type(0), bsontype.EmbeddedDocument: - return r.decodeDocument(dc, vr) - case bsontype.Array: - return r.decodeArray(dc, vr) - default: - return nil, fmt.Errorf("cannot decode invalid BSON type %s", vrType) - } -} - -func (r *reflectionFreeDCodec) encodeDocumentValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, v interface{}) error { - switch val := v.(type) { - case int: - return r.encodeInt(vw, val) - case int8: - return vw.WriteInt32(int32(val)) - case int16: - return vw.WriteInt32(int32(val)) - case int32: - return vw.WriteInt32(val) - case int64: - return r.encodeInt64(ec, vw, val) - case uint: - return r.encodeUint64(ec, vw, uint64(val)) - case uint8: - return vw.WriteInt32(int32(val)) - case uint16: - return vw.WriteInt32(int32(val)) - case uint32: - return r.encodeUint64(ec, vw, uint64(val)) - case uint64: - return r.encodeUint64(ec, vw, val) - case float32: - return vw.WriteDouble(float64(val)) - case float64: - return vw.WriteDouble(val) 
- case []byte: - return vw.WriteBinary(val) - case primitive.Binary: - return vw.WriteBinaryWithSubtype(val.Data, val.Subtype) - case bool: - return vw.WriteBoolean(val) - case primitive.CodeWithScope: - return defaultValueEncoders.CodeWithScopeEncodeValue(ec, vw, reflect.ValueOf(val)) - case primitive.DBPointer: - return vw.WriteDBPointer(val.DB, val.Pointer) - case primitive.DateTime: - return vw.WriteDateTime(int64(val)) - case time.Time: - dt := primitive.NewDateTimeFromTime(val) - return vw.WriteDateTime(int64(dt)) - case primitive.Decimal128: - return vw.WriteDecimal128(val) - case primitive.JavaScript: - return vw.WriteJavascript(string(val)) - case primitive.MinKey: - return vw.WriteMinKey() - case primitive.MaxKey: - return vw.WriteMaxKey() - case primitive.Null, nil: - return vw.WriteNull() - case primitive.ObjectID: - return vw.WriteObjectID(val) - case primitive.Regex: - return vw.WriteRegex(val.Pattern, val.Options) - case string: - return vw.WriteString(val) - case primitive.Symbol: - return vw.WriteSymbol(string(val)) - case primitive.Timestamp: - return vw.WriteTimestamp(val.T, val.I) - case primitive.Undefined: - return vw.WriteUndefined() - case primitive.D: - return r.encodeDocument(ec, vw, val) - case primitive.A: - return r.encodePrimitiveA(ec, vw, val) - case []interface{}: - return r.encodePrimitiveA(ec, vw, val) - case []primitive.D: - return r.encodeSliceD(ec, vw, val) - case []int: - return r.encodeSliceInt(vw, val) - case []int8: - return r.encodeSliceInt8(vw, val) - case []int16: - return r.encodeSliceInt16(vw, val) - case []int32: - return r.encodeSliceInt32(vw, val) - case []int64: - return r.encodeSliceInt64(ec, vw, val) - case []uint: - return r.encodeSliceUint(ec, vw, val) - case []uint16: - return r.encodeSliceUint16(vw, val) - case []uint32: - return r.encodeSliceUint32(ec, vw, val) - case []uint64: - return r.encodeSliceUint64(ec, vw, val) - case [][]byte: - return r.encodeSliceByteSlice(vw, val) - case []primitive.Binary: - 
return r.encodeSliceBinary(vw, val) - case []bool: - return r.encodeSliceBoolean(vw, val) - case []primitive.CodeWithScope: - return r.encodeSliceCWS(ec, vw, val) - case []primitive.DBPointer: - return r.encodeSliceDBPointer(vw, val) - case []primitive.DateTime: - return r.encodeSliceDateTime(vw, val) - case []time.Time: - return r.encodeSliceTimeTime(vw, val) - case []primitive.Decimal128: - return r.encodeSliceDecimal128(vw, val) - case []float32: - return r.encodeSliceFloat32(vw, val) - case []float64: - return r.encodeSliceFloat64(vw, val) - case []primitive.JavaScript: - return r.encodeSliceJavaScript(vw, val) - case []primitive.MinKey: - return r.encodeSliceMinKey(vw, val) - case []primitive.MaxKey: - return r.encodeSliceMaxKey(vw, val) - case []primitive.Null: - return r.encodeSliceNull(vw, val) - case []primitive.ObjectID: - return r.encodeSliceObjectID(vw, val) - case []primitive.Regex: - return r.encodeSliceRegex(vw, val) - case []string: - return r.encodeSliceString(vw, val) - case []primitive.Symbol: - return r.encodeSliceSymbol(vw, val) - case []primitive.Timestamp: - return r.encodeSliceTimestamp(vw, val) - case []primitive.Undefined: - return r.encodeSliceUndefined(vw, val) - default: - return fmt.Errorf("value of type %T not supported", v) - } -} - -func (r *reflectionFreeDCodec) encodeInt(vw bsonrw.ValueWriter, val int) error { - if fitsIn32Bits(int64(val)) { - return vw.WriteInt32(int32(val)) - } - return vw.WriteInt64(int64(val)) -} - -func (r *reflectionFreeDCodec) encodeInt64(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val int64) error { - if ec.MinSize && fitsIn32Bits(val) { - return vw.WriteInt32(int32(val)) - } - return vw.WriteInt64(val) -} - -func (r *reflectionFreeDCodec) encodeUint64(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val uint64) error { - if ec.MinSize && val <= math.MaxInt32 { - return vw.WriteInt32(int32(val)) - } - if val > math.MaxInt64 { - return fmt.Errorf("%d overflows int64", val) - } - - return 
vw.WriteInt64(int64(val)) -} - -func (r *reflectionFreeDCodec) encodeDocument(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, doc primitive.D) error { - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for _, elem := range doc { - docValWriter, err := dw.WriteDocumentElement(elem.Key) - if err != nil { - return err - } - - if err := r.encodeDocumentValue(ec, docValWriter, elem.Value); err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceByteSlice(vw bsonrw.ValueWriter, arr [][]byte) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteBinary(val); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceBinary(vw bsonrw.ValueWriter, arr []primitive.Binary) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteBinaryWithSubtype(val.Data, val.Subtype); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceBoolean(vw bsonrw.ValueWriter, arr []bool) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteBoolean(val); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceCWS(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []primitive.CodeWithScope) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err 
- } - - if err := defaultValueEncoders.CodeWithScopeEncodeValue(ec, arrayValWriter, reflect.ValueOf(val)); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceDBPointer(vw bsonrw.ValueWriter, arr []primitive.DBPointer) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteDBPointer(val.DB, val.Pointer); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceDateTime(vw bsonrw.ValueWriter, arr []primitive.DateTime) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteDateTime(int64(val)); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceTimeTime(vw bsonrw.ValueWriter, arr []time.Time) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - dt := primitive.NewDateTimeFromTime(val) - if err := arrayValWriter.WriteDateTime(int64(dt)); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceDecimal128(vw bsonrw.ValueWriter, arr []primitive.Decimal128) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteDecimal128(val); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceFloat32(vw bsonrw.ValueWriter, arr []float32) error { - aw, err := vw.WriteArray() - if err 
!= nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteDouble(float64(val)); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceFloat64(vw bsonrw.ValueWriter, arr []float64) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteDouble(val); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceJavaScript(vw bsonrw.ValueWriter, arr []primitive.JavaScript) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteJavascript(string(val)); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceMinKey(vw bsonrw.ValueWriter, arr []primitive.MinKey) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteMinKey(); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceMaxKey(vw bsonrw.ValueWriter, arr []primitive.MaxKey) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteMaxKey(); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceNull(vw bsonrw.ValueWriter, arr []primitive.Null) error { - aw, err := vw.WriteArray() - if err != nil { - return 
err - } - - for range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteNull(); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceObjectID(vw bsonrw.ValueWriter, arr []primitive.ObjectID) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteObjectID(val); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceRegex(vw bsonrw.ValueWriter, arr []primitive.Regex) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteRegex(val.Pattern, val.Options); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceString(vw bsonrw.ValueWriter, arr []string) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteString(val); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceSymbol(vw bsonrw.ValueWriter, arr []primitive.Symbol) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteSymbol(string(val)); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceTimestamp(vw bsonrw.ValueWriter, arr []primitive.Timestamp) error { - aw, err := vw.WriteArray() - if err != nil { - 
return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteTimestamp(val.T, val.I); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceUndefined(vw bsonrw.ValueWriter, arr []primitive.Undefined) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteUndefined(); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodePrimitiveA(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr primitive.A) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := r.encodeDocumentValue(ec, arrayValWriter, val); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceD(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []primitive.D) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := r.encodeDocument(ec, arrayValWriter, val); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceInt(vw bsonrw.ValueWriter, arr []int) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := r.encodeInt(arrayValWriter, val); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceInt8(vw bsonrw.ValueWriter, arr []int8) error { - aw, err := 
vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteInt32(int32(val)); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceInt16(vw bsonrw.ValueWriter, arr []int16) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteInt32(int32(val)); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceInt32(vw bsonrw.ValueWriter, arr []int32) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteInt32(val); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceInt64(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []int64) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := r.encodeInt64(ec, arrayValWriter, val); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceUint(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []uint) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := r.encodeUint64(ec, arrayValWriter, uint64(val)); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceUint16(vw bsonrw.ValueWriter, arr []uint16) 
error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := arrayValWriter.WriteInt32(int32(val)); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceUint32(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []uint32) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := r.encodeUint64(ec, arrayValWriter, uint64(val)); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func (r *reflectionFreeDCodec) encodeSliceUint64(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, arr []uint64) error { - aw, err := vw.WriteArray() - if err != nil { - return err - } - - for _, val := range arr { - arrayValWriter, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if err := r.encodeUint64(ec, arrayValWriter, val); err != nil { - return err - } - } - - return aw.WriteArrayEnd() -} - -func fitsIn32Bits(i int64) bool { - return math.MinInt32 <= i && i <= math.MaxInt32 -} diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/registry.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/registry.go deleted file mode 100644 index 3ca1c326c..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/registry.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2022-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonx - -import ( - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/bson/bsoncodec" -) - -// DefaultRegistry is the default bsoncodec.Registry. 
It contains the default codecs and the -// primitive codecs. -var DefaultRegistry = NewRegistryBuilder().Build() - -// NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and -// decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the -// PrimitiveCodecs type in this package. -func NewRegistryBuilder() *bsoncodec.RegistryBuilder { - rb := bsoncodec.NewRegistryBuilder() - bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb) - bsoncodec.DefaultValueDecoders{}.RegisterDefaultDecoders(rb) - bson.PrimitiveCodecs{}.RegisterPrimitiveCodecs(rb) - primitiveCodecs.RegisterPrimitiveCodecs(rb) - return rb -} diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/value.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/value.go deleted file mode 100644 index f66f6b240..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/value.go +++ /dev/null @@ -1,866 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonx - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "math" - "time" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// Val represents a BSON value. -type Val struct { - // NOTE: The bootstrap is a small amount of space that'll be on the stack. At 15 bytes this - // doesn't make this type any larger, since there are 7 bytes of padding and we want an int64 to - // store small values (e.g. boolean, double, int64, etc...). The primitive property is where all - // of the larger values go. They will use either Go primitives or the primitive.* types. 
- t bsontype.Type - bootstrap [15]byte - primitive interface{} -} - -func (v Val) string() string { - if v.primitive != nil { - return v.primitive.(string) - } - // The string will either end with a null byte or it fills the entire bootstrap space. - length := v.bootstrap[0] - return string(v.bootstrap[1 : length+1]) -} - -func (v Val) writestring(str string) Val { - switch { - case len(str) < 15: - v.bootstrap[0] = uint8(len(str)) - copy(v.bootstrap[1:], str) - default: - v.primitive = str - } - return v -} - -func (v Val) i64() int64 { - return int64(v.bootstrap[0]) | int64(v.bootstrap[1])<<8 | int64(v.bootstrap[2])<<16 | - int64(v.bootstrap[3])<<24 | int64(v.bootstrap[4])<<32 | int64(v.bootstrap[5])<<40 | - int64(v.bootstrap[6])<<48 | int64(v.bootstrap[7])<<56 -} - -func (v Val) writei64(i64 int64) Val { - v.bootstrap[0] = byte(i64) - v.bootstrap[1] = byte(i64 >> 8) - v.bootstrap[2] = byte(i64 >> 16) - v.bootstrap[3] = byte(i64 >> 24) - v.bootstrap[4] = byte(i64 >> 32) - v.bootstrap[5] = byte(i64 >> 40) - v.bootstrap[6] = byte(i64 >> 48) - v.bootstrap[7] = byte(i64 >> 56) - return v -} - -// IsZero returns true if this value is zero or a BSON null. -func (v Val) IsZero() bool { return v.t == bsontype.Type(0) || v.t == bsontype.Null } - -func (v Val) String() string { - // TODO(GODRIVER-612): When bsoncore has appenders for extended JSON use that here. - return fmt.Sprintf("%v", v.Interface()) -} - -// Interface returns the Go value of this Value as an empty interface. -// -// This method will return nil if it is empty, otherwise it will return a Go primitive or a -// primitive.* instance. 
-func (v Val) Interface() interface{} { - switch v.Type() { - case bsontype.Double: - return v.Double() - case bsontype.String: - return v.StringValue() - case bsontype.EmbeddedDocument: - switch v.primitive.(type) { - case Doc: - return v.primitive.(Doc) - case MDoc: - return v.primitive.(MDoc) - default: - return primitive.Null{} - } - case bsontype.Array: - return v.Array() - case bsontype.Binary: - return v.primitive.(primitive.Binary) - case bsontype.Undefined: - return primitive.Undefined{} - case bsontype.ObjectID: - return v.ObjectID() - case bsontype.Boolean: - return v.Boolean() - case bsontype.DateTime: - return v.DateTime() - case bsontype.Null: - return primitive.Null{} - case bsontype.Regex: - return v.primitive.(primitive.Regex) - case bsontype.DBPointer: - return v.primitive.(primitive.DBPointer) - case bsontype.JavaScript: - return v.JavaScript() - case bsontype.Symbol: - return v.Symbol() - case bsontype.CodeWithScope: - return v.primitive.(primitive.CodeWithScope) - case bsontype.Int32: - return v.Int32() - case bsontype.Timestamp: - t, i := v.Timestamp() - return primitive.Timestamp{T: t, I: i} - case bsontype.Int64: - return v.Int64() - case bsontype.Decimal128: - return v.Decimal128() - case bsontype.MinKey: - return primitive.MinKey{} - case bsontype.MaxKey: - return primitive.MaxKey{} - default: - return primitive.Null{} - } -} - -// MarshalBSONValue implements the bsoncodec.ValueMarshaler interface. -func (v Val) MarshalBSONValue() (bsontype.Type, []byte, error) { - return v.MarshalAppendBSONValue(nil) -} - -// MarshalAppendBSONValue is similar to MarshalBSONValue, but allows the caller to specify a slice -// to add the bytes to. 
-func (v Val) MarshalAppendBSONValue(dst []byte) (bsontype.Type, []byte, error) { - t := v.Type() - switch v.Type() { - case bsontype.Double: - dst = bsoncore.AppendDouble(dst, v.Double()) - case bsontype.String: - dst = bsoncore.AppendString(dst, v.String()) - case bsontype.EmbeddedDocument: - switch v.primitive.(type) { - case Doc: - t, dst, _ = v.primitive.(Doc).MarshalBSONValue() // Doc.MarshalBSONValue never returns an error. - case MDoc: - t, dst, _ = v.primitive.(MDoc).MarshalBSONValue() // MDoc.MarshalBSONValue never returns an error. - } - case bsontype.Array: - t, dst, _ = v.Array().MarshalBSONValue() // Arr.MarshalBSON never returns an error. - case bsontype.Binary: - subtype, bindata := v.Binary() - dst = bsoncore.AppendBinary(dst, subtype, bindata) - case bsontype.Undefined: - case bsontype.ObjectID: - dst = bsoncore.AppendObjectID(dst, v.ObjectID()) - case bsontype.Boolean: - dst = bsoncore.AppendBoolean(dst, v.Boolean()) - case bsontype.DateTime: - dst = bsoncore.AppendDateTime(dst, v.DateTime()) - case bsontype.Null: - case bsontype.Regex: - pattern, options := v.Regex() - dst = bsoncore.AppendRegex(dst, pattern, options) - case bsontype.DBPointer: - ns, ptr := v.DBPointer() - dst = bsoncore.AppendDBPointer(dst, ns, ptr) - case bsontype.JavaScript: - dst = bsoncore.AppendJavaScript(dst, v.JavaScript()) - case bsontype.Symbol: - dst = bsoncore.AppendSymbol(dst, v.Symbol()) - case bsontype.CodeWithScope: - code, doc := v.CodeWithScope() - var scope []byte - scope, _ = doc.MarshalBSON() // Doc.MarshalBSON never returns an error. 
- dst = bsoncore.AppendCodeWithScope(dst, code, scope) - case bsontype.Int32: - dst = bsoncore.AppendInt32(dst, v.Int32()) - case bsontype.Timestamp: - t, i := v.Timestamp() - dst = bsoncore.AppendTimestamp(dst, t, i) - case bsontype.Int64: - dst = bsoncore.AppendInt64(dst, v.Int64()) - case bsontype.Decimal128: - dst = bsoncore.AppendDecimal128(dst, v.Decimal128()) - case bsontype.MinKey: - case bsontype.MaxKey: - default: - panic(fmt.Errorf("invalid BSON type %v", t)) - } - - return t, dst, nil -} - -// UnmarshalBSONValue implements the bsoncodec.ValueUnmarshaler interface. -func (v *Val) UnmarshalBSONValue(t bsontype.Type, data []byte) error { - if v == nil { - return errors.New("cannot unmarshal into nil Value") - } - var err error - var ok = true - var rem []byte - switch t { - case bsontype.Double: - var f64 float64 - f64, rem, ok = bsoncore.ReadDouble(data) - *v = Double(f64) - case bsontype.String: - var str string - str, rem, ok = bsoncore.ReadString(data) - *v = String(str) - case bsontype.EmbeddedDocument: - var raw []byte - var doc Doc - raw, rem, ok = bsoncore.ReadDocument(data) - doc, err = ReadDoc(raw) - *v = Document(doc) - case bsontype.Array: - var raw []byte - arr := make(Arr, 0) - raw, rem, ok = bsoncore.ReadArray(data) - err = arr.UnmarshalBSONValue(t, raw) - *v = Array(arr) - case bsontype.Binary: - var subtype byte - var bindata []byte - subtype, bindata, rem, ok = bsoncore.ReadBinary(data) - *v = Binary(subtype, bindata) - case bsontype.Undefined: - *v = Undefined() - case bsontype.ObjectID: - var oid primitive.ObjectID - oid, rem, ok = bsoncore.ReadObjectID(data) - *v = ObjectID(oid) - case bsontype.Boolean: - var b bool - b, rem, ok = bsoncore.ReadBoolean(data) - *v = Boolean(b) - case bsontype.DateTime: - var dt int64 - dt, rem, ok = bsoncore.ReadDateTime(data) - *v = DateTime(dt) - case bsontype.Null: - *v = Null() - case bsontype.Regex: - var pattern, options string - pattern, options, rem, ok = bsoncore.ReadRegex(data) - *v = 
Regex(pattern, options) - case bsontype.DBPointer: - var ns string - var ptr primitive.ObjectID - ns, ptr, rem, ok = bsoncore.ReadDBPointer(data) - *v = DBPointer(ns, ptr) - case bsontype.JavaScript: - var js string - js, rem, ok = bsoncore.ReadJavaScript(data) - *v = JavaScript(js) - case bsontype.Symbol: - var symbol string - symbol, rem, ok = bsoncore.ReadSymbol(data) - *v = Symbol(symbol) - case bsontype.CodeWithScope: - var raw []byte - var code string - var scope Doc - code, raw, rem, ok = bsoncore.ReadCodeWithScope(data) - scope, err = ReadDoc(raw) - *v = CodeWithScope(code, scope) - case bsontype.Int32: - var i32 int32 - i32, rem, ok = bsoncore.ReadInt32(data) - *v = Int32(i32) - case bsontype.Timestamp: - var i, t uint32 - t, i, rem, ok = bsoncore.ReadTimestamp(data) - *v = Timestamp(t, i) - case bsontype.Int64: - var i64 int64 - i64, rem, ok = bsoncore.ReadInt64(data) - *v = Int64(i64) - case bsontype.Decimal128: - var d128 primitive.Decimal128 - d128, rem, ok = bsoncore.ReadDecimal128(data) - *v = Decimal128(d128) - case bsontype.MinKey: - *v = MinKey() - case bsontype.MaxKey: - *v = MaxKey() - default: - err = fmt.Errorf("invalid BSON type %v", t) - } - - if !ok && err == nil { - err = bsoncore.NewInsufficientBytesError(data, rem) - } - - return err -} - -// Type returns the BSON type of this value. -func (v Val) Type() bsontype.Type { - if v.t == bsontype.Type(0) { - return bsontype.Null - } - return v.t -} - -// IsNumber returns true if the type of v is a numberic BSON type. -func (v Val) IsNumber() bool { - switch v.Type() { - case bsontype.Double, bsontype.Int32, bsontype.Int64, bsontype.Decimal128: - return true - default: - return false - } -} - -// Double returns the BSON double value the Value represents. It panics if the value is a BSON type -// other than double. 
-func (v Val) Double() float64 { - if v.t != bsontype.Double { - panic(ElementTypeError{"bson.Value.Double", v.t}) - } - return math.Float64frombits(binary.LittleEndian.Uint64(v.bootstrap[0:8])) -} - -// DoubleOK is the same as Double, but returns a boolean instead of panicking. -func (v Val) DoubleOK() (float64, bool) { - if v.t != bsontype.Double { - return 0, false - } - return math.Float64frombits(binary.LittleEndian.Uint64(v.bootstrap[0:8])), true -} - -// StringValue returns the BSON string the Value represents. It panics if the value is a BSON type -// other than string. -// -// NOTE: This method is called StringValue to avoid it implementing the -// fmt.Stringer interface. -func (v Val) StringValue() string { - if v.t != bsontype.String { - panic(ElementTypeError{"bson.Value.StringValue", v.t}) - } - return v.string() -} - -// StringValueOK is the same as StringValue, but returns a boolean instead of -// panicking. -func (v Val) StringValueOK() (string, bool) { - if v.t != bsontype.String { - return "", false - } - return v.string(), true -} - -func (v Val) asDoc() Doc { - doc, ok := v.primitive.(Doc) - if ok { - return doc - } - mdoc := v.primitive.(MDoc) - for k, v := range mdoc { - doc = append(doc, Elem{k, v}) - } - return doc -} - -func (v Val) asMDoc() MDoc { - mdoc, ok := v.primitive.(MDoc) - if ok { - return mdoc - } - mdoc = make(MDoc) - doc := v.primitive.(Doc) - for _, elem := range doc { - mdoc[elem.Key] = elem.Value - } - return mdoc -} - -// Document returns the BSON embedded document value the Value represents. It panics if the value -// is a BSON type other than embedded document. -func (v Val) Document() Doc { - if v.t != bsontype.EmbeddedDocument { - panic(ElementTypeError{"bson.Value.Document", v.t}) - } - return v.asDoc() -} - -// DocumentOK is the same as Document, except it returns a boolean -// instead of panicking. 
-func (v Val) DocumentOK() (Doc, bool) { - if v.t != bsontype.EmbeddedDocument { - return nil, false - } - return v.asDoc(), true -} - -// MDocument returns the BSON embedded document value the Value represents. It panics if the value -// is a BSON type other than embedded document. -func (v Val) MDocument() MDoc { - if v.t != bsontype.EmbeddedDocument { - panic(ElementTypeError{"bson.Value.MDocument", v.t}) - } - return v.asMDoc() -} - -// MDocumentOK is the same as Document, except it returns a boolean -// instead of panicking. -func (v Val) MDocumentOK() (MDoc, bool) { - if v.t != bsontype.EmbeddedDocument { - return nil, false - } - return v.asMDoc(), true -} - -// Array returns the BSON array value the Value represents. It panics if the value is a BSON type -// other than array. -func (v Val) Array() Arr { - if v.t != bsontype.Array { - panic(ElementTypeError{"bson.Value.Array", v.t}) - } - return v.primitive.(Arr) -} - -// ArrayOK is the same as Array, except it returns a boolean -// instead of panicking. -func (v Val) ArrayOK() (Arr, bool) { - if v.t != bsontype.Array { - return nil, false - } - return v.primitive.(Arr), true -} - -// Binary returns the BSON binary value the Value represents. It panics if the value is a BSON type -// other than binary. -func (v Val) Binary() (byte, []byte) { - if v.t != bsontype.Binary { - panic(ElementTypeError{"bson.Value.Binary", v.t}) - } - bin := v.primitive.(primitive.Binary) - return bin.Subtype, bin.Data -} - -// BinaryOK is the same as Binary, except it returns a boolean instead of -// panicking. -func (v Val) BinaryOK() (byte, []byte, bool) { - if v.t != bsontype.Binary { - return 0x00, nil, false - } - bin := v.primitive.(primitive.Binary) - return bin.Subtype, bin.Data, true -} - -// Undefined returns the BSON undefined the Value represents. It panics if the value is a BSON type -// other than binary. 
-func (v Val) Undefined() { - if v.t != bsontype.Undefined { - panic(ElementTypeError{"bson.Value.Undefined", v.t}) - } -} - -// UndefinedOK is the same as Undefined, except it returns a boolean instead of -// panicking. -func (v Val) UndefinedOK() bool { - return v.t == bsontype.Undefined -} - -// ObjectID returns the BSON ObjectID the Value represents. It panics if the value is a BSON type -// other than ObjectID. -func (v Val) ObjectID() primitive.ObjectID { - if v.t != bsontype.ObjectID { - panic(ElementTypeError{"bson.Value.ObjectID", v.t}) - } - var oid primitive.ObjectID - copy(oid[:], v.bootstrap[:12]) - return oid -} - -// ObjectIDOK is the same as ObjectID, except it returns a boolean instead of -// panicking. -func (v Val) ObjectIDOK() (primitive.ObjectID, bool) { - if v.t != bsontype.ObjectID { - return primitive.ObjectID{}, false - } - var oid primitive.ObjectID - copy(oid[:], v.bootstrap[:12]) - return oid, true -} - -// Boolean returns the BSON boolean the Value represents. It panics if the value is a BSON type -// other than boolean. -func (v Val) Boolean() bool { - if v.t != bsontype.Boolean { - panic(ElementTypeError{"bson.Value.Boolean", v.t}) - } - return v.bootstrap[0] == 0x01 -} - -// BooleanOK is the same as Boolean, except it returns a boolean instead of -// panicking. -func (v Val) BooleanOK() (bool, bool) { - if v.t != bsontype.Boolean { - return false, false - } - return v.bootstrap[0] == 0x01, true -} - -// DateTime returns the BSON datetime the Value represents. It panics if the value is a BSON type -// other than datetime. -func (v Val) DateTime() int64 { - if v.t != bsontype.DateTime { - panic(ElementTypeError{"bson.Value.DateTime", v.t}) - } - return v.i64() -} - -// DateTimeOK is the same as DateTime, except it returns a boolean instead of -// panicking. 
-func (v Val) DateTimeOK() (int64, bool) { - if v.t != bsontype.DateTime { - return 0, false - } - return v.i64(), true -} - -// Time returns the BSON datetime the Value represents as time.Time. It panics if the value is a BSON -// type other than datetime. -func (v Val) Time() time.Time { - if v.t != bsontype.DateTime { - panic(ElementTypeError{"bson.Value.Time", v.t}) - } - i := v.i64() - return time.Unix(i/1000, i%1000*1000000) -} - -// TimeOK is the same as Time, except it returns a boolean instead of -// panicking. -func (v Val) TimeOK() (time.Time, bool) { - if v.t != bsontype.DateTime { - return time.Time{}, false - } - i := v.i64() - return time.Unix(i/1000, i%1000*1000000), true -} - -// Null returns the BSON undefined the Value represents. It panics if the value is a BSON type -// other than binary. -func (v Val) Null() { - if v.t != bsontype.Null && v.t != bsontype.Type(0) { - panic(ElementTypeError{"bson.Value.Null", v.t}) - } -} - -// NullOK is the same as Null, except it returns a boolean instead of -// panicking. -func (v Val) NullOK() bool { - if v.t != bsontype.Null && v.t != bsontype.Type(0) { - return false - } - return true -} - -// Regex returns the BSON regex the Value represents. It panics if the value is a BSON type -// other than regex. -func (v Val) Regex() (pattern, options string) { - if v.t != bsontype.Regex { - panic(ElementTypeError{"bson.Value.Regex", v.t}) - } - regex := v.primitive.(primitive.Regex) - return regex.Pattern, regex.Options -} - -// RegexOK is the same as Regex, except that it returns a boolean -// instead of panicking. -func (v Val) RegexOK() (pattern, options string, ok bool) { - if v.t != bsontype.Regex { - return "", "", false - } - regex := v.primitive.(primitive.Regex) - return regex.Pattern, regex.Options, true -} - -// DBPointer returns the BSON dbpointer the Value represents. It panics if the value is a BSON type -// other than dbpointer. 
-func (v Val) DBPointer() (string, primitive.ObjectID) { - if v.t != bsontype.DBPointer { - panic(ElementTypeError{"bson.Value.DBPointer", v.t}) - } - dbptr := v.primitive.(primitive.DBPointer) - return dbptr.DB, dbptr.Pointer -} - -// DBPointerOK is the same as DBPoitner, except that it returns a boolean -// instead of panicking. -func (v Val) DBPointerOK() (string, primitive.ObjectID, bool) { - if v.t != bsontype.DBPointer { - return "", primitive.ObjectID{}, false - } - dbptr := v.primitive.(primitive.DBPointer) - return dbptr.DB, dbptr.Pointer, true -} - -// JavaScript returns the BSON JavaScript the Value represents. It panics if the value is a BSON type -// other than JavaScript. -func (v Val) JavaScript() string { - if v.t != bsontype.JavaScript { - panic(ElementTypeError{"bson.Value.JavaScript", v.t}) - } - return v.string() -} - -// JavaScriptOK is the same as Javascript, except that it returns a boolean -// instead of panicking. -func (v Val) JavaScriptOK() (string, bool) { - if v.t != bsontype.JavaScript { - return "", false - } - return v.string(), true -} - -// Symbol returns the BSON symbol the Value represents. It panics if the value is a BSON type -// other than symbol. -func (v Val) Symbol() string { - if v.t != bsontype.Symbol { - panic(ElementTypeError{"bson.Value.Symbol", v.t}) - } - return v.string() -} - -// SymbolOK is the same as Javascript, except that it returns a boolean -// instead of panicking. -func (v Val) SymbolOK() (string, bool) { - if v.t != bsontype.Symbol { - return "", false - } - return v.string(), true -} - -// CodeWithScope returns the BSON code with scope value the Value represents. It panics if the -// value is a BSON type other than code with scope. 
-func (v Val) CodeWithScope() (string, Doc) { - if v.t != bsontype.CodeWithScope { - panic(ElementTypeError{"bson.Value.CodeWithScope", v.t}) - } - cws := v.primitive.(primitive.CodeWithScope) - return string(cws.Code), cws.Scope.(Doc) -} - -// CodeWithScopeOK is the same as JavascriptWithScope, -// except that it returns a boolean instead of panicking. -func (v Val) CodeWithScopeOK() (string, Doc, bool) { - if v.t != bsontype.CodeWithScope { - return "", nil, false - } - cws := v.primitive.(primitive.CodeWithScope) - return string(cws.Code), cws.Scope.(Doc), true -} - -// Int32 returns the BSON int32 the Value represents. It panics if the value is a BSON type -// other than int32. -func (v Val) Int32() int32 { - if v.t != bsontype.Int32 { - panic(ElementTypeError{"bson.Value.Int32", v.t}) - } - return int32(v.bootstrap[0]) | int32(v.bootstrap[1])<<8 | - int32(v.bootstrap[2])<<16 | int32(v.bootstrap[3])<<24 -} - -// Int32OK is the same as Int32, except that it returns a boolean instead of -// panicking. -func (v Val) Int32OK() (int32, bool) { - if v.t != bsontype.Int32 { - return 0, false - } - return int32(v.bootstrap[0]) | int32(v.bootstrap[1])<<8 | - int32(v.bootstrap[2])<<16 | int32(v.bootstrap[3])<<24, - true -} - -// Timestamp returns the BSON timestamp the Value represents. It panics if the value is a -// BSON type other than timestamp. -func (v Val) Timestamp() (t, i uint32) { - if v.t != bsontype.Timestamp { - panic(ElementTypeError{"bson.Value.Timestamp", v.t}) - } - return uint32(v.bootstrap[4]) | uint32(v.bootstrap[5])<<8 | - uint32(v.bootstrap[6])<<16 | uint32(v.bootstrap[7])<<24, - uint32(v.bootstrap[0]) | uint32(v.bootstrap[1])<<8 | - uint32(v.bootstrap[2])<<16 | uint32(v.bootstrap[3])<<24 -} - -// TimestampOK is the same as Timestamp, except that it returns a boolean -// instead of panicking. 
-func (v Val) TimestampOK() (t uint32, i uint32, ok bool) { - if v.t != bsontype.Timestamp { - return 0, 0, false - } - return uint32(v.bootstrap[4]) | uint32(v.bootstrap[5])<<8 | - uint32(v.bootstrap[6])<<16 | uint32(v.bootstrap[7])<<24, - uint32(v.bootstrap[0]) | uint32(v.bootstrap[1])<<8 | - uint32(v.bootstrap[2])<<16 | uint32(v.bootstrap[3])<<24, - true -} - -// Int64 returns the BSON int64 the Value represents. It panics if the value is a BSON type -// other than int64. -func (v Val) Int64() int64 { - if v.t != bsontype.Int64 { - panic(ElementTypeError{"bson.Value.Int64", v.t}) - } - return v.i64() -} - -// Int64OK is the same as Int64, except that it returns a boolean instead of -// panicking. -func (v Val) Int64OK() (int64, bool) { - if v.t != bsontype.Int64 { - return 0, false - } - return v.i64(), true -} - -// Decimal128 returns the BSON decimal128 value the Value represents. It panics if the value is a -// BSON type other than decimal128. -func (v Val) Decimal128() primitive.Decimal128 { - if v.t != bsontype.Decimal128 { - panic(ElementTypeError{"bson.Value.Decimal128", v.t}) - } - return v.primitive.(primitive.Decimal128) -} - -// Decimal128OK is the same as Decimal128, except that it returns a boolean -// instead of panicking. -func (v Val) Decimal128OK() (primitive.Decimal128, bool) { - if v.t != bsontype.Decimal128 { - return primitive.Decimal128{}, false - } - return v.primitive.(primitive.Decimal128), true -} - -// MinKey returns the BSON minkey the Value represents. It panics if the value is a BSON type -// other than binary. -func (v Val) MinKey() { - if v.t != bsontype.MinKey { - panic(ElementTypeError{"bson.Value.MinKey", v.t}) - } -} - -// MinKeyOK is the same as MinKey, except it returns a boolean instead of -// panicking. -func (v Val) MinKeyOK() bool { - return v.t == bsontype.MinKey -} - -// MaxKey returns the BSON maxkey the Value represents. It panics if the value is a BSON type -// other than binary. 
-func (v Val) MaxKey() { - if v.t != bsontype.MaxKey { - panic(ElementTypeError{"bson.Value.MaxKey", v.t}) - } -} - -// MaxKeyOK is the same as MaxKey, except it returns a boolean instead of -// panicking. -func (v Val) MaxKeyOK() bool { - return v.t == bsontype.MaxKey -} - -// Equal compares v to v2 and returns true if they are equal. Unknown BSON types are -// never equal. Two empty values are equal. -func (v Val) Equal(v2 Val) bool { - if v.Type() != v2.Type() { - return false - } - if v.IsZero() && v2.IsZero() { - return true - } - - switch v.Type() { - case bsontype.Double, bsontype.DateTime, bsontype.Timestamp, bsontype.Int64: - return bytes.Equal(v.bootstrap[0:8], v2.bootstrap[0:8]) - case bsontype.String: - return v.string() == v2.string() - case bsontype.EmbeddedDocument: - return v.equalDocs(v2) - case bsontype.Array: - return v.Array().Equal(v2.Array()) - case bsontype.Binary: - return v.primitive.(primitive.Binary).Equal(v2.primitive.(primitive.Binary)) - case bsontype.Undefined: - return true - case bsontype.ObjectID: - return bytes.Equal(v.bootstrap[0:12], v2.bootstrap[0:12]) - case bsontype.Boolean: - return v.bootstrap[0] == v2.bootstrap[0] - case bsontype.Null: - return true - case bsontype.Regex: - return v.primitive.(primitive.Regex).Equal(v2.primitive.(primitive.Regex)) - case bsontype.DBPointer: - return v.primitive.(primitive.DBPointer).Equal(v2.primitive.(primitive.DBPointer)) - case bsontype.JavaScript: - return v.JavaScript() == v2.JavaScript() - case bsontype.Symbol: - return v.Symbol() == v2.Symbol() - case bsontype.CodeWithScope: - code1, scope1 := v.primitive.(primitive.CodeWithScope).Code, v.primitive.(primitive.CodeWithScope).Scope - code2, scope2 := v2.primitive.(primitive.CodeWithScope).Code, v2.primitive.(primitive.CodeWithScope).Scope - return code1 == code2 && v.equalInterfaceDocs(scope1, scope2) - case bsontype.Int32: - return v.Int32() == v2.Int32() - case bsontype.Decimal128: - h, l := v.Decimal128().GetBytes() - h2, l2 := 
v2.Decimal128().GetBytes() - return h == h2 && l == l2 - case bsontype.MinKey: - return true - case bsontype.MaxKey: - return true - default: - return false - } -} - -func (v Val) equalDocs(v2 Val) bool { - _, ok1 := v.primitive.(MDoc) - _, ok2 := v2.primitive.(MDoc) - if ok1 || ok2 { - return v.asMDoc().Equal(v2.asMDoc()) - } - return v.asDoc().Equal(v2.asDoc()) -} - -func (Val) equalInterfaceDocs(i, i2 interface{}) bool { - switch d := i.(type) { - case MDoc: - d2, ok := i2.(IDoc) - if !ok { - return false - } - return d.Equal(d2) - case Doc: - d2, ok := i2.(IDoc) - if !ok { - return false - } - return d.Equal(d2) - case nil: - return i2 == nil - default: - return false - } -} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md index 2fde89f81..3c3e6c56c 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md @@ -1,12 +1,15 @@ # Driver Library Design + This document outlines the design for this package. ## Deployment, Server, and Connection + Acquiring a `Connection` from a `Server` selected from a `Deployment` enables sending and receiving wire messages. A `Deployment` represents an set of MongoDB servers and a `Server` represents a member of that set. These three types form the operation execution stack. ### Compression + Compression is handled by Connection type while uncompression is handled automatically by the Operation type. This is done because the compressor to use for compressing a wire message is chosen by the connection during handshake, while uncompression can be performed without this @@ -14,6 +17,7 @@ information. This does make the design of compression non-symmetric, but it make to implement and more consistent. ## Operation + The `Operation` type handles executing a series of commands using a `Deployment`. 
For most uses `Operation` will only execute a single command, but the main use case for a series of commands is batch split write commands, such as insert. The type itself is heavily documented, so reading the diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/aws_conv.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/aws_conv.go index 8509abfbd..616182d9c 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/aws_conv.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/aws_conv.go @@ -11,19 +11,17 @@ import ( "context" "crypto/rand" "encoding/base64" - "encoding/json" "errors" "fmt" - "io/ioutil" "net/http" - "os" "strings" "time" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/internal/aws/credentials" + v4signer "go.mongodb.org/mongo-driver/internal/aws/signer/v4" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" - "go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4" ) type clientState int @@ -36,13 +34,10 @@ const ( ) type awsConversation struct { - state clientState - valid bool - nonce []byte - username string - password string - token string - httpClient *http.Client + state clientState + valid bool + nonce []byte + credentials *credentials.Credentials } type serverMessage struct { @@ -50,21 +45,10 @@ type serverMessage struct { Host string `bson:"h"` } -type ecsResponse struct { - AccessKeyID string `json:"AccessKeyId"` - SecretAccessKey string `json:"SecretAccessKey"` - Token string `json:"Token"` -} - const ( amzDateFormat = "20060102T150405Z" - awsRelativeURI = "http://169.254.170.2/" - awsEC2URI = "http://169.254.169.254/" - awsEC2RolePath = "latest/meta-data/iam/security-credentials/" - awsEC2TokenPath = "latest/api/token" defaultRegion = "us-east-1" maxHostLength = 255 - defaultHTTPTimeout = 10 * time.Second responceNonceLength = 64 ) @@ -128,149 +112,6 @@ func getRegion(host string) (string, error) { return region, nil } -func 
(ac *awsConversation) validateAndMakeCredentials() (*awsv4.StaticProvider, error) { - if ac.username != "" && ac.password == "" { - return nil, errors.New("ACCESS_KEY_ID is set, but SECRET_ACCESS_KEY is missing") - } - if ac.username == "" && ac.password != "" { - return nil, errors.New("SECRET_ACCESS_KEY is set, but ACCESS_KEY_ID is missing") - } - if ac.username == "" && ac.password == "" && ac.token != "" { - return nil, errors.New("AWS_SESSION_TOKEN is set, but ACCESS_KEY_ID and SECRET_ACCESS_KEY are missing") - } - if ac.username != "" || ac.password != "" || ac.token != "" { - return &awsv4.StaticProvider{Value: awsv4.Value{ - AccessKeyID: ac.username, - SecretAccessKey: ac.password, - SessionToken: ac.token, - }}, nil - } - return nil, nil -} - -func executeAWSHTTPRequest(httpClient *http.Client, req *http.Request) ([]byte, error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultHTTPTimeout) - defer cancel() - resp, err := httpClient.Do(req.WithContext(ctx)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - return ioutil.ReadAll(resp.Body) -} - -func (ac *awsConversation) getEC2Credentials() (*awsv4.StaticProvider, error) { - // get token - req, err := http.NewRequest("PUT", awsEC2URI+awsEC2TokenPath, nil) - if err != nil { - return nil, err - } - req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "30") - - token, err := executeAWSHTTPRequest(ac.httpClient, req) - if err != nil { - return nil, err - } - if len(token) == 0 { - return nil, errors.New("unable to retrieve token from EC2 metadata") - } - tokenStr := string(token) - - // get role name - req, err = http.NewRequest("GET", awsEC2URI+awsEC2RolePath, nil) - if err != nil { - return nil, err - } - req.Header.Set("X-aws-ec2-metadata-token", tokenStr) - - role, err := executeAWSHTTPRequest(ac.httpClient, req) - if err != nil { - return nil, err - } - if len(role) == 0 { - return nil, errors.New("unable to retrieve role_name from EC2 metadata") - } - - // get 
credentials - pathWithRole := awsEC2URI + awsEC2RolePath + string(role) - req, err = http.NewRequest("GET", pathWithRole, nil) - if err != nil { - return nil, err - } - req.Header.Set("X-aws-ec2-metadata-token", tokenStr) - creds, err := executeAWSHTTPRequest(ac.httpClient, req) - if err != nil { - return nil, err - } - - var es2Resp ecsResponse - err = json.Unmarshal(creds, &es2Resp) - if err != nil { - return nil, err - } - ac.username = es2Resp.AccessKeyID - ac.password = es2Resp.SecretAccessKey - ac.token = es2Resp.Token - - return ac.validateAndMakeCredentials() -} - -func (ac *awsConversation) getCredentials() (*awsv4.StaticProvider, error) { - // Credentials passed through URI - creds, err := ac.validateAndMakeCredentials() - if creds != nil || err != nil { - return creds, err - } - - // Credentials from environment variables - ac.username = os.Getenv("AWS_ACCESS_KEY_ID") - ac.password = os.Getenv("AWS_SECRET_ACCESS_KEY") - ac.token = os.Getenv("AWS_SESSION_TOKEN") - - creds, err = ac.validateAndMakeCredentials() - if creds != nil || err != nil { - return creds, err - } - - // Credentials from ECS metadata - relativeEcsURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") - if len(relativeEcsURI) > 0 { - fullURI := awsRelativeURI + relativeEcsURI - - req, err := http.NewRequest("GET", fullURI, nil) - if err != nil { - return nil, err - } - - body, err := executeAWSHTTPRequest(ac.httpClient, req) - if err != nil { - return nil, err - } - - var espResp ecsResponse - err = json.Unmarshal(body, &espResp) - if err != nil { - return nil, err - } - ac.username = espResp.AccessKeyID - ac.password = espResp.SecretAccessKey - ac.token = espResp.Token - - creds, err = ac.validateAndMakeCredentials() - if creds != nil || err != nil { - return creds, err - } - } - - // Credentials from EC2 metadata - creds, err = ac.getEC2Credentials() - if creds == nil && err == nil { - return nil, errors.New("unable to get credentials") - } - return creds, err -} - func (ac 
*awsConversation) firstMsg() []byte { // Values are cached for use in final message parameters ac.nonce = make([]byte, 32) @@ -306,7 +147,7 @@ func (ac *awsConversation) finalMsg(s1 []byte) ([]byte, error) { return nil, err } - creds, err := ac.getCredentials() + creds, err := ac.credentials.GetWithContext(context.Background()) if err != nil { return nil, err } @@ -320,14 +161,14 @@ func (ac *awsConversation) finalMsg(s1 []byte) ([]byte, error) { req.Header.Set("Content-Length", "43") req.Host = sm.Host req.Header.Set("X-Amz-Date", currentTime.Format(amzDateFormat)) - if len(ac.token) > 0 { - req.Header.Set("X-Amz-Security-Token", ac.token) + if len(creds.SessionToken) > 0 { + req.Header.Set("X-Amz-Security-Token", creds.SessionToken) } req.Header.Set("X-MongoDB-Server-Nonce", base64.StdEncoding.EncodeToString(sm.Nonce.Data)) req.Header.Set("X-MongoDB-GS2-CB-Flag", "n") // Create signer with credentials - signer := awsv4.NewSigner(creds) + signer := v4signer.NewSigner(ac.credentials) // Get signed header _, err = signer.Sign(req, strings.NewReader(body), "sts", region, currentTime) @@ -339,8 +180,8 @@ func (ac *awsConversation) finalMsg(s1 []byte) ([]byte, error) { idx, msg := bsoncore.AppendDocumentStart(nil) msg = bsoncore.AppendStringElement(msg, "a", req.Header.Get("Authorization")) msg = bsoncore.AppendStringElement(msg, "d", req.Header.Get("X-Amz-Date")) - if len(ac.token) > 0 { - msg = bsoncore.AppendStringElement(msg, "t", ac.token) + if len(creds.SessionToken) > 0 { + msg = bsoncore.AppendStringElement(msg, "t", creds.SessionToken) } msg, _ = bsoncore.AppendDocumentEnd(msg, idx) diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/awscreds.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/awscreds.go new file mode 100644 index 000000000..06bba4534 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/awscreds.go @@ -0,0 +1,58 @@ +// Copyright (C) MongoDB, Inc. 2022-present. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package creds + +import ( + "context" + "net/http" + "time" + + "go.mongodb.org/mongo-driver/internal/aws/credentials" + "go.mongodb.org/mongo-driver/internal/credproviders" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +const ( + // expiryWindow will allow the credentials to trigger refreshing prior to the credentials actually expiring. + // This is beneficial so expiring credentials do not cause request to fail unexpectedly due to exceptions. + // + // Set an early expiration of 5 minutes before the credentials are actually expired. + expiryWindow = 5 * time.Minute +) + +// AWSCredentialProvider wraps AWS credentials. +type AWSCredentialProvider struct { + Cred *credentials.Credentials +} + +// NewAWSCredentialProvider generates new AWSCredentialProvider +func NewAWSCredentialProvider(httpClient *http.Client, providers ...credentials.Provider) AWSCredentialProvider { + providers = append( + providers, + credproviders.NewEnvProvider(), + credproviders.NewAssumeRoleProvider(httpClient, expiryWindow), + credproviders.NewECSProvider(httpClient, expiryWindow), + credproviders.NewEC2Provider(httpClient, expiryWindow), + ) + + return AWSCredentialProvider{credentials.NewChainCredentials(providers)} +} + +// GetCredentialsDoc generates AWS credentials. +func (p AWSCredentialProvider) GetCredentialsDoc(ctx context.Context) (bsoncore.Document, error) { + creds, err := p.Cred.GetWithContext(ctx) + if err != nil { + return nil, err + } + builder := bsoncore.NewDocumentBuilder(). + AppendString("accessKeyId", creds.AccessKeyID). 
+ AppendString("secretAccessKey", creds.SecretAccessKey) + if token := creds.SessionToken; len(token) > 0 { + builder.AppendString("sessionToken", token) + } + return builder.Build(), nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/azurecreds.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/azurecreds.go new file mode 100644 index 000000000..d8f105a9d --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/azurecreds.go @@ -0,0 +1,40 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package creds + +import ( + "context" + "net/http" + "time" + + "go.mongodb.org/mongo-driver/internal/aws/credentials" + "go.mongodb.org/mongo-driver/internal/credproviders" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +// AzureCredentialProvider provides Azure credentials. +type AzureCredentialProvider struct { + cred *credentials.Credentials +} + +// NewAzureCredentialProvider generates new AzureCredentialProvider +func NewAzureCredentialProvider(httpClient *http.Client) AzureCredentialProvider { + return AzureCredentialProvider{ + credentials.NewCredentials(credproviders.NewAzureProvider(httpClient, 1*time.Minute)), + } +} + +// GetCredentialsDoc generates Azure credentials. +func (p AzureCredentialProvider) GetCredentialsDoc(ctx context.Context) (bsoncore.Document, error) { + creds, err := p.cred.GetWithContext(ctx) + if err != nil { + return nil, err + } + builder := bsoncore.NewDocumentBuilder(). 
+ AppendString("accessToken", creds.SessionToken) + return builder.Build(), nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/gcpcreds.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/gcpcreds.go new file mode 100644 index 000000000..74f352e36 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/gcpcreds.go @@ -0,0 +1,74 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package creds + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +// GCPCredentialProvider provides GCP credentials. +type GCPCredentialProvider struct { + httpClient *http.Client +} + +// NewGCPCredentialProvider generates new GCPCredentialProvider +func NewGCPCredentialProvider(httpClient *http.Client) GCPCredentialProvider { + return GCPCredentialProvider{httpClient} +} + +// GetCredentialsDoc generates GCP credentials. 
+func (p GCPCredentialProvider) GetCredentialsDoc(ctx context.Context) (bsoncore.Document, error) { + metadataHost := "metadata.google.internal" + if envhost := os.Getenv("GCE_METADATA_HOST"); envhost != "" { + metadataHost = envhost + } + url := fmt.Sprintf("http://%s/computeMetadata/v1/instance/service-accounts/default/token", metadataHost) + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("unable to retrieve GCP credentials: %w", err) + } + req.Header.Set("Metadata-Flavor", "Google") + resp, err := p.httpClient.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("unable to retrieve GCP credentials: %w", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to retrieve GCP credentials: error reading response body: %w", err) + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf( + "unable to retrieve GCP credentials: expected StatusCode 200, got StatusCode: %v. Response body: %s", + resp.StatusCode, + body) + } + var tokenResponse struct { + AccessToken string `json:"access_token"` + } + // Attempt to read body as JSON + err = json.Unmarshal(body, &tokenResponse) + if err != nil { + return nil, fmt.Errorf( + "unable to retrieve GCP credentials: error reading body JSON: %w (response body: %s)", + err, + body) + } + if tokenResponse.AccessToken == "" { + return nil, fmt.Errorf("unable to retrieve GCP credentials: got unexpected empty accessToken from GCP Metadata Server. 
Response body: %s", body) + } + + builder := bsoncore.NewDocumentBuilder().AppendString("accessToken", tokenResponse.AccessToken) + return builder.Build(), nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/default.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/default.go index e266ad542..6f2ca5224 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/default.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/default.go @@ -9,8 +9,6 @@ package auth import ( "context" "fmt" - - "go.mongodb.org/mongo-driver/mongo/description" ) func newDefaultAuthenticator(cred *Cred) (Authenticator, error) { @@ -78,21 +76,7 @@ func chooseAuthMechanism(cfg *Config) string { return v } } - return SCRAMSHA1 - } - - if err := scramSHA1Supported(cfg.HandshakeInfo.Description.WireVersion); err == nil { - return SCRAMSHA1 - } - - return MONGODBCR -} - -// scramSHA1Supported returns an error if the given server version does not support scram-sha-1. -func scramSHA1Supported(wireVersion *description.VersionRange) error { - if wireVersion != nil && wireVersion.Max < 3 { - return fmt.Errorf("SCRAM-SHA-1 is only supported for servers 3.0 or newer") } - return nil + return SCRAMSHA1 } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_enabled.go index 50522cbb6..7ba5fe860 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_enabled.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/gssapi_not_enabled.go @@ -12,6 +12,6 @@ package auth // GSSAPI is the mechanism name for GSSAPI. 
const GSSAPI = "GSSAPI" -func newGSSAPIAuthenticator(cred *Cred) (Authenticator, error) { +func newGSSAPIAuthenticator(*Cred) (Authenticator, error) { return nil, newAuthError("GSSAPI support not enabled during build (-tags gssapi)", nil) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/credentials.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/credentials.go deleted file mode 100644 index 95225a471..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/credentials.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// -// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from: -// - github.com/aws/aws-sdk-go/blob/v1.34.28/aws/credentials/static_provider.go -// - github.com/aws/aws-sdk-go/blob/v1.34.28/aws/credentials/credentials.go -// See THIRD-PARTY-NOTICES for original license terms - -package awsv4 - -import ( - "errors" -) - -// StaticProviderName provides a name of Static provider -const StaticProviderName = "StaticProvider" - -var ( - // ErrStaticCredentialsEmpty is emitted when static credentials are empty. - ErrStaticCredentialsEmpty = errors.New("EmptyStaticCreds: static credentials are empty") -) - -// A Value is the AWS credentials value for individual credential fields. -type Value struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Provider used to get credentials - ProviderName string -} - -// HasKeys returns if the credentials Value has both AccessKeyID and -// SecretAccessKey value set. 
-func (v Value) HasKeys() bool { - return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 -} - -// A StaticProvider is a set of credentials which are set programmatically, -// and will never expire. -type StaticProvider struct { - Value -} - -// Retrieve returns the credentials or error if the credentials are invalid. -func (s *StaticProvider) Retrieve() (Value, error) { - if s.AccessKeyID == "" || s.SecretAccessKey == "" { - return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty - } - - if len(s.Value.ProviderName) == 0 { - s.Value.ProviderName = StaticProviderName - } - return s.Value, nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/doc.go deleted file mode 100644 index 6a29293d8..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// -// Based on github.com/aws/aws-sdk-go v1.34.28 by Amazon.com, Inc. -// See THIRD-PARTY-NOTICES for original license terms - -// Package awsv4 implements signing for AWS V4 signer with static credentials, -// and is based on and modified from code in the package aws-sdk-go. The -// modifications remove non-static credentials, support for non-sts services, -// and the options for v4.Signer. They also reduce the number of non-Go -// library dependencies. 
-package awsv4 diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/rules.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/rules.go deleted file mode 100644 index ad820d8e9..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4/rules.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// -// Based on github.com/aws/aws-sdk-go by Amazon.com, Inc. with code from: -// - github.com/aws/aws-sdk-go/blob/v1.34.28/aws/signer/v4/header_rules.go -// - github.com/aws/aws-sdk-go/blob/v1.34.28/internal/strings/strings.go -// See THIRD-PARTY-NOTICES for original license terms - -package awsv4 - -import ( - "strings" -) - -// validator houses a set of rule needed for validation of a -// string value -type rules []rule - -// rule interface allows for more flexible rules and just simply -// checks whether or not a value adheres to that rule -type rule interface { - IsValid(value string) bool -} - -// IsValid will iterate through all rules and see if any rules -// apply to the value and supports nested rules -func (r rules) IsValid(value string) bool { - for _, rule := range r { - if rule.IsValid(value) { - return true - } - } - return false -} - -// mapRule generic rule for maps -type mapRule map[string]struct{} - -// IsValid for the map rule satisfies whether it exists in the map -func (m mapRule) IsValid(value string) bool { - _, ok := m[value] - return ok -} - -// allowlist is a generic rule for allowlisting -type allowlist struct { - rule -} - -// IsValid for allowlist checks if the value is within the allowlist -func (a allowlist) IsValid(value string) bool { - return a.rule.IsValid(value) -} - -// denylist is a generic rule for 
denylisting -type denylist struct { - rule -} - -// IsValid for allowlist checks if the value is within the allowlist -func (d denylist) IsValid(value string) bool { - return !d.rule.IsValid(value) -} - -type patterns []string - -// hasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, -// under Unicode case-folding. -func hasPrefixFold(s, prefix string) bool { - return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) -} - -// IsValid for patterns checks each pattern and returns if a match has -// been found -func (p patterns) IsValid(value string) bool { - for _, pattern := range p { - if hasPrefixFold(value, pattern) { - return true - } - } - return false -} - -// inclusiveRules rules allow for rules to depend on one another -type inclusiveRules []rule - -// IsValid will return true if all rules are true -func (r inclusiveRules) IsValid(value string) bool { - for _, rule := range r { - if !rule.IsValid(value) { - return false - } - } - return true -} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.c b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.c index ec49d9612..68b725414 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.c +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.c @@ -12,9 +12,9 @@ #include "gss_wrapper.h" OM_uint32 gssapi_canonicalize_name( - OM_uint32* minor_status, - char *input_name, - gss_OID input_name_type, + OM_uint32* minor_status, + char *input_name, + gss_OID input_name_type, gss_name_t *output_name ) { @@ -39,8 +39,8 @@ OM_uint32 gssapi_canonicalize_name( } int gssapi_error_desc( - OM_uint32 maj_stat, - OM_uint32 min_stat, + OM_uint32 maj_stat, + OM_uint32 min_stat, char **desc ) { @@ -207,7 +207,7 @@ int gssapi_client_wrap_msg( void* input, size_t input_length, void** output, - size_t* output_length + size_t* 
output_length ) { gss_buffer_desc input_buffer = GSS_C_EMPTY_BUFFER; diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.h b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.h index 1cb9cd3c1..a105ba58b 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.h +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.h @@ -32,8 +32,8 @@ typedef struct { } gssapi_client_state; int gssapi_error_desc( - OM_uint32 maj_stat, - OM_uint32 min_stat, + OM_uint32 maj_stat, + OM_uint32 min_stat, char **desc ); @@ -62,11 +62,11 @@ int gssapi_client_wrap_msg( void* input, size_t input_length, void** output, - size_t* output_length + size_t* output_length ); int gssapi_client_destroy( gssapi_client_state *client ); -#endif \ No newline at end of file +#endif diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi.go index 36e9633f8..6e7d3ed8a 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi.go @@ -112,7 +112,7 @@ func (sc *SaslClient) Start() (string, []byte, error) { status := C.sspi_client_init(&sc.state, cusername, cpassword) if status != C.SSPI_OK { - return mechName, nil, sc.getError("unable to intitialize client") + return mechName, nil, sc.getError("unable to initialize client") } payload, err := sc.Next(nil) diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.c b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.c index f65565471..bc73723e8 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.c +++ 
b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.c @@ -69,7 +69,7 @@ int sspi_client_init( if (username) { if (password) { SEC_WINNT_AUTH_IDENTITY auth_identity; - + #ifdef _UNICODE auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_UNICODE; #else @@ -186,7 +186,7 @@ int sspi_client_wrap_msg( PVOID input, ULONG input_length, PVOID* output, - ULONG* output_length + ULONG* output_length ) { SecPkgContext_Sizes sizes; @@ -246,4 +246,4 @@ int sspi_client_destroy( sspi_functions->FreeCredentialsHandle(&client->cred); return SSPI_OK; -} \ No newline at end of file +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.h b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.h index 2d08e939e..e59e55c69 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.h +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.h @@ -54,11 +54,11 @@ int sspi_client_wrap_msg( PVOID input, ULONG input_length, PVOID* output, - ULONG* output_length + ULONG* output_length ); int sspi_client_destroy( sspi_client_state *client ); -#endif \ No newline at end of file +#endif diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbaws.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbaws.go index 8982f04da..7ae4b0899 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbaws.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/mongodbaws.go @@ -9,6 +9,10 @@ package auth import ( "context" "errors" + + "go.mongodb.org/mongo-driver/internal/aws/credentials" + "go.mongodb.org/mongo-driver/internal/credproviders" + "go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds" ) // MongoDBAWS is the mechanism name for MongoDBAWS. 
@@ -19,19 +23,22 @@ func newMongoDBAWSAuthenticator(cred *Cred) (Authenticator, error) { return nil, newAuthError("MONGODB-AWS source must be empty or $external", nil) } return &MongoDBAWSAuthenticator{ - source: cred.Source, - username: cred.Username, - password: cred.Password, - sessionToken: cred.Props["AWS_SESSION_TOKEN"], + source: cred.Source, + credentials: &credproviders.StaticProvider{ + Value: credentials.Value{ + ProviderName: cred.Source, + AccessKeyID: cred.Username, + SecretAccessKey: cred.Password, + SessionToken: cred.Props["AWS_SESSION_TOKEN"], + }, + }, }, nil } // MongoDBAWSAuthenticator uses AWS-IAM credentials over SASL to authenticate a connection. type MongoDBAWSAuthenticator struct { - source string - username string - password string - sessionToken string + source string + credentials *credproviders.StaticProvider } // Auth authenticates the connection. @@ -40,12 +47,10 @@ func (a *MongoDBAWSAuthenticator) Auth(ctx context.Context, cfg *Config) error { if httpClient == nil { return errors.New("cfg.HTTPClient must not be nil") } + providers := creds.NewAWSCredentialProvider(httpClient, a.credentials) adapter := &awsSaslAdapter{ conversation: &awsConversation{ - username: a.username, - password: a.password, - token: a.sessionToken, - httpClient: httpClient, + credentials: providers.Cred, }, } err := ConductSaslConversation(ctx, cfg, a.source, adapter) diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/plain.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/plain.go index f88100350..532d43e39 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/plain.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/plain.go @@ -46,7 +46,7 @@ func (c *plainSaslClient) Start() (string, []byte, error) { return PLAIN, b, nil } -func (c *plainSaslClient) Next(challenge []byte) ([]byte, error) { +func (c *plainSaslClient) Next([]byte) ([]byte, error) { return nil, newAuthError("unexpected server challenge", 
nil) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/x509.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/x509.go index e0a61eda8..03a9d750e 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/x509.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/x509.go @@ -9,7 +9,6 @@ package auth import ( "context" - "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/operation" @@ -37,22 +36,16 @@ var _ SpeculativeConversation = (*x509Conversation)(nil) // FirstMessage returns the first message to be sent to the server. func (c *x509Conversation) FirstMessage() (bsoncore.Document, error) { - return createFirstX509Message(description.Server{}, ""), nil + return createFirstX509Message(), nil } // createFirstX509Message creates the first message for the X509 conversation. -func createFirstX509Message(desc description.Server, user string) bsoncore.Document { +func createFirstX509Message() bsoncore.Document { elements := [][]byte{ bsoncore.AppendInt32Element(nil, "authenticate", 1), bsoncore.AppendStringElement(nil, "mechanism", MongoDBX509), } - // Server versions < 3.4 require the username to be included in the message. Versions >= 3.4 will extract the - // username from the certificate. - if desc.WireVersion != nil && desc.WireVersion.Max < 5 { - elements = append(elements, bsoncore.AppendStringElement(nil, "user", user)) - } - return bsoncore.BuildDocument(nil, elements...) } @@ -69,7 +62,7 @@ func (a *MongoDBX509Authenticator) CreateSpeculativeConversation() (SpeculativeC // Auth authenticates the provided connection by conducting an X509 authentication conversation. func (a *MongoDBX509Authenticator) Auth(ctx context.Context, cfg *Config) error { - requestDoc := createFirstX509Message(cfg.Description, a.User) + requestDoc := createFirstX509Message() authCmd := operation. 
NewCommand(requestDoc). Database("$external"). diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go index a3f21f96c..fefcfdb47 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go @@ -10,22 +10,31 @@ import ( "context" "errors" "fmt" + "io" "strings" + "time" + "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/event" - "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/internal/codecutil" + "go.mongodb.org/mongo-driver/internal/csot" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver/session" ) +// ErrNoCursor is returned by NewCursorResponse when the database response does +// not contain a cursor. +var ErrNoCursor = errors.New("database response does not contain a cursor") + // BatchCursor is a batch implementation of a cursor. It returns documents in entire batches instead // of one at a time. An individual document cursor can be built on top of this batch cursor. type BatchCursor struct { clientSession *session.Client clock *session.ClusterClock - comment bsoncore.Value + comment interface{} + encoderFn codecutil.EncoderFn database string collection string id int64 @@ -62,17 +71,27 @@ type CursorResponse struct { postBatchResumeToken bsoncore.Document } -// NewCursorResponse constructs a cursor response from the given response and server. This method -// can be used within the ProcessResponse method for an operation. +// NewCursorResponse constructs a cursor response from the given response and +// server. If the provided database response does not contain a cursor, it +// returns ErrNoCursor. +// +// NewCursorResponse can be used within the ProcessResponse method for an operation. 
func NewCursorResponse(info ResponseInfo) (CursorResponse, error) { response := info.ServerResponse - cur, ok := response.Lookup("cursor").DocumentOK() + cur, err := response.LookupErr("cursor") + if err == bsoncore.ErrElementNotFound { + return CursorResponse{}, ErrNoCursor + } + if err != nil { + return CursorResponse{}, fmt.Errorf("error getting cursor from database response: %w", err) + } + curDoc, ok := cur.DocumentOK() if !ok { - return CursorResponse{}, fmt.Errorf("cursor should be an embedded document but is of BSON type %s", response.Lookup("cursor").Type) + return CursorResponse{}, fmt.Errorf("cursor should be an embedded document but is BSON type %s", cur.Type) } - elems, err := cur.Elements() + elems, err := curDoc.Elements() if err != nil { - return CursorResponse{}, err + return CursorResponse{}, fmt.Errorf("error getting elements from cursor: %w", err) } curresp := CursorResponse{Server: info.Server, Desc: info.ConnectionDescription} @@ -133,13 +152,14 @@ func NewCursorResponse(info ResponseInfo) (CursorResponse, error) { // CursorOptions are extra options that are required to construct a BatchCursor. type CursorOptions struct { - BatchSize int32 - Comment bsoncore.Value - MaxTimeMS int64 - Limit int32 - CommandMonitor *event.CommandMonitor - Crypt Crypt - ServerAPI *ServerAPIOptions + BatchSize int32 + Comment bsoncore.Value + MaxTimeMS int64 + Limit int32 + CommandMonitor *event.CommandMonitor + Crypt Crypt + ServerAPI *ServerAPIOptions + MarshalValueEncoderFn func(io.Writer) (*bson.Encoder, error) } // NewBatchCursor creates a new BatchCursor from the provided parameters. 
@@ -163,12 +183,13 @@ func NewBatchCursor(cr CursorResponse, clientSession *session.Client, clock *ses crypt: opts.Crypt, serverAPI: opts.ServerAPI, serverDescription: cr.Desc, + encoderFn: opts.MarshalValueEncoderFn, } if ds != nil { bc.numReturned = int32(ds.DocumentCount()) } - if cr.Desc.WireVersion == nil || cr.Desc.WireVersion.Max < 4 { + if cr.Desc.WireVersion == nil { bc.limit = opts.Limit // Take as many documents from the batch as needed. @@ -305,6 +326,12 @@ func (bc *BatchCursor) KillCursor(ctx context.Context) error { Legacy: LegacyKillCursors, CommandMonitor: bc.cmdMonitor, ServerAPI: bc.serverAPI, + + // No read preference is passed to the killCursor command, + // resulting in the default read preference: "primaryPreferred". + // Since this could be confusing, and there is no requirement + // to use a read preference here, we omit it. + omitReadPreference: true, }.Execute(ctx) } @@ -351,10 +378,17 @@ func (bc *BatchCursor) getMore(ctx context.Context) { if bc.maxTimeMS > 0 { dst = bsoncore.AppendInt64Element(dst, "maxTimeMS", bc.maxTimeMS) } + + comment, err := codecutil.MarshalValue(bc.comment, bc.encoderFn) + if err != nil { + return nil, fmt.Errorf("error marshaling comment as a BSON value: %w", err) + } + // The getMore command does not support commenting pre-4.4. - if bc.comment.Type != bsontype.Type(0) && bc.serverDescription.WireVersion.Max >= 9 { - dst = bsoncore.AppendValueElement(dst, "comment", bc.comment) + if comment.Type != bsontype.Type(0) && bc.serverDescription.WireVersion.Max >= 9 { + dst = bsoncore.AppendValueElement(dst, "comment", comment) } + return dst, nil }, Database: bc.database, @@ -398,6 +432,12 @@ func (bc *BatchCursor) getMore(ctx context.Context) { CommandMonitor: bc.cmdMonitor, Crypt: bc.crypt, ServerAPI: bc.serverAPI, + + // No read preference is passed to the getMore command, + // resulting in the default read preference: "primaryPreferred". 
+ // Since this could be confusing, and there is no requirement + // to use a read preference here, we omit it. + omitReadPreference: true, }.Execute(ctx) // Once the cursor has been drained, we can unpin the connection if one is currently pinned. @@ -430,11 +470,26 @@ func (bc *BatchCursor) PostBatchResumeToken() bsoncore.Document { return bc.postBatchResumeToken } -// SetBatchSize sets the batchSize for future getMores. +// SetBatchSize sets the batchSize for future getMore operations. func (bc *BatchCursor) SetBatchSize(size int32) { bc.batchSize = size } +// SetMaxTime will set the maximum amount of time the server will allow the +// operations to execute. The server will error if this field is set but the +// cursor is not configured with awaitData=true. +// +// The time.Duration value passed by this setter will be converted and rounded +// down to the nearest millisecond. +func (bc *BatchCursor) SetMaxTime(dur time.Duration) { + bc.maxTimeMS = int64(dur / time.Millisecond) +} + +// SetComment sets the comment for future getMore operations. +func (bc *BatchCursor) SetComment(comment interface{}) { + bc.comment = comment +} + func (bc *BatchCursor) getOperationDeployment() Deployment { if bc.connection != nil { return &loadBalancedCursorDeployment{ @@ -471,7 +526,7 @@ func (lbcd *loadBalancedCursorDeployment) Connection(_ context.Context) (Connect // RTTMonitor implements the driver.Server interface. 
func (lbcd *loadBalancedCursorDeployment) RTTMonitor() RTTMonitor { - return &internal.ZeroRTTMonitor{} + return &csot.ZeroRTTMonitor{} } func (lbcd *loadBalancedCursorDeployment) ProcessError(err error, conn Connection) ProcessErrorResult { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batches.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batches.go index 3e7dca9ac..be430afa1 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batches.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batches.go @@ -17,7 +17,7 @@ import ( var ErrDocumentTooLarge = errors.New("an inserted document is too large") // Batches contains the necessary information to batch split an operation. This is only used for write -// oeprations. +// operations. type Batches struct { Identifier string Documents []bsoncore.Document diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go index c474714ff..d79b024b7 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go @@ -26,48 +26,72 @@ type CompressionOpts struct { UncompressedSize int32 } -var zstdEncoders sync.Map // map[zstd.EncoderLevel]*zstd.Encoder +// mustZstdNewWriter creates a zstd.Encoder with the given level and a nil +// destination writer. It panics on any errors and should only be used at +// package initialization time. 
+func mustZstdNewWriter(lvl zstd.EncoderLevel) *zstd.Encoder { + enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(lvl)) + if err != nil { + panic(err) + } + return enc +} + +var zstdEncoders = [zstd.SpeedBestCompression + 1]*zstd.Encoder{ + 0: nil, // zstd.speedNotSet + zstd.SpeedFastest: mustZstdNewWriter(zstd.SpeedFastest), + zstd.SpeedDefault: mustZstdNewWriter(zstd.SpeedDefault), + zstd.SpeedBetterCompression: mustZstdNewWriter(zstd.SpeedBetterCompression), + zstd.SpeedBestCompression: mustZstdNewWriter(zstd.SpeedBestCompression), +} func getZstdEncoder(level zstd.EncoderLevel) (*zstd.Encoder, error) { - if v, ok := zstdEncoders.Load(level); ok { - return v.(*zstd.Encoder), nil - } - encoder, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(level)) - if err != nil { - return nil, err + if zstd.SpeedFastest <= level && level <= zstd.SpeedBestCompression { + return zstdEncoders[level], nil } - zstdEncoders.Store(level, encoder) - return encoder, nil + // The level is outside the expected range, return an error. + return nil, fmt.Errorf("invalid zstd compression level: %d", level) } -var zlibEncoders sync.Map // map[int /*level*/]*zlibEncoder +// zlibEncodersOffset is the offset into the zlibEncoders array for a given +// compression level. 
+const zlibEncodersOffset = -zlib.HuffmanOnly // HuffmanOnly == -2 + +var zlibEncoders [zlib.BestCompression + zlibEncodersOffset + 1]sync.Pool func getZlibEncoder(level int) (*zlibEncoder, error) { - if v, ok := zlibEncoders.Load(level); ok { - return v.(*zlibEncoder), nil - } - writer, err := zlib.NewWriterLevel(nil, level) - if err != nil { - return nil, err + if zlib.HuffmanOnly <= level && level <= zlib.BestCompression { + if enc, _ := zlibEncoders[level+zlibEncodersOffset].Get().(*zlibEncoder); enc != nil { + return enc, nil + } + writer, err := zlib.NewWriterLevel(nil, level) + if err != nil { + return nil, err + } + enc := &zlibEncoder{writer: writer, level: level} + return enc, nil } - encoder := &zlibEncoder{writer: writer, buf: new(bytes.Buffer)} - zlibEncoders.Store(level, encoder) + // The level is outside the expected range, return an error. + return nil, fmt.Errorf("invalid zlib compression level: %d", level) +} - return encoder, nil +func putZlibEncoder(enc *zlibEncoder) { + if enc != nil { + zlibEncoders[enc.level+zlibEncodersOffset].Put(enc) + } } type zlibEncoder struct { - mu sync.Mutex writer *zlib.Writer - buf *bytes.Buffer + buf bytes.Buffer + level int } func (e *zlibEncoder) Encode(dst, src []byte) ([]byte, error) { - e.mu.Lock() - defer e.mu.Unlock() + defer putZlibEncoder(e) e.buf.Reset() - e.writer.Reset(e.buf) + e.writer.Reset(&e.buf) _, err := e.writer.Write(src) if err != nil { @@ -105,40 +129,48 @@ func CompressPayload(in []byte, opts CompressionOpts) ([]byte, error) { } } +var zstdReaderPool = sync.Pool{ + New: func() interface{} { + r, _ := zstd.NewReader(nil) + return r + }, +} + // DecompressPayload takes a byte slice that has been compressed and undoes it according to the options passed -func DecompressPayload(in []byte, opts CompressionOpts) (uncompressed []byte, err error) { +func DecompressPayload(in []byte, opts CompressionOpts) ([]byte, error) { switch opts.Compressor { case wiremessage.CompressorNoOp: return in, nil case 
wiremessage.CompressorSnappy: - uncompressed = make([]byte, opts.UncompressedSize) - return snappy.Decode(uncompressed, in) - case wiremessage.CompressorZLib: - r, err := zlib.NewReader(bytes.NewReader(in)) + l, err := snappy.DecodedLen(in) if err != nil { - return nil, err + return nil, fmt.Errorf("decoding compressed length %w", err) + } else if int32(l) != opts.UncompressedSize { + return nil, fmt.Errorf("unexpected decompression size, expected %v but got %v", opts.UncompressedSize, l) } - defer func() { - err = r.Close() - }() - uncompressed = make([]byte, opts.UncompressedSize) - _, err = io.ReadFull(r, uncompressed) + out := make([]byte, opts.UncompressedSize) + return snappy.Decode(out, in) + case wiremessage.CompressorZLib: + r, err := zlib.NewReader(bytes.NewReader(in)) if err != nil { return nil, err } - return uncompressed, nil - case wiremessage.CompressorZstd: - r, err := zstd.NewReader(bytes.NewBuffer(in)) - if err != nil { + out := make([]byte, opts.UncompressedSize) + if _, err := io.ReadFull(r, out); err != nil { return nil, err } - defer r.Close() - uncompressed = make([]byte, opts.UncompressedSize) - _, err = io.ReadFull(r, uncompressed) - if err != nil { + if err := r.Close(); err != nil { return nil, err } - return uncompressed, nil + return out, nil + case wiremessage.CompressorZstd: + buf := make([]byte, 0, opts.UncompressedSize) + // Using a pool here is about ~20% faster + // than using a single global zstd.Reader + r := zstdReaderPool.Get().(*zstd.Decoder) + out, err := r.DecodeAll(in, buf) + zstdReaderPool.Put(r) + return out, err default: return nil, fmt.Errorf("unknown compressor ID %v", opts.Compressor) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go index 6f03a5857..cd4313647 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go +++ 
b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go @@ -15,13 +15,59 @@ import ( "strings" "time" - "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/internal/randutil" "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/mongo/driver/dns" "go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage" ) +const ( + // ServerMonitoringModeAuto indicates that the client will behave like "poll" + // mode when running on a FaaS (Function as a Service) platform, or like + // "stream" mode otherwise. The client detects its execution environment by + // following the rules for generating the "client.env" handshake metadata field + // as specified in the MongoDB Handshake specification. This is the default + // mode. + ServerMonitoringModeAuto = "auto" + + // ServerMonitoringModePoll indicates that the client will periodically check + // the server using a hello or legacy hello command and then sleep for + // heartbeatFrequencyMS milliseconds before running another check. + ServerMonitoringModePoll = "poll" + + // ServerMonitoringModeStream indicates that the client will use a streaming + // protocol when the server supports it. The streaming protocol optimally + // reduces the time it takes for a client to discover server state changes. + ServerMonitoringModeStream = "stream" +) + +var ( + // ErrLoadBalancedWithMultipleHosts is returned when loadBalanced=true is + // specified in a URI with multiple hosts. + ErrLoadBalancedWithMultipleHosts = errors.New( + "loadBalanced cannot be set to true if multiple hosts are specified") + + // ErrLoadBalancedWithReplicaSet is returned when loadBalanced=true is + // specified in a URI with the replicaSet option. + ErrLoadBalancedWithReplicaSet = errors.New( + "loadBalanced cannot be set to true if a replica set name is specified") + + // ErrLoadBalancedWithDirectConnection is returned when loadBalanced=true is + // specified in a URI with the directConnection option. 
+ ErrLoadBalancedWithDirectConnection = errors.New( + "loadBalanced cannot be set to true if the direct connection option is specified") + + // ErrSRVMaxHostsWithReplicaSet is returned when srvMaxHosts > 0 is + // specified in a URI with the replicaSet option. + ErrSRVMaxHostsWithReplicaSet = errors.New( + "srvMaxHosts cannot be a positive value if a replica set name is specified") + + // ErrSRVMaxHostsWithLoadBalanced is returned when srvMaxHosts > 0 is + // specified in a URI with loadBalanced=true. + ErrSRVMaxHostsWithLoadBalanced = errors.New( + "srvMaxHosts cannot be a positive value if loadBalanced is set to true") +) + // random is a package-global pseudo-random number generator. var random = randutil.NewLockedRand() @@ -31,11 +77,11 @@ func ParseAndValidate(s string) (ConnString, error) { p := parser{dnsResolver: dns.DefaultResolver} err := p.parse(s) if err != nil { - return p.ConnString, internal.WrapErrorf(err, "error parsing uri") + return p.ConnString, fmt.Errorf("error parsing uri: %w", err) } err = p.ConnString.Validate() if err != nil { - return p.ConnString, internal.WrapErrorf(err, "error validating uri") + return p.ConnString, fmt.Errorf("error validating uri: %w", err) } return p.ConnString, nil } @@ -47,7 +93,7 @@ func Parse(s string) (ConnString, error) { p := parser{dnsResolver: dns.DefaultResolver} err := p.parse(s) if err != nil { - err = internal.WrapErrorf(err, "error parsing uri") + err = fmt.Errorf("error parsing uri: %w", err) } return p.ConnString, err } @@ -99,6 +145,7 @@ type ConnString struct { MaxStalenessSet bool ReplicaSet string Scheme string + ServerMonitoringMode string ServerSelectionTimeout time.Duration ServerSelectionTimeoutSet bool SocketTimeout time.Duration @@ -213,7 +260,7 @@ func (p *parser) parse(original string) error { // remove the scheme uri = uri[len(SchemeMongoDB)+3:] } else { - return fmt.Errorf("scheme must be \"mongodb\" or \"mongodb+srv\"") + return errors.New(`scheme must be "mongodb" or "mongodb+srv"`) } 
if idx := strings.Index(uri, "@"); idx != -1 { @@ -235,7 +282,7 @@ func (p *parser) parse(original string) error { } p.Username, err = url.PathUnescape(username) if err != nil { - return internal.WrapErrorf(err, "invalid username") + return fmt.Errorf("invalid username: %w", err) } p.UsernameSet = true @@ -248,7 +295,7 @@ func (p *parser) parse(original string) error { } p.Password, err = url.PathUnescape(password) if err != nil { - return internal.WrapErrorf(err, "invalid password") + return fmt.Errorf("invalid password: %w", err) } } @@ -325,7 +372,7 @@ func (p *parser) parse(original string) error { for _, host := range parsedHosts { err = p.addHost(host) if err != nil { - return internal.WrapErrorf(err, "invalid host %q", host) + return fmt.Errorf("invalid host %q: %w", host, err) } } if len(p.Hosts) == 0 { @@ -371,27 +418,27 @@ func (p *parser) validate() error { return errors.New("a direct connection cannot be made if an SRV URI is used") } if p.LoadBalancedSet && p.LoadBalanced { - return internal.ErrLoadBalancedWithDirectConnection + return ErrLoadBalancedWithDirectConnection } } // Validation for load-balanced mode. if p.LoadBalancedSet && p.LoadBalanced { if len(p.Hosts) > 1 { - return internal.ErrLoadBalancedWithMultipleHosts + return ErrLoadBalancedWithMultipleHosts } if p.ReplicaSet != "" { - return internal.ErrLoadBalancedWithReplicaSet + return ErrLoadBalancedWithReplicaSet } } // Check for invalid use of SRVMaxHosts. 
if p.SRVMaxHosts > 0 { if p.ReplicaSet != "" { - return internal.ErrSRVMaxHostsWithReplicaSet + return ErrSRVMaxHostsWithReplicaSet } if p.LoadBalanced { - return internal.ErrSRVMaxHostsWithLoadBalanced + return ErrSRVMaxHostsWithLoadBalanced } } @@ -570,7 +617,7 @@ func (p *parser) addHost(host string) error { } host, err := url.QueryUnescape(host) if err != nil { - return internal.WrapErrorf(err, "invalid host %q", host) + return fmt.Errorf("invalid host %q: %w", host, err) } _, port, err := net.SplitHostPort(host) @@ -585,7 +632,7 @@ func (p *parser) addHost(host string) error { if port != "" { d, err := strconv.Atoi(port) if err != nil { - return internal.WrapErrorf(err, "port must be an integer") + return fmt.Errorf("port must be an integer: %w", err) } if d <= 0 || d >= 65536 { return fmt.Errorf("port must be in the range [1, 65535]") @@ -595,6 +642,14 @@ func (p *parser) addHost(host string) error { return nil } +// IsValidServerMonitoringMode will return true if the given string matches a +// valid server monitoring mode. 
+func IsValidServerMonitoringMode(mode string) bool { + return mode == ServerMonitoringModeAuto || + mode == ServerMonitoringModeStream || + mode == ServerMonitoringModePoll +} + func (p *parser) addOption(pair string) error { kv := strings.SplitN(pair, "=", 2) if len(kv) != 2 || kv[0] == "" { @@ -603,12 +658,12 @@ func (p *parser) addOption(pair string) error { key, err := url.QueryUnescape(kv[0]) if err != nil { - return internal.WrapErrorf(err, "invalid option key %q", kv[0]) + return fmt.Errorf("invalid option key %q: %w", kv[0], err) } value, err := url.QueryUnescape(kv[1]) if err != nil { - return internal.WrapErrorf(err, "invalid option value %q", kv[1]) + return fmt.Errorf("invalid option value %q: %w", kv[1], err) } lowerKey := strings.ToLower(key) @@ -797,6 +852,12 @@ func (p *parser) addOption(pair string) error { } p.RetryReadsSet = true + case "servermonitoringmode": + if !IsValidServerMonitoringMode(value) { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + + p.ServerMonitoringMode = value case "serverselectiontimeoutms": n, err := strconv.Atoi(value) if err != nil || n < 0 { @@ -1024,7 +1085,7 @@ func extractDatabaseFromURI(uri string) (extractedDatabase, error) { escapedDatabase, err := url.QueryUnescape(database) if err != nil { - return extractedDatabase{}, internal.WrapErrorf(err, "invalid database %q", database) + return extractedDatabase{}, fmt.Errorf("invalid database %q: %w", database, err) } uri = uri[len(database):] diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go index cd918fc46..4c254c03c 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go @@ -9,17 +9,12 @@ package driver import ( "context" "crypto/tls" - "encoding/json" "fmt" "io" - "io/ioutil" - "net/http" - "os" "strings" "time" "go.mongodb.org/mongo-driver/bson/bsontype" - 
"go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt" "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options" @@ -46,7 +41,6 @@ type CryptOptions struct { KeyFn KeyRetrieverFn MarkFn MarkCommandFn TLSConfig map[string]*tls.Config - HTTPClient *http.Client BypassAutoEncryption bool BypassQueryAnalysis bool } @@ -65,6 +59,8 @@ type Crypt interface { CreateDataKey(ctx context.Context, kmsProvider string, opts *options.DataKeyOptions) (bsoncore.Document, error) // EncryptExplicit encrypts the given value with the given options. EncryptExplicit(ctx context.Context, val bsoncore.Value, opts *options.ExplicitEncryptionOptions) (byte, []byte, error) + // EncryptExplicitExpression encrypts the given expression with the given options. + EncryptExplicitExpression(ctx context.Context, val bsoncore.Document, opts *options.ExplicitEncryptionOptions) (bsoncore.Document, error) // DecryptExplicit decrypts the given encrypted value. DecryptExplicit(ctx context.Context, subtype byte, data []byte) (bsoncore.Value, error) // Close cleans up any resources associated with the Crypt instance. @@ -84,7 +80,6 @@ type crypt struct { keyFn KeyRetrieverFn markFn MarkCommandFn tlsConfig map[string]*tls.Config - httpClient *http.Client bypassAutoEncryption bool } @@ -97,12 +92,8 @@ func NewCrypt(opts *CryptOptions) Crypt { keyFn: opts.KeyFn, markFn: opts.MarkFn, tlsConfig: opts.TLSConfig, - httpClient: opts.HTTPClient, bypassAutoEncryption: opts.BypassAutoEncryption, } - if c.httpClient == nil { - c.httpClient = internal.DefaultHTTPClient - } return c } @@ -215,6 +206,27 @@ func (c *crypt) EncryptExplicit(ctx context.Context, val bsoncore.Value, opts *o return sub, data, nil } +// EncryptExplicitExpression encrypts the given expression with the given options. 
+func (c *crypt) EncryptExplicitExpression(ctx context.Context, expr bsoncore.Document, opts *options.ExplicitEncryptionOptions) (bsoncore.Document, error) { + idx, doc := bsoncore.AppendDocumentStart(nil) + doc = bsoncore.AppendDocumentElement(doc, "v", expr) + doc, _ = bsoncore.AppendDocumentEnd(doc, idx) + + cryptCtx, err := c.mongoCrypt.CreateExplicitEncryptionExpressionContext(doc, opts) + if err != nil { + return nil, err + } + defer cryptCtx.Close() + + res, err := c.executeStateMachine(ctx, cryptCtx, "") + if err != nil { + return nil, err + } + + encryptedExpr := res.Lookup("v").Document() + return encryptedExpr, nil +} + // DecryptExplicit decrypts the given encrypted value. func (c *crypt) DecryptExplicit(ctx context.Context, subtype byte, data []byte) (bsoncore.Value, error) { idx, doc := bsoncore.AppendDocumentStart(nil) @@ -238,9 +250,6 @@ func (c *crypt) DecryptExplicit(ctx context.Context, subtype byte, data []byte) // Close cleans up any resources associated with the Crypt instance. func (c *crypt) Close() { c.mongoCrypt.Close() - if c.httpClient == internal.DefaultHTTPClient { - internal.CloseIdleHTTPConnections(c.httpClient) - } } func (c *crypt) BypassAutoEncryption() bool { @@ -400,74 +409,10 @@ func (c *crypt) decryptKey(kmsCtx *mongocrypt.KmsContext) error { } } -// needsKmsProvider returns true if provider was initially set to an empty document. -// An empty document signals the driver to fetch credentials. -func needsKmsProvider(kmsProviders bsoncore.Document, provider string) bool { - val, err := kmsProviders.LookupErr(provider) - if err != nil { - // KMS provider is not configured. - return false - } - doc, ok := val.DocumentOK() - // KMS provider is an empty document. 
- return ok && len(doc) == 5 -} - -func getGCPAccessToken(ctx context.Context, httpClient *http.Client) (string, error) { - metadataHost := "metadata.google.internal" - if envhost := os.Getenv("GCE_METADATA_HOST"); envhost != "" { - metadataHost = envhost - } - url := fmt.Sprintf("http://%s/computeMetadata/v1/instance/service-accounts/default/token", metadataHost) - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return "", internal.WrapErrorf(err, "unable to retrieve GCP credentials") - } - req.Header.Set("Metadata-Flavor", "Google") - resp, err := httpClient.Do(req.WithContext(ctx)) - if err != nil { - return "", internal.WrapErrorf(err, "unable to retrieve GCP credentials") - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", internal.WrapErrorf(err, "unable to retrieve GCP credentials: error reading response body") - } - if resp.StatusCode != http.StatusOK { - return "", internal.WrapErrorf(err, "unable to retrieve GCP credentials: expected StatusCode 200, got StatusCode: %v. Response body: %s", resp.StatusCode, body) - } - var tokenResponse struct { - AccessToken string `json:"access_token"` - } - // Attempt to read body as JSON - err = json.Unmarshal(body, &tokenResponse) - if err != nil { - return "", internal.WrapErrorf(err, "unable to retrieve GCP credentials: error reading body JSON. Response body: %s", body) - } - if tokenResponse.AccessToken == "" { - return "", fmt.Errorf("unable to retrieve GCP credentials: got unexpected empty accessToken from GCP Metadata Server. Response body: %s", body) - } - return tokenResponse.AccessToken, nil -} - func (c *crypt) provideKmsProviders(ctx context.Context, cryptCtx *mongocrypt.Context) error { - kmsProviders := c.mongoCrypt.GetKmsProviders() - builder := bsoncore.NewDocumentBuilder() - - if needsKmsProvider(kmsProviders, "gcp") { - // "gcp" KMS provider is an empty document. - // Attempt to fetch from GCP Instance Metadata server. 
- { - token, err := getGCPAccessToken(ctx, c.httpClient) - if err != nil { - return err - } - builder.StartDocument("gcp"). - AppendString("accessToken", token). - FinishDocument() - - } + kmsProviders, err := c.mongoCrypt.GetKmsProviders(ctx) + if err != nil { + return err } - - return cryptCtx.ProvideKmsProviders(builder.Build()) + return cryptCtx.ProvideKmsProviders(kmsProviders) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go index 16268b593..848554d3a 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go @@ -80,7 +80,9 @@ func (r *Resolver) fetchSeedlistFromSRV(host string, srvName string, stopOnErr b srvName = "mongodb" } _, addresses, err := r.LookupSRV(srvName, "tcp", host) - if err != nil { + if err != nil && strings.Contains(err.Error(), "cannot unmarshal DNS message") { + return nil, fmt.Errorf("see https://pkg.go.dev/go.mongodb.org/mongo-driver/mongo#hdr-Potential_DNS_Issues: %w", err) + } else if err != nil { return nil, err } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go index 38a0a2d13..5fd3ddcb4 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go @@ -10,7 +10,7 @@ import ( "context" "time" - "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/internal/csot" "go.mongodb.org/mongo-driver/mongo/address" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -89,7 +89,7 @@ type RTTMonitor interface { Stats() string } -var _ RTTMonitor = &internal.ZeroRTTMonitor{} +var _ RTTMonitor = &csot.ZeroRTTMonitor{} // PinnedConnection represents a Connection that can be pinned by one or more cursors or transactions. 
Implementations // of this interface should maintain the following invariants: @@ -210,21 +210,21 @@ var _ Server = SingleConnectionDeployment{} // SelectServer implements the Deployment interface. This method does not use the // description.SelectedServer provided and instead returns itself. The Connections returned from the // Connection method have a no-op Close method. -func (ssd SingleConnectionDeployment) SelectServer(context.Context, description.ServerSelector) (Server, error) { - return ssd, nil +func (scd SingleConnectionDeployment) SelectServer(context.Context, description.ServerSelector) (Server, error) { + return scd, nil } // Kind implements the Deployment interface. It always returns description.Single. -func (ssd SingleConnectionDeployment) Kind() description.TopologyKind { return description.Single } +func (SingleConnectionDeployment) Kind() description.TopologyKind { return description.Single } // Connection implements the Server interface. It always returns the embedded connection. -func (ssd SingleConnectionDeployment) Connection(context.Context) (Connection, error) { - return ssd.C, nil +func (scd SingleConnectionDeployment) Connection(context.Context) (Connection, error) { + return scd.C, nil } // RTTMonitor implements the driver.Server interface. 
-func (ssd SingleConnectionDeployment) RTTMonitor() RTTMonitor { - return &internal.ZeroRTTMonitor{} +func (scd SingleConnectionDeployment) RTTMonitor() RTTMonitor { + return &csot.ZeroRTTMonitor{} } // TODO(GODRIVER-617): We can likely use 1 type for both the Type and the RetryMode by using 2 bits for the mode and 1 diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go index cb56b84f5..3b8b9823b 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go @@ -8,16 +8,21 @@ package driver import ( "bytes" + "context" "errors" "fmt" "strings" "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" ) +// LegacyNotPrimaryErrMsg is the error message that older MongoDB servers (see +// SERVER-50412 for versions) return when a write operation is erroneously sent +// to a non-primary node. +const LegacyNotPrimaryErrMsg = "not master" + var ( retryableCodes = []int32{11600, 11602, 10107, 13435, 13436, 189, 91, 7, 6, 89, 9001, 262} nodeIsRecoveringCodes = []int32{11600, 11602, 13436, 189, 91} @@ -35,7 +40,7 @@ var ( TransientTransactionError = "TransientTransactionError" // NetworkError is an error label for network errors. NetworkError = "NetworkError" - // RetryableWriteError is an error lable for retryable write errors. + // RetryableWriteError is an error label for retryable write errors. RetryableWriteError = "RetryableWriteError" // NoWritesPerformed is an error label indicated that no writes were performed for an operation. 
NoWritesPerformed = "NoWritesPerformed" @@ -47,9 +52,12 @@ var ( // ErrUnsupportedStorageEngine is returned when a retryable write is attempted against a server // that uses a storage engine that does not support retryable writes ErrUnsupportedStorageEngine = errors.New("this MongoDB deployment does not support retryable writes. Please add retryWrites=false to your connection string") - // ErrDeadlineWouldBeExceeded is returned when a Timeout set on an operation would be exceeded - // if the operation were sent to the server. - ErrDeadlineWouldBeExceeded = errors.New("operation not sent to server, as Timeout would be exceeded") + // ErrDeadlineWouldBeExceeded is returned when a Timeout set on an operation + // would be exceeded if the operation were sent to the server. It wraps + // context.DeadlineExceeded. + ErrDeadlineWouldBeExceeded = fmt.Errorf( + "operation not sent to server, as Timeout would be exceeded: %w", + context.DeadlineExceeded) // ErrNegativeMaxTime is returned when MaxTime on an operation is a negative value. ErrNegativeMaxTime = errors.New("a negative value was provided for MaxTime on an operation") ) @@ -206,7 +214,7 @@ func (wce WriteConcernError) NotPrimary() bool { } } hasNoCode := wce.Code == 0 - return hasNoCode && strings.Contains(wce.Message, internal.LegacyNotPrimary) + return hasNoCode && strings.Contains(wce.Message, LegacyNotPrimaryErrMsg) } // WriteError is a non-write concern failure that occurred as a result of a write @@ -256,10 +264,15 @@ func (e Error) UnsupportedStorageEngine() bool { // Error implements the error interface. func (e Error) Error() string { + var msg string if e.Name != "" { - return fmt.Sprintf("(%v) %v", e.Name, e.Message) + msg = fmt.Sprintf("(%v)", e.Name) } - return e.Message + msg += " " + e.Message + if e.Wrapped != nil { + msg += ": " + e.Wrapped.Error() + } + return msg } // Unwrap returns the underlying error. 
@@ -354,7 +367,7 @@ func (e Error) NotPrimary() bool { } } hasNoCode := e.Code == 0 - return hasNoCode && strings.Contains(e.Message, internal.LegacyNotPrimary) + return hasNoCode && strings.Contains(e.Message, LegacyNotPrimaryErrMsg) } // NamespaceNotFound returns true if this errors is a NamespaceNotFound error. @@ -392,6 +405,10 @@ func ExtractErrorFromServerResponse(doc bsoncore.Document) error { if elem.Value().Double() == 1 { ok = true } + case bson.TypeBoolean: + if elem.Value().Boolean() { + ok = true + } } case "errmsg": if str, okay := elem.Value().StringValueOK(); okay { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/legacy.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/legacy.go index 9f3b8a39a..c40f1f809 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/legacy.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/legacy.go @@ -19,4 +19,5 @@ const ( LegacyKillCursors LegacyListCollections LegacyListIndexes + LegacyHandshake ) diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/list_collections_batch_cursor.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/list_collections_batch_cursor.go deleted file mode 100644 index 3917218b7..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/list_collections_batch_cursor.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package driver - -import ( - "context" - "errors" - "io" - "strings" - - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// ListCollectionsBatchCursor is a special batch cursor returned from ListCollections that properly -// handles current and legacy ListCollections operations. 
-type ListCollectionsBatchCursor struct { - legacy bool // server version < 3.0 - bc *BatchCursor - currentBatch *bsoncore.DocumentSequence - err error -} - -// NewListCollectionsBatchCursor creates a new non-legacy ListCollectionsCursor. -func NewListCollectionsBatchCursor(bc *BatchCursor) (*ListCollectionsBatchCursor, error) { - if bc == nil { - return nil, errors.New("batch cursor must not be nil") - } - return &ListCollectionsBatchCursor{bc: bc, currentBatch: new(bsoncore.DocumentSequence)}, nil -} - -// NewLegacyListCollectionsBatchCursor creates a new legacy ListCollectionsCursor. -func NewLegacyListCollectionsBatchCursor(bc *BatchCursor) (*ListCollectionsBatchCursor, error) { - if bc == nil { - return nil, errors.New("batch cursor must not be nil") - } - return &ListCollectionsBatchCursor{legacy: true, bc: bc, currentBatch: new(bsoncore.DocumentSequence)}, nil -} - -// ID returns the cursor ID for this batch cursor. -func (lcbc *ListCollectionsBatchCursor) ID() int64 { - return lcbc.bc.ID() -} - -// Next indicates if there is another batch available. Returning false does not necessarily indicate -// that the cursor is closed. This method will return false when an empty batch is returned. -// -// If Next returns true, there is a valid batch of documents available. If Next returns false, there -// is not a valid batch of documents available. 
-func (lcbc *ListCollectionsBatchCursor) Next(ctx context.Context) bool { - if !lcbc.bc.Next(ctx) { - return false - } - - if !lcbc.legacy { - lcbc.currentBatch.Style = lcbc.bc.currentBatch.Style - lcbc.currentBatch.Data = lcbc.bc.currentBatch.Data - lcbc.currentBatch.ResetIterator() - return true - } - - lcbc.currentBatch.Style = bsoncore.SequenceStyle - lcbc.currentBatch.Data = lcbc.currentBatch.Data[:0] - - var doc bsoncore.Document - for { - doc, lcbc.err = lcbc.bc.currentBatch.Next() - if lcbc.err != nil { - if lcbc.err == io.EOF { - lcbc.err = nil - break - } - return false - } - doc, lcbc.err = lcbc.projectNameElement(doc) - if lcbc.err != nil { - return false - } - lcbc.currentBatch.Data = append(lcbc.currentBatch.Data, doc...) - } - - return true -} - -// Batch will return a DocumentSequence for the current batch of documents. The returned -// DocumentSequence is only valid until the next call to Next or Close. -func (lcbc *ListCollectionsBatchCursor) Batch() *bsoncore.DocumentSequence { return lcbc.currentBatch } - -// Server returns a pointer to the cursor's server. -func (lcbc *ListCollectionsBatchCursor) Server() Server { return lcbc.bc.server } - -// Err returns the latest error encountered. -func (lcbc *ListCollectionsBatchCursor) Err() error { - if lcbc.err != nil { - return lcbc.err - } - return lcbc.bc.Err() -} - -// Close closes this batch cursor. -func (lcbc *ListCollectionsBatchCursor) Close(ctx context.Context) error { return lcbc.bc.Close(ctx) } - -// project out the database name for a legacy server -func (*ListCollectionsBatchCursor) projectNameElement(rawDoc bsoncore.Document) (bsoncore.Document, error) { - elems, err := rawDoc.Elements() - if err != nil { - return nil, err - } - - var filteredElems []byte - for _, elem := range elems { - key := elem.Key() - if key != "name" { - filteredElems = append(filteredElems, elem...) 
- continue - } - - name := elem.Value().StringValue() - collName := name[strings.Index(name, ".")+1:] - filteredElems = bsoncore.AppendStringElement(filteredElems, "name", collName) - } - - var filteredDoc []byte - filteredDoc = bsoncore.BuildDocument(filteredDoc, filteredElems) - return filteredDoc, nil -} - -// SetBatchSize sets the batchSize for future getMores. -func (lcbc *ListCollectionsBatchCursor) SetBatchSize(size int32) { - lcbc.bc.SetBatchSize(size) -} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/binary.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/binary.go index 9e887375a..4e4b51d74 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/binary.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/binary.go @@ -9,7 +9,10 @@ package mongocrypt -// #include +/* +#include +#include +*/ import "C" import ( "unsafe" @@ -17,6 +20,7 @@ import ( // binary is a wrapper type around a mongocrypt_binary_t* type binary struct { + p *C.uint8_t wrapped *C.mongocrypt_binary_t } @@ -33,11 +37,11 @@ func newBinaryFromBytes(data []byte) *binary { return newBinary() } - // We don't need C.CBytes here because data cannot go out of scope. Any mongocrypt function that takes a - // mongocrypt_binary_t will make a copy of the data so the data can be garbage collected after calling. - addr := (*C.uint8_t)(unsafe.Pointer(&data[0])) // uint8_t* - dataLen := C.uint32_t(len(data)) // uint32_t + // TODO: Consider using runtime.Pinner to replace the C.CBytes after using go1.21.0. + addr := (*C.uint8_t)(C.CBytes(data)) // uint8_t* + dataLen := C.uint32_t(len(data)) // uint32_t return &binary{ + p: addr, wrapped: C.mongocrypt_binary_new_from_data(addr, dataLen), } } @@ -52,5 +56,8 @@ func (b *binary) toBytes() []byte { // close cleans up any resources associated with the given binary instance. 
func (b *binary) close() { + if b.p != nil { + C.free(unsafe.Pointer(b.p)) + } C.mongocrypt_binary_destroy(b.wrapped) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt.go index fb96a8219..20f6ff0aa 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt.go @@ -16,18 +16,27 @@ package mongocrypt // #include import "C" import ( + "context" "errors" "fmt" + "net/http" "unsafe" "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/internal/httputil" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" + "go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds" "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options" ) +type kmsProvider interface { + GetCredentialsDoc(context.Context) (bsoncore.Document, error) +} + type MongoCrypt struct { wrapped *C.mongocrypt_t - kmsProviders bsoncore.Document + kmsProviders map[string]kmsProvider + httpClient *http.Client } // Version returns the version string for the loaded libmongocrypt, or an empty string @@ -44,9 +53,24 @@ func NewMongoCrypt(opts *options.MongoCryptOptions) (*MongoCrypt, error) { if wrapped == nil { return nil, errors.New("could not create new mongocrypt object") } + httpClient := opts.HTTPClient + if httpClient == nil { + httpClient = httputil.DefaultHTTPClient + } + kmsProviders := make(map[string]kmsProvider) + if needsKmsProvider(opts.KmsProviders, "gcp") { + kmsProviders["gcp"] = creds.NewGCPCredentialProvider(httpClient) + } + if needsKmsProvider(opts.KmsProviders, "aws") { + kmsProviders["aws"] = creds.NewAWSCredentialProvider(httpClient) + } + if needsKmsProvider(opts.KmsProviders, "azure") { + kmsProviders["azure"] = creds.NewAzureCredentialProvider(httpClient) + } crypt := &MongoCrypt{ wrapped: wrapped, - kmsProviders: opts.KmsProviders, + kmsProviders: kmsProviders, + 
httpClient: httpClient, } // set options in mongocrypt @@ -222,9 +246,8 @@ const ( IndexTypeIndexed = 2 ) -// CreateExplicitEncryptionContext creates a Context to use for explicit encryption. -func (m *MongoCrypt) CreateExplicitEncryptionContext(doc bsoncore.Document, opts *options.ExplicitEncryptionOptions) (*Context, error) { - +// createExplicitEncryptionContext creates an explicit encryption context. +func (m *MongoCrypt) createExplicitEncryptionContext(opts *options.ExplicitEncryptionOptions) (*Context, error) { ctx := newContext(C.mongocrypt_ctx_new(m.wrapped)) if ctx.wrapped == nil { return nil, m.createErrorFromStatus() @@ -244,6 +267,32 @@ func (m *MongoCrypt) CreateExplicitEncryptionContext(doc bsoncore.Document, opts } } + if opts.RangeOptions != nil { + idx, mongocryptDoc := bsoncore.AppendDocumentStart(nil) + if opts.RangeOptions.Min != nil { + mongocryptDoc = bsoncore.AppendValueElement(mongocryptDoc, "min", *opts.RangeOptions.Min) + } + if opts.RangeOptions.Max != nil { + mongocryptDoc = bsoncore.AppendValueElement(mongocryptDoc, "max", *opts.RangeOptions.Max) + } + if opts.RangeOptions.Precision != nil { + mongocryptDoc = bsoncore.AppendInt32Element(mongocryptDoc, "precision", *opts.RangeOptions.Precision) + } + mongocryptDoc = bsoncore.AppendInt64Element(mongocryptDoc, "sparsity", opts.RangeOptions.Sparsity) + + mongocryptDoc, err := bsoncore.AppendDocumentEnd(mongocryptDoc, idx) + if err != nil { + return nil, err + } + + mongocryptBinary := newBinaryFromBytes(mongocryptDoc) + defer mongocryptBinary.close() + + if ok := C.mongocrypt_ctx_setopt_algorithm_range(ctx.wrapped, mongocryptBinary.wrapped); !ok { + return nil, ctx.createErrorFromStatus() + } + } + algoStr := C.CString(opts.Algorithm) defer C.free(unsafe.Pointer(algoStr)) @@ -264,7 +313,15 @@ func (m *MongoCrypt) CreateExplicitEncryptionContext(doc bsoncore.Document, opts return nil, ctx.createErrorFromStatus() } } + return ctx, nil +} +// CreateExplicitEncryptionContext creates a Context 
to use for explicit encryption. +func (m *MongoCrypt) CreateExplicitEncryptionContext(doc bsoncore.Document, opts *options.ExplicitEncryptionOptions) (*Context, error) { + ctx, err := m.createExplicitEncryptionContext(opts) + if err != nil { + return ctx, err + } docBinary := newBinaryFromBytes(doc) defer docBinary.close() if ok := C.mongocrypt_ctx_explicit_encrypt_init(ctx.wrapped, docBinary.wrapped); !ok { @@ -274,6 +331,21 @@ func (m *MongoCrypt) CreateExplicitEncryptionContext(doc bsoncore.Document, opts return ctx, nil } +// CreateExplicitEncryptionExpressionContext creates a Context to use for explicit encryption of an expression. +func (m *MongoCrypt) CreateExplicitEncryptionExpressionContext(doc bsoncore.Document, opts *options.ExplicitEncryptionOptions) (*Context, error) { + ctx, err := m.createExplicitEncryptionContext(opts) + if err != nil { + return ctx, err + } + docBinary := newBinaryFromBytes(doc) + defer docBinary.close() + if ok := C.mongocrypt_ctx_explicit_encrypt_expression_init(ctx.wrapped, docBinary.wrapped); !ok { + return nil, ctx.createErrorFromStatus() + } + + return ctx, nil +} + // CreateExplicitDecryptionContext creates a Context to use for explicit decryption. func (m *MongoCrypt) CreateExplicitDecryptionContext(doc bsoncore.Document) (*Context, error) { ctx := newContext(C.mongocrypt_ctx_new(m.wrapped)) @@ -309,6 +381,9 @@ func (m *MongoCrypt) CryptSharedLibVersionString() string { // Close cleans up any resources associated with the given MongoCrypt instance. func (m *MongoCrypt) Close() { C.mongocrypt_destroy(m.wrapped) + if m.httpClient == httputil.DefaultHTTPClient { + httputil.CloseIdleHTTPConnections(m.httpClient) + } } // RewrapDataKeyContext create a Context to use for rewrapping a data key. @@ -415,7 +490,30 @@ func (m *MongoCrypt) createErrorFromStatus() error { return errorFromStatus(status) } -// GetKmsProviders returns the originally configured KMS providers. 
-func (m *MongoCrypt) GetKmsProviders() bsoncore.Document { - return m.kmsProviders +// needsKmsProvider returns true if provider was initially set to an empty document. +// An empty document signals the driver to fetch credentials. +func needsKmsProvider(kmsProviders bsoncore.Document, provider string) bool { + val, err := kmsProviders.LookupErr(provider) + if err != nil { + // KMS provider is not configured. + return false + } + doc, ok := val.DocumentOK() + // KMS provider is an empty document if the length is 5. + // An empty document contains 4 bytes of "\x00" and a null byte. + return ok && len(doc) == 5 +} + +// GetKmsProviders attempts to obtain credentials from environment. +// It is expected to be called when a libmongocrypt context is in the mongocrypt.NeedKmsCredentials state. +func (m *MongoCrypt) GetKmsProviders(ctx context.Context) (bsoncore.Document, error) { + builder := bsoncore.NewDocumentBuilder() + for k, p := range m.kmsProviders { + doc, err := p.GetCredentialsDoc(ctx) + if err != nil { + return nil, fmt.Errorf("unable to retrieve %s credentials: %w", k, err) + } + builder.AppendDocument(k, doc) + } + return builder.Build(), nil } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context_not_enabled.go index 2e2776914..734662e71 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context_not_enabled.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_context_not_enabled.go @@ -27,7 +27,7 @@ func (c *Context) NextOperation() (bsoncore.Document, error) { } // AddOperationResult feeds the result of a database operation to mongocrypt. 
-func (c *Context) AddOperationResult(result bsoncore.Document) error { +func (c *Context) AddOperationResult(bsoncore.Document) error { panic(cseNotSupportedMsg) } @@ -57,6 +57,6 @@ func (c *Context) Close() { } // ProvideKmsProviders provides the KMS providers when in the NeedKmsCredentials state. -func (c *Context) ProvideKmsProviders(kmsProviders bsoncore.Document) error { +func (c *Context) ProvideKmsProviders(bsoncore.Document) error { panic(cseNotSupportedMsg) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context_not_enabled.go index 272367ea5..6bce2f029 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context_not_enabled.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_kms_context_not_enabled.go @@ -34,6 +34,6 @@ func (kc *KmsContext) BytesNeeded() int32 { } // FeedResponse feeds the bytes received from the KMS to mongocrypt. -func (kc *KmsContext) FeedResponse(response []byte) error { +func (kc *KmsContext) FeedResponse([]byte) error { panic(cseNotSupportedMsg) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go index a333dc536..24f9f9b0e 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go @@ -10,6 +10,8 @@ package mongocrypt import ( + "context" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options" ) @@ -26,37 +28,42 @@ func Version() string { } // NewMongoCrypt constructs a new MongoCrypt instance configured using the provided MongoCryptOptions. 
-func NewMongoCrypt(opts *options.MongoCryptOptions) (*MongoCrypt, error) { +func NewMongoCrypt(*options.MongoCryptOptions) (*MongoCrypt, error) { panic(cseNotSupportedMsg) } // CreateEncryptionContext creates a Context to use for encryption. -func (m *MongoCrypt) CreateEncryptionContext(db string, cmd bsoncore.Document) (*Context, error) { +func (m *MongoCrypt) CreateEncryptionContext(string, bsoncore.Document) (*Context, error) { + panic(cseNotSupportedMsg) +} + +// CreateExplicitEncryptionExpressionContext creates a Context to use for explicit encryption of an expression. +func (m *MongoCrypt) CreateExplicitEncryptionExpressionContext(bsoncore.Document, *options.ExplicitEncryptionOptions) (*Context, error) { panic(cseNotSupportedMsg) } // CreateDecryptionContext creates a Context to use for decryption. -func (m *MongoCrypt) CreateDecryptionContext(cmd bsoncore.Document) (*Context, error) { +func (m *MongoCrypt) CreateDecryptionContext(bsoncore.Document) (*Context, error) { panic(cseNotSupportedMsg) } // CreateDataKeyContext creates a Context to use for creating a data key. -func (m *MongoCrypt) CreateDataKeyContext(kmsProvider string, opts *options.DataKeyOptions) (*Context, error) { +func (m *MongoCrypt) CreateDataKeyContext(string, *options.DataKeyOptions) (*Context, error) { panic(cseNotSupportedMsg) } // CreateExplicitEncryptionContext creates a Context to use for explicit encryption. -func (m *MongoCrypt) CreateExplicitEncryptionContext(doc bsoncore.Document, opts *options.ExplicitEncryptionOptions) (*Context, error) { +func (m *MongoCrypt) CreateExplicitEncryptionContext(bsoncore.Document, *options.ExplicitEncryptionOptions) (*Context, error) { panic(cseNotSupportedMsg) } // RewrapDataKeyContext creates a Context to use for rewrapping a data key. 
-func (m *MongoCrypt) RewrapDataKeyContext(filter []byte, opts *options.RewrapManyDataKeyOptions) (*Context, error) { +func (m *MongoCrypt) RewrapDataKeyContext([]byte, *options.RewrapManyDataKeyOptions) (*Context, error) { panic(cseNotSupportedMsg) } // CreateExplicitDecryptionContext creates a Context to use for explicit decryption. -func (m *MongoCrypt) CreateExplicitDecryptionContext(doc bsoncore.Document) (*Context, error) { +func (m *MongoCrypt) CreateExplicitDecryptionContext(bsoncore.Document) (*Context, error) { panic(cseNotSupportedMsg) } @@ -78,6 +85,6 @@ func (m *MongoCrypt) Close() { } // GetKmsProviders returns the originally configured KMS providers. -func (m *MongoCrypt) GetKmsProviders() bsoncore.Document { +func (m *MongoCrypt) GetKmsProviders(context.Context) (bsoncore.Document, error) { panic(cseNotSupportedMsg) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_context_options.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_context_options.go index fdf704ffa..325777eb2 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_context_options.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_context_options.go @@ -56,6 +56,15 @@ type ExplicitEncryptionOptions struct { Algorithm string QueryType string ContentionFactor *int64 + RangeOptions *ExplicitRangeOptions +} + +// ExplicitRangeOptions specifies options for the range index. +type ExplicitRangeOptions struct { + Min *bsoncore.Value + Max *bsoncore.Value + Sparsity int64 + Precision *int32 } // ExplicitEncryption creates a new ExplicitEncryptionOptions instance. @@ -93,6 +102,12 @@ func (eeo *ExplicitEncryptionOptions) SetContentionFactor(contentionFactor int64 return eeo } +// SetRangeOptions specifies the range options. 
+func (eeo *ExplicitEncryptionOptions) SetRangeOptions(ro ExplicitRangeOptions) *ExplicitEncryptionOptions { + eeo.RangeOptions = &ro + return eeo +} + // RewrapManyDataKeyOptions represents all possible options used to decrypt and encrypt all matching data keys with a // possibly new masterKey. type RewrapManyDataKeyOptions struct { @@ -122,6 +137,9 @@ func (rmdko *RewrapManyDataKeyOptions) SetMasterKey(masterKey bsoncore.Document) // MergeRewrapManyDataKeyOptions combines the given RewrapManyDataKeyOptions instances into a single // RewrapManyDataKeyOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeRewrapManyDataKeyOptions(opts ...*RewrapManyDataKeyOptions) *RewrapManyDataKeyOptions { rmdkOpts := RewrapManyDataKey() for _, rmdko := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_options.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_options.go index 7e90a0ecd..d800bc8db 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_options.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/mongocrypt_options.go @@ -7,6 +7,8 @@ package options import ( + "net/http" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" ) @@ -18,6 +20,7 @@ type MongoCryptOptions struct { EncryptedFieldsMap map[string]bsoncore.Document CryptSharedLibDisabled bool CryptSharedLibOverridePath string + HTTPClient *http.Client } // MongoCrypt creates a new MongoCryptOptions instance. @@ -61,3 +64,9 @@ func (mo *MongoCryptOptions) SetCryptSharedLibOverridePath(path string) *MongoCr mo.CryptSharedLibOverridePath = path return mo } + +// SetHTTPClient sets the http client. 
+func (mo *MongoCryptOptions) SetHTTPClient(httpClient *http.Client) *MongoCryptOptions { + mo.HTTPClient = httpClient + return mo +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/config.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/config.go index 4cb14e4d0..eac2aab7f 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/config.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/config.go @@ -12,7 +12,7 @@ import ( "fmt" "net/http" - "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/internal/httputil" "golang.org/x/crypto/ocsp" ) @@ -33,7 +33,7 @@ func newConfig(certChain []*x509.Certificate, opts *VerifyOptions) (config, erro } if cfg.httpClient == nil { - cfg.httpClient = internal.DefaultHTTPClient + cfg.httpClient = httputil.DefaultHTTPClient } if len(certChain) == 0 { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go index 0e7dbfe2d..849530fde 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go @@ -149,7 +149,7 @@ func processStaple(cfg config, staple []byte) (*ResponseDetails, error) { // If the server has a Must-Staple certificate and the server does not present a stapled OCSP response, error. 
if mustStaple && len(staple) == 0 { return nil, errors.New("server provided a certificate with the Must-Staple extension but did not " + - "provde a stapled OCSP response") + "provide a stapled OCSP response") } if len(staple) == 0 { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go index fa51ba9c6..905c9cfc5 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go @@ -12,6 +12,7 @@ import ( "errors" "fmt" "math" + "net" "strconv" "strings" "sync" @@ -21,7 +22,11 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/event" - "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/internal/csot" + "go.mongodb.org/mongo-driver/internal/driverutil" + "go.mongodb.org/mongo-driver/internal/handshake" + "go.mongodb.org/mongo-driver/internal/logger" + "go.mongodb.org/mongo-driver/mongo/address" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/readconcern" "go.mongodb.org/mongo-driver/mongo/readpref" @@ -33,8 +38,6 @@ import ( const defaultLocalThreshold = 15 * time.Millisecond -var dollarCmd = [...]byte{'.', '$', 'c', 'm', 'd'} - var ( // ErrNoDocCommandResponse occurs when the server indicated a response existed, but none was found. ErrNoDocCommandResponse = errors.New("command returned no documents") @@ -44,6 +47,8 @@ var ( ErrReplyDocumentMismatch = errors.New("number of documents returned does not match numberReturned field") // ErrNonPrimaryReadPref is returned when a read is attempted in a transaction with a non-primary read preference. ErrNonPrimaryReadPref = errors.New("read preference in a transaction must be primary") + // errDatabaseNameEmpty occurs when a database name is not provided. 
+ errDatabaseNameEmpty = errors.New("database name cannot be empty") ) const ( @@ -96,6 +101,7 @@ type startedInformation struct { serverConnID *int64 redacted bool serviceID *primitive.ObjectID + serverAddress address.Address } // finishedInformation keeps track of all of the information necessary for monitoring success and failure events. @@ -107,9 +113,10 @@ type finishedInformation struct { connID string driverConnectionID uint64 // TODO(GODRIVER-2824): change type to int64. serverConnID *int64 - startTime time.Time redacted bool serviceID *primitive.ObjectID + serverAddress address.Address + duration time.Duration } // convertInt64PtrToInt32Ptr will convert an int64 pointer reference to an int32 pointer @@ -128,6 +135,20 @@ func convertInt64PtrToInt32Ptr(i64 *int64) *int32 { return &i32 } +// success returns true if there was no command error or the command error is a +// "WriteCommandError". Commands that executed on the server and return a status +// of { ok: 1.0 } are considered successful commands and MUST generate a +// CommandSucceededEvent and "command succeeded" log message. Commands that have +// write errors are included since the actual command did succeed, only writes +// failed. +func (info finishedInformation) success() bool { + if _, ok := info.cmdErr.(WriteCommandError); ok { + return true + } + + return info.cmdErr == nil +} + // ResponseInfo contains the context required to parse a server response. type ResponseInfo struct { ServerResponse bsoncore.Document @@ -137,6 +158,37 @@ type ResponseInfo struct { CurrentIndex int } +func redactStartedInformationCmd(op Operation, info startedInformation) bson.Raw { + var cmdCopy bson.Raw + + // Make a copy of the command. Redact if the command is security + // sensitive and cannot be monitored. 
If there was a type 1 payload for + // the current batch, convert it to a BSON array + if !info.redacted { + cmdCopy = make([]byte, len(info.cmd)) + copy(cmdCopy, info.cmd) + + if info.documentSequenceIncluded { + // remove 0 byte at end + cmdCopy = cmdCopy[:len(info.cmd)-1] + cmdCopy = op.addBatchArray(cmdCopy) + + // add back 0 byte and update length + cmdCopy, _ = bsoncore.AppendDocumentEnd(cmdCopy, 0) + } + } + + return cmdCopy +} + +func redactFinishedInformationResponse(info finishedInformation) bson.Raw { + if !info.redacted { + return bson.Raw(info.response) + } + + return bson.Raw{} +} + // Operation is used to execute an operation. It contains all of the common code required to // select a server, transform an operation into a command, write the command to a connection from // the selected server, read a response from that connection, process the response, and potentially @@ -252,8 +304,17 @@ type Operation struct { // nil, which means that the timeout of the operation's caller will be used. Timeout *time.Duration - // cmdName is only set when serializing OP_MSG and is used internally in readWireMessage. - cmdName string + Logger *logger.Logger + + // Name is the name of the operation. This is used when serializing + // OP_MSG as well as for logging server selection data. + Name string + + // omitReadPreference is a boolean that indicates whether to omit the + // read preference from the command. This omition includes the case + // where a default read preference is used when the operation + // ReadPreference is not specified. + omitReadPreference bool } // shouldEncrypt returns true if this operation should automatically be encrypted. @@ -261,8 +322,73 @@ func (op Operation) shouldEncrypt() bool { return op.Crypt != nil && !op.Crypt.BypassAutoEncryption() } +// filterDeprioritizedServers will filter out the server candidates that have +// been deprioritized by the operation due to failure. 
+// +// The server selector should try to select a server that is not in the +// deprioritization list. However, if this is not possible (e.g. there are no +// other healthy servers in the cluster), the selector may return a +// deprioritized server. +func filterDeprioritizedServers(candidates, deprioritized []description.Server) []description.Server { + if len(deprioritized) == 0 { + return candidates + } + + dpaSet := make(map[address.Address]*description.Server) + for i, srv := range deprioritized { + dpaSet[srv.Addr] = &deprioritized[i] + } + + allowed := []description.Server{} + + // Iterate over the candidates and append them to the allowdIndexes slice if + // they are not in the deprioritizedServers list. + for _, candidate := range candidates { + if srv, ok := dpaSet[candidate.Addr]; !ok || !srv.Equal(candidate) { + allowed = append(allowed, candidate) + } + } + + // If nothing is allowed, then all available servers must have been + // deprioritized. In this case, return the candidates list as-is so that the + // selector can find a suitable server + if len(allowed) == 0 { + return candidates + } + + return allowed +} + +// opServerSelector is a wrapper for the server selector that is assigned to the +// operation. The purpose of this wrapper is to filter candidates with +// operation-specific logic, such as deprioritizing failing servers. +type opServerSelector struct { + selector description.ServerSelector + deprioritizedServers []description.Server +} + +// SelectServer will filter candidates with operation-specific logic before +// passing them onto the user-defined or default selector. 
+func (oss *opServerSelector) SelectServer( + topo description.Topology, + candidates []description.Server, +) ([]description.Server, error) { + selectedServers, err := oss.selector.SelectServer(topo, candidates) + if err != nil { + return nil, err + } + + filteredServers := filterDeprioritizedServers(selectedServers, oss.deprioritizedServers) + + return filteredServers, nil +} + // selectServer handles performing server selection for an operation. -func (op Operation) selectServer(ctx context.Context) (Server, error) { +func (op Operation) selectServer( + ctx context.Context, + requestID int32, + deprioritized []description.Server, +) (Server, error) { if err := op.Validate(); err != nil { return nil, err } @@ -279,12 +405,24 @@ func (op Operation) selectServer(ctx context.Context) (Server, error) { }) } - return op.Deployment.SelectServer(ctx, selector) + oss := &opServerSelector{ + selector: selector, + deprioritizedServers: deprioritized, + } + + ctx = logger.WithOperationName(ctx, op.Name) + ctx = logger.WithOperationID(ctx, requestID) + + return op.Deployment.SelectServer(ctx, oss) } // getServerAndConnection should be used to retrieve a Server and Connection to execute an operation. 
-func (op Operation) getServerAndConnection(ctx context.Context) (Server, Connection, error) { - server, err := op.selectServer(ctx) +func (op Operation) getServerAndConnection( + ctx context.Context, + requestID int32, + deprioritized []description.Server, +) (Server, Connection, error) { + server, err := op.selectServer(ctx, requestID, deprioritized) if err != nil { if op.Client != nil && !(op.Client.Committing || op.Client.Aborting) && op.Client.TransactionRunning() { @@ -337,7 +475,7 @@ func (op Operation) Validate() error { return InvalidOperationError{MissingField: "Deployment"} } if op.Database == "" { - return InvalidOperationError{MissingField: "Database"} + return errDatabaseNameEmpty } if op.Client != nil && !writeconcern.AckWrite(op.WriteConcern) { return errors.New("session provided for an unacknowledged write") @@ -363,8 +501,8 @@ func (op Operation) Execute(ctx context.Context) error { // If no deadline is set on the passed-in context, op.Timeout is set, and context is not already // a Timeout context, honor op.Timeout in new Timeout context for operation execution. - if _, deadlineSet := ctx.Deadline(); !deadlineSet && op.Timeout != nil && !internal.IsTimeoutContext(ctx) { - newCtx, cancelFunc := internal.MakeTimeoutContext(ctx, *op.Timeout) + if _, deadlineSet := ctx.Deadline(); !deadlineSet && op.Timeout != nil && !csot.IsTimeoutContext(ctx) { + newCtx, cancelFunc := csot.MakeTimeoutContext(ctx, *op.Timeout) // Redefine ctx to be the new timeout-derived context. ctx = newCtx // Cancel the timeout-derived context at the end of Execute to avoid a context leak. @@ -402,7 +540,7 @@ func (op Operation) Execute(ctx context.Context) error { // If context is a Timeout context, automatically set retries to -1 (infinite) if retrying is // enabled. 
retryEnabled := op.RetryMode != nil && op.RetryMode.Enabled() - if internal.IsTimeoutContext(ctx) && retryEnabled { + if csot.IsTimeoutContext(ctx) && retryEnabled { retries = -1 } @@ -417,6 +555,11 @@ func (op Operation) Execute(ctx context.Context) error { first := true currIndex := 0 + // deprioritizedServers are a running list of servers that should be + // deprioritized during server selection. Per the specifications, we should + // only ever deprioritize the "previous server". + var deprioritizedServers []description.Server + // resetForRetry records the error that caused the retry, decrements retries, and resets the // retry loop variables to request a new server and a new connection for the next attempt. resetForRetry := func(err error) { @@ -442,11 +585,18 @@ func (op Operation) Execute(ctx context.Context) error { } } - // If we got a connection, close it immediately to release pool resources for - // subsequent retries. + // If we got a connection, close it immediately to release pool resources + // for subsequent retries. if conn != nil { + // If we are dealing with a sharded cluster, then mark the failed server + // as "deprioritized". + if desc := conn.Description; desc != nil && op.Deployment.Kind() == description.Sharded { + deprioritizedServers = []description.Server{conn.Description()} + } + conn.Close() } + // Set the server and connection to nil to request a new server and connection. srvr = nil conn = nil @@ -467,9 +617,11 @@ func (op Operation) Execute(ctx context.Context) error { } }() for { + requestID := wiremessage.NextRequestID() + // If the server or connection are nil, try to select a new server and get a new connection. 
if srvr == nil || conn == nil { - srvr, conn, err = op.getServerAndConnection(ctx) + srvr, conn, err = op.getServerAndConnection(ctx, requestID, deprioritizedServers) if err != nil { // If the returned error is retryable and there are retries remaining (negative // retries means retry indefinitely), then retry the operation. Set the server @@ -564,7 +716,8 @@ func (op Operation) Execute(ctx context.Context) error { } var startedInfo startedInformation - *wm, startedInfo, err = op.createWireMessage(ctx, (*wm)[:0], desc, maxTimeMS, conn) + *wm, startedInfo, err = op.createWireMessage(ctx, maxTimeMS, (*wm)[:0], desc, conn, requestID) + if err != nil { return err } @@ -573,10 +726,20 @@ func (op Operation) Execute(ctx context.Context) error { startedInfo.connID = conn.ID() startedInfo.driverConnectionID = conn.DriverConnectionID() startedInfo.cmdName = op.getCommandName(startedInfo.cmd) - op.cmdName = startedInfo.cmdName + + // If the command name does not match the operation name, update + // the operation name as a sanity check. It's more correct to + // be aligned with the data passed to the server via the + // wire message. 
+ if startedInfo.cmdName != op.Name { + op.Name = startedInfo.cmdName + } + startedInfo.redacted = op.redactCommand(startedInfo.cmdName, startedInfo.cmd) startedInfo.serviceID = conn.Description().ServiceID startedInfo.serverConnID = conn.ServerConnectionID() + startedInfo.serverAddress = conn.Description().Addr + op.publishStartedEvent(ctx, startedInfo) // get the moreToCome flag information before we compress @@ -595,24 +758,29 @@ func (op Operation) Execute(ctx context.Context) error { finishedInfo := finishedInformation{ cmdName: startedInfo.cmdName, + driverConnectionID: startedInfo.driverConnectionID, requestID: startedInfo.requestID, - startTime: time.Now(), connID: startedInfo.connID, - driverConnectionID: startedInfo.driverConnectionID, serverConnID: startedInfo.serverConnID, redacted: startedInfo.redacted, serviceID: startedInfo.serviceID, + serverAddress: desc.Server.Addr, } + startedTime := time.Now() + // Check for possible context error. If no context error, check if there's enough time to perform a // round trip before the Context deadline. If ctx is a Timeout Context, use the 90th percentile RTT // as a threshold. Otherwise, use the minimum observed RTT. 
if ctx.Err() != nil { err = ctx.Err() } else if deadline, ok := ctx.Deadline(); ok { - if internal.IsTimeoutContext(ctx) && time.Now().Add(srvr.RTTMonitor().P90()).After(deadline) { - err = internal.WrapErrorf(ErrDeadlineWouldBeExceeded, - "remaining time %v until context deadline is less than 90th percentile RTT\n%v", time.Until(deadline), srvr.RTTMonitor().Stats()) + if csot.IsTimeoutContext(ctx) && time.Now().Add(srvr.RTTMonitor().P90()).After(deadline) { + err = fmt.Errorf( + "remaining time %v until context deadline is less than 90th percentile RTT: %w\n%v", + time.Until(deadline), + ErrDeadlineWouldBeExceeded, + srvr.RTTMonitor().Stats()) } else if time.Now().Add(srvr.RTTMonitor().Min()).After(deadline) { err = context.DeadlineExceeded } @@ -621,7 +789,7 @@ func (op Operation) Execute(ctx context.Context) error { if err == nil { // roundtrip using either the full roundTripper or a special one for when the moreToCome // flag is set - var roundTrip = op.roundTrip + roundTrip := op.roundTrip if moreToCome { roundTrip = op.moreToComeRoundTrip } @@ -634,6 +802,8 @@ func (op Operation) Execute(ctx context.Context) error { finishedInfo.response = res finishedInfo.cmdErr = err + finishedInfo.duration = time.Since(startedTime) + op.publishFinishedEvent(ctx, finishedInfo) // prevIndefiniteErrorIsSet is "true" if the "err" variable has been set to the "prevIndefiniteErr" in @@ -676,7 +846,7 @@ func (op Operation) Execute(ctx context.Context) error { // If the error is no longer retryable and has the NoWritesPerformed label, then we should // set the error to the "previous indefinite error" unless the current error is already the - // "previous indefinite error". After reseting, repeat the error check. + // "previous indefinite error". After resetting, repeat the error check. 
if tt.HasErrorLabel(NoWritesPerformed) && !prevIndefiniteErrIsSet { err = prevIndefiniteErr prevIndefiniteErrIsSet = true @@ -773,7 +943,7 @@ func (op Operation) Execute(ctx context.Context) error { // If the error is no longer retryable and has the NoWritesPerformed label, then we should // set the error to the "previous indefinite error" unless the current error is already the - // "previous indefinite error". After reseting, repeat the error check. + // "previous indefinite error". After resetting, repeat the error check. if tt.HasErrorLabel(NoWritesPerformed) && !prevIndefiniteErrIsSet { err = prevIndefiniteErr prevIndefiniteErrIsSet = true @@ -843,7 +1013,7 @@ func (op Operation) Execute(ctx context.Context) error { } // Reset the retries number for RetryOncePerCommand unless context is a Timeout context, in // which case retries should remain as -1 (as many times as possible). - if *op.RetryMode == RetryOncePerCommand && !internal.IsTimeoutContext(ctx) { + if *op.RetryMode == RetryOncePerCommand && !csot.IsTimeoutContext(ctx) { retries = 1 } } @@ -927,7 +1097,7 @@ func (op Operation) readWireMessage(ctx context.Context, conn Connection) (resul op.Client.UpdateRecoveryToken(bson.Raw(res)) // Update snapshot time if operation was a "find", "aggregate" or "distinct". - if op.cmdName == "find" || op.cmdName == "aggregate" || op.cmdName == "distinct" { + if op.Name == driverutil.FindOp || op.Name == driverutil.AggregateOp || op.Name == driverutil.DistinctOp { op.Client.UpdateSnapshotTime(res) } @@ -1011,22 +1181,6 @@ func (Operation) decompressWireMessage(wm []byte) (wiremessage.OpCode, []byte, e return opcode, uncompressed, nil } -func (op Operation) createWireMessage( - ctx context.Context, - dst []byte, - desc description.SelectedServer, - maxTimeMS uint64, - conn Connection) ([]byte, startedInformation, error) { - - // If topology is not LoadBalanced, API version is not declared, and wire version is unknown - // or less than 6, use OP_QUERY. 
Otherwise, use OP_MSG. - if desc.Kind != description.LoadBalanced && op.ServerAPI == nil && - (desc.WireVersion == nil || desc.WireVersion.Max < wiremessage.OpmsgWireVersion) { - return op.createQueryWireMessage(maxTimeMS, dst, desc) - } - return op.createMsgWireMessage(ctx, maxTimeMS, dst, desc, conn) -} - func (op Operation) addBatchArray(dst []byte) []byte { aidx, dst := bsoncore.AppendArrayElementStart(dst, op.Batches.Identifier) for i, doc := range op.Batches.Current { @@ -1036,13 +1190,20 @@ func (op Operation) addBatchArray(dst []byte) []byte { return dst } -func (op Operation) createQueryWireMessage(maxTimeMS uint64, dst []byte, desc description.SelectedServer) ([]byte, startedInformation, error) { +func (op Operation) createLegacyHandshakeWireMessage( + maxTimeMS uint64, + dst []byte, + desc description.SelectedServer, +) ([]byte, startedInformation, error) { var info startedInformation flags := op.secondaryOK(desc) var wmindex int32 info.requestID = wiremessage.NextRequestID() wmindex, dst = wiremessage.AppendHeaderStart(dst, info.requestID, 0, wiremessage.OpQuery) dst = wiremessage.AppendQueryFlags(dst, flags) + + dollarCmd := [...]byte{'.', '$', 'c', 'm', 'd'} + // FullCollectionName dst = append(dst, op.Database...) dst = append(dst, dollarCmd[:]...) 
@@ -1108,9 +1269,14 @@ func (op Operation) createQueryWireMessage(maxTimeMS uint64, dst []byte, desc de return bsoncore.UpdateLength(dst, wmindex, int32(len(dst[wmindex:]))), info, nil } -func (op Operation) createMsgWireMessage(ctx context.Context, maxTimeMS uint64, dst []byte, desc description.SelectedServer, - conn Connection) ([]byte, startedInformation, error) { - +func (op Operation) createMsgWireMessage( + ctx context.Context, + maxTimeMS uint64, + dst []byte, + desc description.SelectedServer, + conn Connection, + requestID int32, +) ([]byte, startedInformation, error) { var info startedInformation var flags wiremessage.MsgFlag var wmindex int32 @@ -1125,7 +1291,7 @@ func (op Operation) createMsgWireMessage(ctx context.Context, maxTimeMS uint64, flags |= wiremessage.ExhaustAllowed } - info.requestID = wiremessage.NextRequestID() + info.requestID = requestID wmindex, dst = wiremessage.AppendHeaderStart(dst, info.requestID, 0, wiremessage.OpMsg) dst = wiremessage.AppendMsgFlags(dst, flags) // Body @@ -1191,6 +1357,29 @@ func (op Operation) createMsgWireMessage(ctx context.Context, maxTimeMS uint64, return bsoncore.UpdateLength(dst, wmindex, int32(len(dst[wmindex:]))), info, nil } +// isLegacyHandshake returns True if the operation is the first message of +// the initial handshake and should use a legacy hello. 
+func isLegacyHandshake(op Operation, desc description.SelectedServer) bool { + isInitialHandshake := desc.WireVersion == nil || desc.WireVersion.Max == 0 + + return op.Legacy == LegacyHandshake && isInitialHandshake +} + +func (op Operation) createWireMessage( + ctx context.Context, + maxTimeMS uint64, + dst []byte, + desc description.SelectedServer, + conn Connection, + requestID int32, +) ([]byte, startedInformation, error) { + if isLegacyHandshake(op, desc) { + return op.createLegacyHandshakeWireMessage(maxTimeMS, dst, desc) + } + + return op.createMsgWireMessage(ctx, maxTimeMS, dst, desc, conn, requestID) +} + // addCommandFields adds the fields for a command to the wire message in dst. This assumes that the start of the document // has already been added and does not add the final 0 byte. func (op Operation) addCommandFields(ctx context.Context, dst []byte, desc description.SelectedServer) ([]byte, error) { @@ -1315,7 +1504,14 @@ func (op Operation) addWriteConcern(dst []byte, desc description.SelectedServer) func (op Operation) addSession(dst []byte, desc description.SelectedServer) ([]byte, error) { client := op.Client - if client == nil || !sessionsSupported(desc.WireVersion) || desc.SessionTimeoutMinutes == 0 { + + // If the operation is defined for an explicit session but the server + // does not support sessions, then throw an error. + if client != nil && !client.IsImplicit && desc.SessionTimeoutMinutesPtr == nil { + return nil, fmt.Errorf("current topology does not support sessions") + } + + if client == nil || !sessionsSupported(desc.WireVersion) || desc.SessionTimeoutMinutesPtr == nil { return dst, nil } if err := client.UpdateUseTime(); err != nil { @@ -1367,7 +1563,7 @@ func (op Operation) addClusterTime(dst []byte, desc description.SelectedServer) // operation's MaxTimeMS if set. If no MaxTimeMS is set on the operation, and context is // not a Timeout context, calculateMaxTimeMS returns 0. 
func (op Operation) calculateMaxTimeMS(ctx context.Context, rtt90 time.Duration, rttStats string) (uint64, error) { - if internal.IsTimeoutContext(ctx) { + if csot.IsTimeoutContext(ctx) { if deadline, ok := ctx.Deadline(); ok { remainingTimeout := time.Until(deadline) maxTime := remainingTimeout - rtt90 @@ -1376,9 +1572,11 @@ func (op Operation) calculateMaxTimeMS(ctx context.Context, rtt90 time.Duration, // maxTimeMS value (e.g. 400 microseconds evaluates to 1ms, not 0ms). maxTimeMS := int64((maxTime + (time.Millisecond - 1)) / time.Millisecond) if maxTimeMS <= 0 { - return 0, internal.WrapErrorf(ErrDeadlineWouldBeExceeded, - "remaining time %v until context deadline is less than or equal to 90th percentile RTT\n%v", - remainingTimeout, rttStats) + return 0, fmt.Errorf( + "remaining time %v until context deadline is less than or equal to 90th percentile RTT: %w\n%v", + remainingTimeout, + ErrDeadlineWouldBeExceeded, + rttStats) } return uint64(maxTimeMS), nil } @@ -1454,7 +1652,14 @@ func (op Operation) getReadPrefBasedOnTransaction() (*readpref.ReadPref, error) return op.ReadPreference, nil } +// createReadPref will attempt to create a document with the "readPreference" +// object and various related fields such as "mode", "tags", and +// "maxStalenessSeconds". func (op Operation) createReadPref(desc description.SelectedServer, isOpQuery bool) (bsoncore.Document, error) { + if op.omitReadPreference { + return nil, nil + } + // TODO(GODRIVER-2231): Instead of checking if isOutputAggregate and desc.Server.WireVersion.Max < 13, somehow check // TODO if supplied readPreference was "overwritten" with primary in description.selectForReplicaSet. 
if desc.Server.Kind == description.Standalone || (isOpQuery && desc.Server.Kind != description.Mongos) || @@ -1493,7 +1698,14 @@ func (op Operation) createReadPref(desc description.SelectedServer, isOpQuery bo doc, _ = bsoncore.AppendDocumentEnd(doc, idx) return doc, nil } - doc = bsoncore.AppendStringElement(doc, "mode", "primary") + + // OP_MSG requires never sending read preference "primary" + // except for topology "single". + // + // It is important to note that although the Go Driver does not + // support legacy opcodes, OP_QUERY has different rules for + // adding read preference to commands. + return nil, nil case readpref.PrimaryPreferredMode: doc = bsoncore.AppendStringElement(doc, "mode", "primaryPreferred") case readpref.SecondaryPreferredMode: @@ -1557,7 +1769,7 @@ func (op Operation) secondaryOK(desc description.SelectedServer) wiremessage.Que } func (Operation) canCompress(cmd string) bool { - if cmd == internal.LegacyHello || cmd == "hello" || cmd == "saslStart" || cmd == "saslContinue" || cmd == "getnonce" || cmd == "authenticate" || + if cmd == handshake.LegacyHello || cmd == "hello" || cmd == "saslStart" || cmd == "saslContinue" || cmd == "getnonce" || cmd == "authenticate" || cmd == "createUser" || cmd == "updateUser" || cmd == "copydbSaslStart" || cmd == "copydbgetnonce" || cmd == "copydb" { return false } @@ -1661,7 +1873,7 @@ func (op Operation) decodeResult(opcode wiremessage.OpCode, wm []byte) (bsoncore return nil, errors.New("malformed wire message: insufficient bytes to read document sequence") } default: - return nil, fmt.Errorf("malformed wire message: uknown section type %v", stype) + return nil, fmt.Errorf("malformed wire message: unknown section type %v", stype) } } @@ -1689,7 +1901,7 @@ func (op *Operation) redactCommand(cmd string, doc bsoncore.Document) bool { return true } - if strings.ToLower(cmd) != internal.LegacyHelloLowercase && cmd != "hello" { + if strings.ToLower(cmd) != handshake.LegacyHelloLowercase && cmd != "hello" { 
return false } @@ -1698,76 +1910,144 @@ func (op *Operation) redactCommand(cmd string, doc bsoncore.Document) bool { return err == nil } +// canLogCommandMessage returns true if the command can be logged. +func (op Operation) canLogCommandMessage() bool { + return op.Logger != nil && op.Logger.LevelComponentEnabled(logger.LevelDebug, logger.ComponentCommand) +} + +func (op Operation) canPublishStartedEvent() bool { + return op.CommandMonitor != nil && op.CommandMonitor.Started != nil +} + // publishStartedEvent publishes a CommandStartedEvent to the operation's command monitor if possible. If the command is // an unacknowledged write, a CommandSucceededEvent will be published as well. If started events are not being monitored, // no events are published. func (op Operation) publishStartedEvent(ctx context.Context, info startedInformation) { - if op.CommandMonitor == nil || op.CommandMonitor.Started == nil { - return - } - - // Make a copy of the command. Redact if the command is security sensitive and cannot be monitored. - // If there was a type 1 payload for the current batch, convert it to a BSON array. - cmdCopy := bson.Raw{} - if !info.redacted { - cmdCopy = make([]byte, len(info.cmd)) - copy(cmdCopy, info.cmd) - if info.documentSequenceIncluded { - cmdCopy = cmdCopy[:len(info.cmd)-1] // remove 0 byte at end - cmdCopy = op.addBatchArray(cmdCopy) - cmdCopy, _ = bsoncore.AppendDocumentEnd(cmdCopy, 0) // add back 0 byte and update length + // If logging is enabled for the command component at the debug level, log the command response. 
+ if op.canLogCommandMessage() { + host, port, _ := net.SplitHostPort(info.serverAddress.String()) + + redactedCmd := redactStartedInformationCmd(op, info).String() + formattedCmd := logger.FormatMessage(redactedCmd, op.Logger.MaxDocumentLength) + + op.Logger.Print(logger.LevelDebug, + logger.ComponentCommand, + logger.CommandStarted, + logger.SerializeCommand(logger.Command{ + DriverConnectionID: info.driverConnectionID, + Message: logger.CommandStarted, + Name: info.cmdName, + DatabaseName: op.Database, + RequestID: int64(info.requestID), + ServerConnectionID: info.serverConnID, + ServerHost: host, + ServerPort: port, + ServiceID: info.serviceID, + }, + logger.KeyCommand, formattedCmd)...) + + } + + if op.canPublishStartedEvent() { + started := &event.CommandStartedEvent{ + Command: redactStartedInformationCmd(op, info), + DatabaseName: op.Database, + CommandName: info.cmdName, + RequestID: int64(info.requestID), + ConnectionID: info.connID, + ServerConnectionID: convertInt64PtrToInt32Ptr(info.serverConnID), + ServerConnectionID64: info.serverConnID, + ServiceID: info.serviceID, } + op.CommandMonitor.Started(ctx, started) + } +} - started := &event.CommandStartedEvent{ - Command: cmdCopy, - DatabaseName: op.Database, - CommandName: info.cmdName, - RequestID: int64(info.requestID), - ConnectionID: info.connID, - ServerConnectionID: convertInt64PtrToInt32Ptr(info.serverConnID), - ServiceID: info.serviceID, - } - op.CommandMonitor.Started(ctx, started) +// canPublishFinishedEvent returns true if a CommandSucceededEvent or a +// CommandFailedEvent can be published for the given command. This is true if +// the command monitor callback matching the command's outcome is set. 
+func (op Operation) canPublishFinishedEvent(info finishedInformation) bool { + success := info.success() + + return op.CommandMonitor != nil && + (!success || op.CommandMonitor.Succeeded != nil) && + (success || op.CommandMonitor.Failed != nil) } // publishFinishedEvent publishes either a CommandSucceededEvent or a CommandFailedEvent to the operation's command // monitor if possible. If success/failure events aren't being monitored, no events are published. func (op Operation) publishFinishedEvent(ctx context.Context, info finishedInformation) { - success := info.cmdErr == nil - if _, ok := info.cmdErr.(WriteCommandError); ok { - success = true - } - if op.CommandMonitor == nil || (success && op.CommandMonitor.Succeeded == nil) || (!success && op.CommandMonitor.Failed == nil) { + if op.canLogCommandMessage() && info.success() { + host, port, _ := net.SplitHostPort(info.serverAddress.String()) + + redactedReply := redactFinishedInformationResponse(info).String() + formattedReply := logger.FormatMessage(redactedReply, op.Logger.MaxDocumentLength) + + op.Logger.Print(logger.LevelDebug, + logger.ComponentCommand, + logger.CommandSucceeded, + logger.SerializeCommand(logger.Command{ + DriverConnectionID: info.driverConnectionID, + Message: logger.CommandSucceeded, + Name: info.cmdName, + DatabaseName: op.Database, + RequestID: int64(info.requestID), + ServerConnectionID: info.serverConnID, + ServerHost: host, + ServerPort: port, + ServiceID: info.serviceID, + }, + logger.KeyDurationMS, info.duration.Milliseconds(), + logger.KeyReply, formattedReply)...) 
+ } + + if op.canLogCommandMessage() && !info.success() { + host, port, _ := net.SplitHostPort(info.serverAddress.String()) + + formattedReply := logger.FormatMessage(info.cmdErr.Error(), op.Logger.MaxDocumentLength) + + op.Logger.Print(logger.LevelDebug, + logger.ComponentCommand, + logger.CommandFailed, + logger.SerializeCommand(logger.Command{ + DriverConnectionID: info.driverConnectionID, + Message: logger.CommandFailed, + Name: info.cmdName, + DatabaseName: op.Database, + RequestID: int64(info.requestID), + ServerConnectionID: info.serverConnID, + ServerHost: host, + ServerPort: port, + ServiceID: info.serviceID, + }, + logger.KeyDurationMS, info.duration.Milliseconds(), + logger.KeyFailure, formattedReply)...) + } + + // If the finished event cannot be published, return early. + if !op.canPublishFinishedEvent(info) { return } - var durationNanos int64 - var emptyTime time.Time - if info.startTime != emptyTime { - durationNanos = time.Since(info.startTime).Nanoseconds() - } - finished := event.CommandFinishedEvent{ - CommandName: info.cmdName, - RequestID: int64(info.requestID), - ConnectionID: info.connID, - DurationNanos: durationNanos, - ServerConnectionID: convertInt64PtrToInt32Ptr(info.serverConnID), - ServiceID: info.serviceID, - } - - if success { - res := bson.Raw{} - // Only copy the reply for commands that are not security sensitive - if !info.redacted { - res = bson.Raw(info.response) - } + CommandName: info.cmdName, + DatabaseName: op.Database, + RequestID: int64(info.requestID), + ConnectionID: info.connID, + Duration: info.duration, + DurationNanos: info.duration.Nanoseconds(), + ServerConnectionID: convertInt64PtrToInt32Ptr(info.serverConnID), + ServerConnectionID64: info.serverConnID, + ServiceID: info.serviceID, + } + + if info.success() { successEvent := &event.CommandSucceededEvent{ - Reply: res, + Reply: redactFinishedInformationResponse(info), CommandFinishedEvent: finished, } op.CommandMonitor.Succeeded(ctx, successEvent) + return } @@ 
-1780,10 +2060,10 @@ func (op Operation) publishFinishedEvent(ctx context.Context, info finishedInfor // sessionsSupported returns true of the given server version indicates that it supports sessions. func sessionsSupported(wireVersion *description.VersionRange) bool { - return wireVersion != nil && wireVersion.Max >= 6 + return wireVersion != nil } // retryWritesSupported returns true if this description represents a server that supports retryable writes. func retryWritesSupported(s description.Server) bool { - return s.SessionTimeoutMinutes != 0 && s.Kind != description.Standalone + return s.SessionTimeoutMinutesPtr != nil && s.Kind != description.Standalone } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.go index 2bf0ec052..941372713 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/abort_transaction.go @@ -11,6 +11,7 @@ import ( "errors" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -64,11 +65,12 @@ func (at *AbortTransaction) Execute(ctx context.Context) error { Selector: at.selector, WriteConcern: at.writeConcern, ServerAPI: at.serverAPI, + Name: driverutil.AbortTransactionOp, }.Execute(ctx) } -func (at *AbortTransaction) command(dst []byte, desc description.SelectedServer) ([]byte, error) { +func (at *AbortTransaction) command(dst []byte, _ description.SelectedServer) ([]byte, error) { dst = bsoncore.AppendInt32Element(dst, "abortTransaction", 1) if at.recoveryToken != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go index 
4ea2263cb..ca0e79652 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go @@ -13,6 +13,7 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/readconcern" "go.mongodb.org/mongo-driver/mongo/readpref" @@ -111,6 +112,7 @@ func (a *Aggregate) Execute(ctx context.Context) error { IsOutputAggregate: a.hasOutputStage, MaxTime: a.maxTime, Timeout: a.timeout, + Name: driverutil.AggregateOp, }.Execute(ctx) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go index 574cb0314..5aad3f72e 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go @@ -12,6 +12,7 @@ import ( "time" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/logger" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/readconcern" "go.mongodb.org/mongo-driver/mongo/readpref" @@ -38,6 +39,7 @@ type Command struct { createCursor bool cursorOpts driver.CursorOptions timeout *time.Duration + logger *logger.Logger } // NewCommand constructs and returns a new Command. Once the operation is executed, the result may only be accessed via @@ -77,6 +79,7 @@ func (c *Command) Execute(ctx context.Context) error { return errors.New("the Command operation must have a Deployment set before Execute can be called") } + // TODO(GODRIVER-2649): Actually pass readConcern to underlying driver.Operation. 
return driver.Operation{ CommandFn: func(dst []byte, desc description.SelectedServer) ([]byte, error) { return append(dst, c.command[4:len(c.command)-1]...), nil @@ -106,6 +109,7 @@ func (c *Command) Execute(ctx context.Context) error { Crypt: c.crypt, ServerAPI: c.serverAPI, Timeout: c.timeout, + Logger: c.logger, }.Execute(ctx) } @@ -218,3 +222,13 @@ func (c *Command) Timeout(timeout *time.Duration) *Command { c.timeout = timeout return c } + +// Logger sets the logger for this operation. +func (c *Command) Logger(logger *logger.Logger) *Command { + if c == nil { + c = new(Command) + } + + c.logger = logger + return c +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.go index ff2794a1f..11c6f69dd 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/commit_transaction.go @@ -12,6 +12,7 @@ import ( "time" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -66,11 +67,12 @@ func (ct *CommitTransaction) Execute(ctx context.Context) error { Selector: ct.selector, WriteConcern: ct.writeConcern, ServerAPI: ct.serverAPI, + Name: driverutil.CommitTransactionOp, }.Execute(ctx) } -func (ct *CommitTransaction) command(dst []byte, desc description.SelectedServer) ([]byte, error) { +func (ct *CommitTransaction) command(dst []byte, _ description.SelectedServer) ([]byte, error) { dst = bsoncore.AppendInt32Element(dst, "commitTransaction", 1) if ct.recoveryToken != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.go index 7e605572d..8de1e9f8d 100644 --- 
a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/count.go @@ -14,6 +14,7 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/readconcern" "go.mongodb.org/mongo-driver/mongo/readpref" @@ -126,6 +127,7 @@ func (c *Count) Execute(ctx context.Context) error { Selector: c.selector, ServerAPI: c.serverAPI, Timeout: c.timeout, + Name: driverutil.CountOp, }.Execute(ctx) // Swallow error if NamespaceNotFound(26) is returned from aggregate on non-existent namespace @@ -138,7 +140,7 @@ func (c *Count) Execute(ctx context.Context) error { return err } -func (c *Count) command(dst []byte, desc description.SelectedServer) ([]byte, error) { +func (c *Count) command(dst []byte, _ description.SelectedServer) ([]byte, error) { dst = bsoncore.AppendStringElement(dst, "count", c.collection) if c.query != nil { dst = bsoncore.AppendDocumentElement(dst, "query", c.query) diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.go index c333c5a99..45b26cb70 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create.go @@ -78,7 +78,6 @@ func (c *Create) Execute(ctx context.Context) error { WriteConcern: c.writeConcern, ServerAPI: c.serverAPI, }.Execute(ctx) - } func (c *Create) command(dst []byte, desc description.SelectedServer) ([]byte, error) { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/createIndexes.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_indexes.go similarity index 98% rename from vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/createIndexes.go rename to 
vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_indexes.go index 70f7b5495..77daf676a 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/createIndexes.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_indexes.go @@ -14,6 +14,7 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -117,6 +118,7 @@ func (ci *CreateIndexes) Execute(ctx context.Context) error { WriteConcern: ci.writeConcern, ServerAPI: ci.serverAPI, Timeout: ci.timeout, + Name: driverutil.CreateIndexesOp, }.Execute(ctx) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go new file mode 100644 index 000000000..a16f9d716 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go @@ -0,0 +1,245 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package operation + +import ( + "context" + "errors" + "fmt" + "time" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/mongo/description" + "go.mongodb.org/mongo-driver/mongo/writeconcern" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" + "go.mongodb.org/mongo-driver/x/mongo/driver" + "go.mongodb.org/mongo-driver/x/mongo/driver/session" +) + +// CreateSearchIndexes performs a createSearchIndexes operation. 
+type CreateSearchIndexes struct { + indexes bsoncore.Document + session *session.Client + clock *session.ClusterClock + collection string + monitor *event.CommandMonitor + crypt driver.Crypt + database string + deployment driver.Deployment + selector description.ServerSelector + writeConcern *writeconcern.WriteConcern + result CreateSearchIndexesResult + serverAPI *driver.ServerAPIOptions + timeout *time.Duration +} + +// CreateSearchIndexResult represents a single search index result in CreateSearchIndexesResult. +type CreateSearchIndexResult struct { + Name string +} + +// CreateSearchIndexesResult represents a createSearchIndexes result returned by the server. +type CreateSearchIndexesResult struct { + IndexesCreated []CreateSearchIndexResult +} + +func buildCreateSearchIndexesResult(response bsoncore.Document) (CreateSearchIndexesResult, error) { + elements, err := response.Elements() + if err != nil { + return CreateSearchIndexesResult{}, err + } + csir := CreateSearchIndexesResult{} + for _, element := range elements { + switch element.Key() { + case "indexesCreated": + arr, ok := element.Value().ArrayOK() + if !ok { + return csir, fmt.Errorf("response field 'indexesCreated' is type array, but received BSON type %s", element.Value().Type) + } + + var values []bsoncore.Value + values, err = arr.Values() + if err != nil { + break + } + + for _, val := range values { + valDoc, ok := val.DocumentOK() + if !ok { + return csir, fmt.Errorf("indexesCreated value is type document, but received BSON type %s", val.Type) + } + var indexesCreated CreateSearchIndexResult + if err = bson.Unmarshal(valDoc, &indexesCreated); err != nil { + return csir, err + } + csir.IndexesCreated = append(csir.IndexesCreated, indexesCreated) + } + } + } + return csir, nil +} + +// NewCreateSearchIndexes constructs and returns a new CreateSearchIndexes. 
+func NewCreateSearchIndexes(indexes bsoncore.Document) *CreateSearchIndexes { + return &CreateSearchIndexes{ + indexes: indexes, + } +} + +// Result returns the result of executing this operation. +func (csi *CreateSearchIndexes) Result() CreateSearchIndexesResult { return csi.result } + +func (csi *CreateSearchIndexes) processResponse(info driver.ResponseInfo) error { + var err error + csi.result, err = buildCreateSearchIndexesResult(info.ServerResponse) + return err +} + +// Execute runs this operations and returns an error if the operation did not execute successfully. +func (csi *CreateSearchIndexes) Execute(ctx context.Context) error { + if csi.deployment == nil { + return errors.New("the CreateSearchIndexes operation must have a Deployment set before Execute can be called") + } + + return driver.Operation{ + CommandFn: csi.command, + ProcessResponseFn: csi.processResponse, + CommandMonitor: csi.monitor, + Database: csi.database, + Deployment: csi.deployment, + }.Execute(ctx) + +} + +func (csi *CreateSearchIndexes) command(dst []byte, _ description.SelectedServer) ([]byte, error) { + dst = bsoncore.AppendStringElement(dst, "createSearchIndexes", csi.collection) + if csi.indexes != nil { + dst = bsoncore.AppendArrayElement(dst, "indexes", csi.indexes) + } + return dst, nil +} + +// Indexes specifies an array containing index specification documents for the indexes being created. +func (csi *CreateSearchIndexes) Indexes(indexes bsoncore.Document) *CreateSearchIndexes { + if csi == nil { + csi = new(CreateSearchIndexes) + } + + csi.indexes = indexes + return csi +} + +// Session sets the session for this operation. +func (csi *CreateSearchIndexes) Session(session *session.Client) *CreateSearchIndexes { + if csi == nil { + csi = new(CreateSearchIndexes) + } + + csi.session = session + return csi +} + +// ClusterClock sets the cluster clock for this operation. 
+func (csi *CreateSearchIndexes) ClusterClock(clock *session.ClusterClock) *CreateSearchIndexes { + if csi == nil { + csi = new(CreateSearchIndexes) + } + + csi.clock = clock + return csi +} + +// Collection sets the collection that this command will run against. +func (csi *CreateSearchIndexes) Collection(collection string) *CreateSearchIndexes { + if csi == nil { + csi = new(CreateSearchIndexes) + } + + csi.collection = collection + return csi +} + +// CommandMonitor sets the monitor to use for APM events. +func (csi *CreateSearchIndexes) CommandMonitor(monitor *event.CommandMonitor) *CreateSearchIndexes { + if csi == nil { + csi = new(CreateSearchIndexes) + } + + csi.monitor = monitor + return csi +} + +// Crypt sets the Crypt object to use for automatic encryption and decryption. +func (csi *CreateSearchIndexes) Crypt(crypt driver.Crypt) *CreateSearchIndexes { + if csi == nil { + csi = new(CreateSearchIndexes) + } + + csi.crypt = crypt + return csi +} + +// Database sets the database to run this operation against. +func (csi *CreateSearchIndexes) Database(database string) *CreateSearchIndexes { + if csi == nil { + csi = new(CreateSearchIndexes) + } + + csi.database = database + return csi +} + +// Deployment sets the deployment to use for this operation. +func (csi *CreateSearchIndexes) Deployment(deployment driver.Deployment) *CreateSearchIndexes { + if csi == nil { + csi = new(CreateSearchIndexes) + } + + csi.deployment = deployment + return csi +} + +// ServerSelector sets the selector used to retrieve a server. +func (csi *CreateSearchIndexes) ServerSelector(selector description.ServerSelector) *CreateSearchIndexes { + if csi == nil { + csi = new(CreateSearchIndexes) + } + + csi.selector = selector + return csi +} + +// WriteConcern sets the write concern for this operation. 
+func (csi *CreateSearchIndexes) WriteConcern(writeConcern *writeconcern.WriteConcern) *CreateSearchIndexes { + if csi == nil { + csi = new(CreateSearchIndexes) + } + + csi.writeConcern = writeConcern + return csi +} + +// ServerAPI sets the server API version for this operation. +func (csi *CreateSearchIndexes) ServerAPI(serverAPI *driver.ServerAPIOptions) *CreateSearchIndexes { + if csi == nil { + csi = new(CreateSearchIndexes) + } + + csi.serverAPI = serverAPI + return csi +} + +// Timeout sets the timeout for this operation. +func (csi *CreateSearchIndexes) Timeout(timeout *time.Duration) *CreateSearchIndexes { + if csi == nil { + csi = new(CreateSearchIndexes) + } + + csi.timeout = timeout + return csi +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.go index bb8359425..bf95cf496 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/delete.go @@ -14,6 +14,8 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" + "go.mongodb.org/mongo-driver/internal/logger" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -41,6 +43,7 @@ type Delete struct { serverAPI *driver.ServerAPIOptions let bsoncore.Document timeout *time.Duration + logger *logger.Logger } // DeleteResult represents a delete result returned by the server. @@ -111,6 +114,8 @@ func (d *Delete) Execute(ctx context.Context) error { WriteConcern: d.writeConcern, ServerAPI: d.serverAPI, Timeout: d.timeout, + Logger: d.logger, + Name: driverutil.DeleteOp, }.Execute(ctx) } @@ -312,3 +317,14 @@ func (d *Delete) Timeout(timeout *time.Duration) *Delete { d.timeout = timeout return d } + +// Logger sets the logger for this operation. 
+func (d *Delete) Logger(logger *logger.Logger) *Delete { + if d == nil { + d = new(Delete) + } + + d.logger = logger + + return d +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.go index af66654d6..b7e675ce4 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/distinct.go @@ -13,6 +13,7 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/readconcern" "go.mongodb.org/mongo-driver/mongo/readpref" @@ -105,6 +106,7 @@ func (d *Distinct) Execute(ctx context.Context) error { Selector: d.selector, ServerAPI: d.serverAPI, Timeout: d.timeout, + Name: driverutil.DistinctOp, }.Execute(ctx) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.go index 2b65d4844..8c6596756 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_collection.go @@ -13,6 +13,7 @@ import ( "time" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -102,11 +103,12 @@ func (dc *DropCollection) Execute(ctx context.Context) error { WriteConcern: dc.writeConcern, ServerAPI: dc.serverAPI, Timeout: dc.timeout, + Name: driverutil.DropOp, }.Execute(ctx) } -func (dc *DropCollection) command(dst []byte, desc description.SelectedServer) ([]byte, error) { +func (dc *DropCollection) command(dst []byte, _ description.SelectedServer) 
([]byte, error) { dst = bsoncore.AppendStringElement(dst, "drop", dc.collection) return dst, nil } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.go index ae011e2ba..a8f9b45ba 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_database.go @@ -11,6 +11,7 @@ import ( "errors" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -53,11 +54,12 @@ func (dd *DropDatabase) Execute(ctx context.Context) error { Selector: dd.selector, WriteConcern: dd.writeConcern, ServerAPI: dd.serverAPI, + Name: driverutil.DropDatabaseOp, }.Execute(ctx) } -func (dd *DropDatabase) command(dst []byte, desc description.SelectedServer) ([]byte, error) { +func (dd *DropDatabase) command(dst []byte, _ description.SelectedServer) ([]byte, error) { dst = bsoncore.AppendInt32Element(dst, "dropDatabase", 1) return dst, nil diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.go index 2e8569021..0c3d45970 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_indexes.go @@ -13,6 +13,7 @@ import ( "time" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -99,11 +100,12 @@ func (di *DropIndexes) Execute(ctx context.Context) error { WriteConcern: di.writeConcern, ServerAPI: di.serverAPI, Timeout: di.timeout, 
+ Name: driverutil.DropIndexesOp, }.Execute(ctx) } -func (di *DropIndexes) command(dst []byte, desc description.SelectedServer) ([]byte, error) { +func (di *DropIndexes) command(dst []byte, _ description.SelectedServer) ([]byte, error) { dst = bsoncore.AppendStringElement(dst, "dropIndexes", di.collection) if di.index != nil { dst = bsoncore.AppendStringElement(dst, "index", *di.index) diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go new file mode 100644 index 000000000..25cde8154 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go @@ -0,0 +1,227 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package operation + +import ( + "context" + "errors" + "fmt" + "time" + + "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/mongo/description" + "go.mongodb.org/mongo-driver/mongo/writeconcern" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" + "go.mongodb.org/mongo-driver/x/mongo/driver" + "go.mongodb.org/mongo-driver/x/mongo/driver/session" +) + +// DropSearchIndex performs an dropSearchIndex operation. +type DropSearchIndex struct { + index string + session *session.Client + clock *session.ClusterClock + collection string + monitor *event.CommandMonitor + crypt driver.Crypt + database string + deployment driver.Deployment + selector description.ServerSelector + writeConcern *writeconcern.WriteConcern + result DropSearchIndexResult + serverAPI *driver.ServerAPIOptions + timeout *time.Duration +} + +// DropSearchIndexResult represents a dropSearchIndex result returned by the server. 
+type DropSearchIndexResult struct { + Ok int32 +} + +func buildDropSearchIndexResult(response bsoncore.Document) (DropSearchIndexResult, error) { + elements, err := response.Elements() + if err != nil { + return DropSearchIndexResult{}, err + } + dsir := DropSearchIndexResult{} + for _, element := range elements { + switch element.Key() { + case "ok": + var ok bool + dsir.Ok, ok = element.Value().AsInt32OK() + if !ok { + return dsir, fmt.Errorf("response field 'ok' is type int32, but received BSON type %s", element.Value().Type) + } + } + } + return dsir, nil +} + +// NewDropSearchIndex constructs and returns a new DropSearchIndex. +func NewDropSearchIndex(index string) *DropSearchIndex { + return &DropSearchIndex{ + index: index, + } +} + +// Result returns the result of executing this operation. +func (dsi *DropSearchIndex) Result() DropSearchIndexResult { return dsi.result } + +func (dsi *DropSearchIndex) processResponse(info driver.ResponseInfo) error { + var err error + dsi.result, err = buildDropSearchIndexResult(info.ServerResponse) + return err +} + +// Execute runs this operations and returns an error if the operation did not execute successfully. 
+func (dsi *DropSearchIndex) Execute(ctx context.Context) error { + if dsi.deployment == nil { + return errors.New("the DropSearchIndex operation must have a Deployment set before Execute can be called") + } + + return driver.Operation{ + CommandFn: dsi.command, + ProcessResponseFn: dsi.processResponse, + Client: dsi.session, + Clock: dsi.clock, + CommandMonitor: dsi.monitor, + Crypt: dsi.crypt, + Database: dsi.database, + Deployment: dsi.deployment, + Selector: dsi.selector, + WriteConcern: dsi.writeConcern, + ServerAPI: dsi.serverAPI, + Timeout: dsi.timeout, + }.Execute(ctx) + +} + +func (dsi *DropSearchIndex) command(dst []byte, _ description.SelectedServer) ([]byte, error) { + dst = bsoncore.AppendStringElement(dst, "dropSearchIndex", dsi.collection) + dst = bsoncore.AppendStringElement(dst, "name", dsi.index) + return dst, nil +} + +// Index specifies the name of the index to drop. If '*' is specified, all indexes will be dropped. +func (dsi *DropSearchIndex) Index(index string) *DropSearchIndex { + if dsi == nil { + dsi = new(DropSearchIndex) + } + + dsi.index = index + return dsi +} + +// Session sets the session for this operation. +func (dsi *DropSearchIndex) Session(session *session.Client) *DropSearchIndex { + if dsi == nil { + dsi = new(DropSearchIndex) + } + + dsi.session = session + return dsi +} + +// ClusterClock sets the cluster clock for this operation. +func (dsi *DropSearchIndex) ClusterClock(clock *session.ClusterClock) *DropSearchIndex { + if dsi == nil { + dsi = new(DropSearchIndex) + } + + dsi.clock = clock + return dsi +} + +// Collection sets the collection that this command will run against. +func (dsi *DropSearchIndex) Collection(collection string) *DropSearchIndex { + if dsi == nil { + dsi = new(DropSearchIndex) + } + + dsi.collection = collection + return dsi +} + +// CommandMonitor sets the monitor to use for APM events. 
+func (dsi *DropSearchIndex) CommandMonitor(monitor *event.CommandMonitor) *DropSearchIndex { + if dsi == nil { + dsi = new(DropSearchIndex) + } + + dsi.monitor = monitor + return dsi +} + +// Crypt sets the Crypt object to use for automatic encryption and decryption. +func (dsi *DropSearchIndex) Crypt(crypt driver.Crypt) *DropSearchIndex { + if dsi == nil { + dsi = new(DropSearchIndex) + } + + dsi.crypt = crypt + return dsi +} + +// Database sets the database to run this operation against. +func (dsi *DropSearchIndex) Database(database string) *DropSearchIndex { + if dsi == nil { + dsi = new(DropSearchIndex) + } + + dsi.database = database + return dsi +} + +// Deployment sets the deployment to use for this operation. +func (dsi *DropSearchIndex) Deployment(deployment driver.Deployment) *DropSearchIndex { + if dsi == nil { + dsi = new(DropSearchIndex) + } + + dsi.deployment = deployment + return dsi +} + +// ServerSelector sets the selector used to retrieve a server. +func (dsi *DropSearchIndex) ServerSelector(selector description.ServerSelector) *DropSearchIndex { + if dsi == nil { + dsi = new(DropSearchIndex) + } + + dsi.selector = selector + return dsi +} + +// WriteConcern sets the write concern for this operation. +func (dsi *DropSearchIndex) WriteConcern(writeConcern *writeconcern.WriteConcern) *DropSearchIndex { + if dsi == nil { + dsi = new(DropSearchIndex) + } + + dsi.writeConcern = writeConcern + return dsi +} + +// ServerAPI sets the server API version for this operation. +func (dsi *DropSearchIndex) ServerAPI(serverAPI *driver.ServerAPIOptions) *DropSearchIndex { + if dsi == nil { + dsi = new(DropSearchIndex) + } + + dsi.serverAPI = serverAPI + return dsi +} + +// Timeout sets the timeout for this operation. 
+func (dsi *DropSearchIndex) Timeout(timeout *time.Duration) *DropSearchIndex { + if dsi == nil { + dsi = new(DropSearchIndex) + } + + dsi.timeout = timeout + return dsi +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.go index 644d19555..52f300bb7 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/end_sessions.go @@ -11,6 +11,7 @@ import ( "errors" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" @@ -59,11 +60,12 @@ func (es *EndSessions) Execute(ctx context.Context) error { Deployment: es.deployment, Selector: es.selector, ServerAPI: es.serverAPI, + Name: driverutil.EndSessionsOp, }.Execute(ctx) } -func (es *EndSessions) command(dst []byte, desc description.SelectedServer) ([]byte, error) { +func (es *EndSessions) command(dst []byte, _ description.SelectedServer) ([]byte, error) { if es.sessionIDs != nil { dst = bsoncore.AppendArrayElement(dst, "endSessions", es.sessionIDs) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go index 6ccdfcae8..27bb5b4f9 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go @@ -13,6 +13,8 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" + "go.mongodb.org/mongo-driver/internal/logger" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/readconcern" "go.mongodb.org/mongo-driver/mongo/readpref" @@ -60,6 +62,7 @@ type 
Find struct { result driver.CursorResponse serverAPI *driver.ServerAPIOptions timeout *time.Duration + logger *logger.Logger } // NewFind constructs and returns a new Find. @@ -105,6 +108,8 @@ func (f *Find) Execute(ctx context.Context) error { Legacy: driver.LegacyFind, ServerAPI: f.serverAPI, Timeout: f.timeout, + Logger: f.logger, + Name: driverutil.FindOp, }.Execute(ctx) } @@ -546,3 +551,13 @@ func (f *Find) Timeout(timeout *time.Duration) *Find { f.timeout = timeout return f } + +// Logger sets the logger for this operation. +func (f *Find) Logger(logger *logger.Logger) *Find { + if f == nil { + f = new(Find) + } + + f.logger = logger + return f +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.go index 7c4cb527b..7faf56113 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find_and_modify.go @@ -15,6 +15,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -24,7 +25,7 @@ import ( // FindAndModify performs a findAndModify operation. 
type FindAndModify struct { - arrayFilters bsoncore.Document + arrayFilters bsoncore.Array bypassDocumentValidation *bool collation bsoncore.Document comment bsoncore.Value @@ -143,6 +144,7 @@ func (fam *FindAndModify) Execute(ctx context.Context) error { Crypt: fam.crypt, ServerAPI: fam.serverAPI, Timeout: fam.timeout, + Name: driverutil.FindAndModifyOp, }.Execute(ctx) } @@ -215,7 +217,7 @@ func (fam *FindAndModify) command(dst []byte, desc description.SelectedServer) ( } // ArrayFilters specifies an array of filter documents that determines which array elements to modify for an update operation on an array field. -func (fam *FindAndModify) ArrayFilters(arrayFilters bsoncore.Document) *FindAndModify { +func (fam *FindAndModify) ArrayFilters(arrayFilters bsoncore.Array) *FindAndModify { if fam == nil { fam = new(FindAndModify) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go index b2f657715..6e750fd03 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go @@ -9,11 +9,15 @@ package operation import ( "context" "errors" + "os" "runtime" "strconv" + "strings" "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/internal/bsonutil" + "go.mongodb.org/mongo-driver/internal/driverutil" + "go.mongodb.org/mongo-driver/internal/handshake" "go.mongodb.org/mongo-driver/mongo/address" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/version" @@ -22,6 +26,14 @@ import ( "go.mongodb.org/mongo-driver/x/mongo/driver/session" ) +// maxClientMetadataSize is the maximum size of the client metadata document +// that can be sent to the server. Note that the maximum document size on +// standalone and replica servers is 1024, but the maximum document size on +// sharded clusters is 512. 
+const maxClientMetadataSize = 512 + +const driverName = "mongo-go-driver" + // Hello is used to run the handshake operation. type Hello struct { appname string @@ -113,6 +125,377 @@ func (h *Hello) Result(addr address.Address) description.Server { return description.NewServer(addr, bson.Raw(h.res)) } +const dockerEnvPath = "/.dockerenv" + +const ( + // Runtime names + runtimeNameDocker = "docker" + + // Orchestrator names + orchestratorNameK8s = "kubernetes" +) + +// getFaasEnvName parses the FaaS environment variable name and returns the +// corresponding name used by the client. If none of the variables or variables +// for multiple names are populated the FaaS values MUST be entirely omitted. +// When variables for multiple "client.env.name" values are present, "vercel" +// takes precedence over "aws.lambda"; any other combination MUST cause FaaS +// values to be entirely omitted. +func getFaasEnvName() string { + envVars := []string{ + driverutil.EnvVarAWSExecutionEnv, + driverutil.EnvVarAWSLambdaRuntimeAPI, + driverutil.EnvVarFunctionsWorkerRuntime, + driverutil.EnvVarKService, + driverutil.EnvVarFunctionName, + driverutil.EnvVarVercel, + } + + // If none of the variables are populated the client.env value MUST be + // entirely omitted. + names := make(map[string]struct{}) + + for _, envVar := range envVars { + val := os.Getenv(envVar) + if val == "" { + continue + } + + var name string + + switch envVar { + case driverutil.EnvVarAWSExecutionEnv: + if !strings.HasPrefix(val, driverutil.AwsLambdaPrefix) { + continue + } + + name = driverutil.EnvNameAWSLambda + case driverutil.EnvVarAWSLambdaRuntimeAPI: + name = driverutil.EnvNameAWSLambda + case driverutil.EnvVarFunctionsWorkerRuntime: + name = driverutil.EnvNameAzureFunc + case driverutil.EnvVarKService, driverutil.EnvVarFunctionName: + name = driverutil.EnvNameGCPFunc + case driverutil.EnvVarVercel: + // "vercel" takes precedence over "aws.lambda". 
+ delete(names, driverutil.EnvNameAWSLambda) + + name = driverutil.EnvNameVercel + } + + names[name] = struct{}{} + if len(names) > 1 { + // If multiple names are populated the client.env value + // MUST be entirely omitted. + names = nil + + break + } + } + + for name := range names { + return name + } + + return "" +} + +type containerInfo struct { + runtime string + orchestrator string +} + +// getContainerEnvInfo returns runtime and orchestrator of a container. +// If no fields is populated, the client.env.container value MUST be entirely +// omitted. +func getContainerEnvInfo() *containerInfo { + var runtime, orchestrator string + if _, err := os.Stat(dockerEnvPath); !os.IsNotExist(err) { + runtime = runtimeNameDocker + } + if v := os.Getenv(driverutil.EnvVarK8s); v != "" { + orchestrator = orchestratorNameK8s + } + if runtime != "" || orchestrator != "" { + return &containerInfo{ + runtime: runtime, + orchestrator: orchestrator, + } + } + return nil +} + +// appendClientAppName appends the application metadata to the dst. It is the +// responsibility of the caller to check that this appending does not cause dst +// to exceed any size limitations. +func appendClientAppName(dst []byte, name string) ([]byte, error) { + if name == "" { + return dst, nil + } + + var idx int32 + idx, dst = bsoncore.AppendDocumentElementStart(dst, "application") + + dst = bsoncore.AppendStringElement(dst, "name", name) + + return bsoncore.AppendDocumentEnd(dst, idx) +} + +// appendClientDriver appends the driver metadata to dst. It is the +// responsibility of the caller to check that this appending does not cause dst +// to exceed any size limitations. 
+func appendClientDriver(dst []byte) ([]byte, error) { + var idx int32 + idx, dst = bsoncore.AppendDocumentElementStart(dst, "driver") + + dst = bsoncore.AppendStringElement(dst, "name", driverName) + dst = bsoncore.AppendStringElement(dst, "version", version.Driver) + + return bsoncore.AppendDocumentEnd(dst, idx) +} + +// appendClientEnv appends the environment metadata to dst. It is the +// responsibility of the caller to check that this appending does not cause dst +// to exceed any size limitations. +func appendClientEnv(dst []byte, omitNonName, omitDoc bool) ([]byte, error) { + if omitDoc { + return dst, nil + } + + name := getFaasEnvName() + container := getContainerEnvInfo() + // Omit the entire 'env' if both name and container are empty because other + // fields depend on either of them. + if name == "" && container == nil { + return dst, nil + } + + var idx int32 + + idx, dst = bsoncore.AppendDocumentElementStart(dst, "env") + + if name != "" { + dst = bsoncore.AppendStringElement(dst, "name", name) + } + + addMem := func(envVar string) []byte { + mem := os.Getenv(envVar) + if mem == "" { + return dst + } + + memInt64, err := strconv.ParseInt(mem, 10, 32) + if err != nil { + return dst + } + + memInt32 := int32(memInt64) + + return bsoncore.AppendInt32Element(dst, "memory_mb", memInt32) + } + + addRegion := func(envVar string) []byte { + region := os.Getenv(envVar) + if region == "" { + return dst + } + + return bsoncore.AppendStringElement(dst, "region", region) + } + + addTimeout := func(envVar string) []byte { + timeout := os.Getenv(envVar) + if timeout == "" { + return dst + } + + timeoutInt64, err := strconv.ParseInt(timeout, 10, 32) + if err != nil { + return dst + } + + timeoutInt32 := int32(timeoutInt64) + return bsoncore.AppendInt32Element(dst, "timeout_sec", timeoutInt32) + } + + if !omitNonName { + // No other FaaS fields will be populated if the name is empty. 
+ switch name { + case driverutil.EnvNameAWSLambda: + dst = addMem(driverutil.EnvVarAWSLambdaFunctionMemorySize) + dst = addRegion(driverutil.EnvVarAWSRegion) + case driverutil.EnvNameGCPFunc: + dst = addMem(driverutil.EnvVarFunctionMemoryMB) + dst = addRegion(driverutil.EnvVarFunctionRegion) + dst = addTimeout(driverutil.EnvVarFunctionTimeoutSec) + case driverutil.EnvNameVercel: + dst = addRegion(driverutil.EnvVarVercelRegion) + } + } + + if container != nil { + var idxCntnr int32 + idxCntnr, dst = bsoncore.AppendDocumentElementStart(dst, "container") + if container.runtime != "" { + dst = bsoncore.AppendStringElement(dst, "runtime", container.runtime) + } + if container.orchestrator != "" { + dst = bsoncore.AppendStringElement(dst, "orchestrator", container.orchestrator) + } + var err error + dst, err = bsoncore.AppendDocumentEnd(dst, idxCntnr) + if err != nil { + return dst, err + } + } + + return bsoncore.AppendDocumentEnd(dst, idx) +} + +// appendClientOS appends the OS metadata to dst. It is the responsibility of the +// caller to check that this appending does not cause dst to exceed any size +// limitations. +func appendClientOS(dst []byte, omitNonType bool) ([]byte, error) { + var idx int32 + + idx, dst = bsoncore.AppendDocumentElementStart(dst, "os") + + dst = bsoncore.AppendStringElement(dst, "type", runtime.GOOS) + if !omitNonType { + dst = bsoncore.AppendStringElement(dst, "architecture", runtime.GOARCH) + } + + return bsoncore.AppendDocumentEnd(dst, idx) +} + +// appendClientPlatform appends the platform metadata to dst. It is the +// responsibility of the caller to check that this appending does not cause dst +// to exceed any size limitations. +func appendClientPlatform(dst []byte) []byte { + return bsoncore.AppendStringElement(dst, "platform", runtime.Version()) +} + +// encodeClientMetadata encodes the client metadata into a BSON document. maxLen +// is the maximum length the document can be. 
If the document exceeds maxLen, +// then an empty byte slice is returned. If there is not enough space to encode +// a document, the document is truncated and returned. +// +// This function attempts to build the following document. Fields are omitted to +// save space following the MongoDB Handshake. +// +// { +// application: { +// name: "" +// }, +// driver: { +// name: "", +// version: "" +// }, +// platform: "", +// os: { +// type: "", +// name: "", +// architecture: "", +// version: "" +// }, +// env: { +// name: "", +// timeout_sec: 42, +// memory_mb: 1024, +// region: "", +// container: { +// runtime: "", +// orchestrator: "" +// } +// } +// } +func encodeClientMetadata(appname string, maxLen int) ([]byte, error) { + dst := make([]byte, 0, maxLen) + + omitEnvDoc := false + omitEnvNonName := false + omitOSNonType := false + omitEnvDocument := false + truncatePlatform := false + +retry: + var idx int32 + idx, dst = bsoncore.AppendDocumentStart(dst) + + var err error + dst, err = appendClientAppName(dst, appname) + if err != nil { + return nil, err + } + + dst, err = appendClientDriver(dst) + if err != nil { + return nil, err + } + + dst, err = appendClientOS(dst, omitOSNonType) + if err != nil { + return nil, err + } + + if !truncatePlatform { + dst = appendClientPlatform(dst) + } + + if !omitEnvDocument { + dst, err = appendClientEnv(dst, omitEnvNonName, omitEnvDoc) + if err != nil { + return nil, err + } + } + + dst, err = bsoncore.AppendDocumentEnd(dst, idx) + if err != nil { + return nil, err + } + + if len(dst) > maxLen { + // Implementers SHOULD cumulatively update fields in the + // following order until the document is under the size limit + // + // 1. Omit fields from ``env`` except ``env.name`` + // 2. Omit fields from ``os`` except ``os.type`` + // 3. Omit the ``env`` document entirely + // 4. 
Truncate ``platform`` + dst = dst[:0] + + if !omitEnvNonName { + omitEnvNonName = true + + goto retry + } + + if !omitOSNonType { + omitOSNonType = true + + goto retry + } + + if !omitEnvDoc { + omitEnvDoc = true + + goto retry + } + + if !truncatePlatform { + truncatePlatform = true + + goto retry + } + + // There is nothing left to update. Return an empty slice to + // tell caller not to append a `client` document. + return nil, nil + } + + return dst, nil +} + // handshakeCommand appends all necessary command fields as well as client metadata, SASL supported mechs, and compression. func (h *Hello) handshakeCommand(dst []byte, desc description.SelectedServer) ([]byte, error) { dst, err := h.command(dst, desc) @@ -133,26 +516,12 @@ func (h *Hello) handshakeCommand(dst []byte, desc description.SelectedServer) ([ } dst, _ = bsoncore.AppendArrayEnd(dst, idx) - // append client metadata - idx, dst = bsoncore.AppendDocumentElementStart(dst, "client") - - didx, dst := bsoncore.AppendDocumentElementStart(dst, "driver") - dst = bsoncore.AppendStringElement(dst, "name", "mongo-go-driver") - dst = bsoncore.AppendStringElement(dst, "version", version.Driver) - dst, _ = bsoncore.AppendDocumentEnd(dst, didx) - - didx, dst = bsoncore.AppendDocumentElementStart(dst, "os") - dst = bsoncore.AppendStringElement(dst, "type", runtime.GOOS) - dst = bsoncore.AppendStringElement(dst, "architecture", runtime.GOARCH) - dst, _ = bsoncore.AppendDocumentEnd(dst, didx) + clientMetadata, _ := encodeClientMetadata(h.appname, maxClientMetadataSize) - dst = bsoncore.AppendStringElement(dst, "platform", runtime.Version()) - if h.appname != "" { - didx, dst = bsoncore.AppendDocumentElementStart(dst, "application") - dst = bsoncore.AppendStringElement(dst, "name", h.appname) - dst, _ = bsoncore.AppendDocumentEnd(dst, didx) + // If the client metadata is empty, do not append it to the command. 
+ if len(clientMetadata) > 0 { + dst = bsoncore.AppendDocumentElement(dst, "client", clientMetadata) } - dst, _ = bsoncore.AppendDocumentEnd(dst, idx) return dst, nil } @@ -164,7 +533,7 @@ func (h *Hello) command(dst []byte, desc description.SelectedServer) ([]byte, er if desc.Kind == description.LoadBalanced || h.serverAPI != nil || desc.Server.HelloOK { dst = bsoncore.AppendInt32Element(dst, "hello", 1) } else { - dst = bsoncore.AppendInt32Element(dst, internal.LegacyHello, 1) + dst = bsoncore.AppendInt32Element(dst, handshake.LegacyHello, 1) } dst = bsoncore.AppendBooleanElement(dst, "helloOk", true) @@ -202,8 +571,16 @@ func (h *Hello) StreamResponse(ctx context.Context, conn driver.StreamerConnecti return h.createOperation().ExecuteExhaust(ctx, conn) } +// isLegacyHandshake returns True if server API version is not requested and +// loadBalanced is False. If this is the case, then the drivers MUST use legacy +// hello for the first message of the initial handshake with the OP_QUERY +// protocol +func isLegacyHandshake(srvAPI *driver.ServerAPIOptions, deployment driver.Deployment) bool { + return srvAPI == nil && deployment.Kind() != description.LoadBalanced +} + func (h *Hello) createOperation() driver.Operation { - return driver.Operation{ + op := driver.Operation{ Clock: h.clock, CommandFn: h.command, Database: "admin", @@ -214,23 +591,36 @@ func (h *Hello) createOperation() driver.Operation { }, ServerAPI: h.serverAPI, } + + if isLegacyHandshake(h.serverAPI, h.d) { + op.Legacy = driver.LegacyHandshake + } + + return op } // GetHandshakeInformation performs the MongoDB handshake for the provided connection and returns the relevant // information about the server. This function implements the driver.Handshaker interface. 
func (h *Hello) GetHandshakeInformation(ctx context.Context, _ address.Address, c driver.Connection) (driver.HandshakeInformation, error) { - err := driver.Operation{ + deployment := driver.SingleConnectionDeployment{C: c} + + op := driver.Operation{ Clock: h.clock, CommandFn: h.handshakeCommand, - Deployment: driver.SingleConnectionDeployment{c}, + Deployment: deployment, Database: "admin", ProcessResponseFn: func(info driver.ResponseInfo) error { h.res = info.ServerResponse return nil }, ServerAPI: h.serverAPI, - }.Execute(ctx) - if err != nil { + } + + if isLegacyHandshake(h.serverAPI, deployment) { + op.Legacy = driver.LegacyHandshake + } + + if err := op.Execute(ctx); err != nil { return driver.HandshakeInformation{}, err } @@ -243,10 +633,13 @@ func (h *Hello) GetHandshakeInformation(ctx context.Context, _ address.Address, if serverConnectionID, ok := h.res.Lookup("connectionId").AsInt64OK(); ok { info.ServerConnectionID = &serverConnectionID } + + var err error + // Cast to bson.Raw to lookup saslSupportedMechs to avoid converting from bsoncore.Value to bson.RawValue for the // StringSliceFromRawValue call. 
if saslSupportedMechs, lookupErr := bson.Raw(h.res).LookupErr("saslSupportedMechs"); lookupErr == nil { - info.SaslSupportedMechs, err = internal.StringSliceFromRawValue("saslSupportedMechs", saslSupportedMechs) + info.SaslSupportedMechs, err = bsonutil.StringSliceFromRawValue("saslSupportedMechs", saslSupportedMechs) } return info, err } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.go index 83ba5e6e1..7da4b8b0f 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/insert.go @@ -14,6 +14,8 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" + "go.mongodb.org/mongo-driver/internal/logger" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -40,6 +42,7 @@ type Insert struct { result InsertResult serverAPI *driver.ServerAPIOptions timeout *time.Duration + logger *logger.Logger } // InsertResult represents an insert result returned by the server. @@ -110,6 +113,8 @@ func (i *Insert) Execute(ctx context.Context) error { WriteConcern: i.writeConcern, ServerAPI: i.serverAPI, Timeout: i.timeout, + Logger: i.logger, + Name: driverutil.InsertOp, }.Execute(ctx) } @@ -291,3 +296,13 @@ func (i *Insert) Timeout(timeout *time.Duration) *Insert { i.timeout = timeout return i } + +// Logger sets the logger for this operation. 
+func (i *Insert) Logger(logger *logger.Logger) *Insert { + if i == nil { + i = new(Insert) + } + + i.logger = logger + return i +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.go index 28f72dd12..c70248e2a 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/listDatabases.go @@ -14,6 +14,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/readpref" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -163,11 +164,12 @@ func (ld *ListDatabases) Execute(ctx context.Context) error { Crypt: ld.crypt, ServerAPI: ld.serverAPI, Timeout: ld.timeout, + Name: driverutil.ListDatabasesOp, }.Execute(ctx) } -func (ld *ListDatabases) command(dst []byte, desc description.SelectedServer) ([]byte, error) { +func (ld *ListDatabases) command(dst []byte, _ description.SelectedServer) ([]byte, error) { dst = bsoncore.AppendInt32Element(dst, "listDatabases", 1) if ld.filter != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.go index 7118417f7..6fe68fa03 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_collections.go @@ -12,6 +12,7 @@ import ( "time" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/readpref" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -47,17 +48,10 @@ func NewListCollections(filter bsoncore.Document) *ListCollections { } // 
Result returns the result of executing this operation. -func (lc *ListCollections) Result(opts driver.CursorOptions) (*driver.ListCollectionsBatchCursor, error) { +func (lc *ListCollections) Result(opts driver.CursorOptions) (*driver.BatchCursor, error) { opts.ServerAPI = lc.serverAPI - bc, err := driver.NewBatchCursor(lc.result, lc.session, lc.clock, opts) - if err != nil { - return nil, err - } - desc := lc.result.Desc - if desc.WireVersion == nil || desc.WireVersion.Max < 3 { - return driver.NewLegacyListCollectionsBatchCursor(bc) - } - return driver.NewListCollectionsBatchCursor(bc) + + return driver.NewBatchCursor(lc.result, lc.session, lc.clock, opts) } func (lc *ListCollections) processResponse(info driver.ResponseInfo) error { @@ -88,11 +82,12 @@ func (lc *ListCollections) Execute(ctx context.Context) error { Legacy: driver.LegacyListCollections, ServerAPI: lc.serverAPI, Timeout: lc.timeout, + Name: driverutil.ListCollectionsOp, }.Execute(ctx) } -func (lc *ListCollections) command(dst []byte, desc description.SelectedServer) ([]byte, error) { +func (lc *ListCollections) command(dst []byte, _ description.SelectedServer) ([]byte, error) { dst = bsoncore.AppendInt32Element(dst, "listCollections", 1) if lc.filter != nil { dst = bsoncore.AppendDocumentElement(dst, "filter", lc.filter) diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.go index 9e5901b99..79d50eca9 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/list_indexes.go @@ -12,6 +12,7 @@ import ( "time" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" @@ -83,11 +84,12 @@ func (li *ListIndexes) Execute(ctx 
context.Context) error { Type: driver.Read, ServerAPI: li.serverAPI, Timeout: li.timeout, + Name: driverutil.ListIndexesOp, }.Execute(ctx) } -func (li *ListIndexes) command(dst []byte, desc description.SelectedServer) ([]byte, error) { +func (li *ListIndexes) command(dst []byte, _ description.SelectedServer) ([]byte, error) { dst = bsoncore.AppendStringElement(dst, "listIndexes", li.collection) cursorIdx, cursorDoc := bsoncore.AppendDocumentStart(nil) diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.go index 816b3709b..881b1bcf7 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update.go @@ -15,6 +15,8 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" + "go.mongodb.org/mongo-driver/internal/logger" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -44,6 +46,7 @@ type Update struct { serverAPI *driver.ServerAPIOptions let bsoncore.Document timeout *time.Duration + logger *logger.Logger } // Upsert contains the information for an upsert in an Update operation. @@ -162,6 +165,8 @@ func (u *Update) Execute(ctx context.Context) error { Crypt: u.crypt, ServerAPI: u.serverAPI, Timeout: u.timeout, + Logger: u.logger, + Name: driverutil.UpdateOp, }.Execute(ctx) } @@ -399,3 +404,13 @@ func (u *Update) Timeout(timeout *time.Duration) *Update { u.timeout = timeout return u } + +// Logger sets the logger for this operation. 
+func (u *Update) Logger(logger *logger.Logger) *Update { + if u == nil { + u = new(Update) + } + + u.logger = logger + return u +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go new file mode 100644 index 000000000..ba807986c --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go @@ -0,0 +1,240 @@ +// Copyright (C) MongoDB, Inc. 2023-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package operation + +import ( + "context" + "errors" + "fmt" + "time" + + "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/mongo/description" + "go.mongodb.org/mongo-driver/mongo/writeconcern" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" + "go.mongodb.org/mongo-driver/x/mongo/driver" + "go.mongodb.org/mongo-driver/x/mongo/driver/session" +) + +// UpdateSearchIndex performs a updateSearchIndex operation. +type UpdateSearchIndex struct { + index string + definition bsoncore.Document + session *session.Client + clock *session.ClusterClock + collection string + monitor *event.CommandMonitor + crypt driver.Crypt + database string + deployment driver.Deployment + selector description.ServerSelector + writeConcern *writeconcern.WriteConcern + result UpdateSearchIndexResult + serverAPI *driver.ServerAPIOptions + timeout *time.Duration +} + +// UpdateSearchIndexResult represents a single index in the updateSearchIndexResult result. 
+type UpdateSearchIndexResult struct { + Ok int32 +} + +func buildUpdateSearchIndexResult(response bsoncore.Document) (UpdateSearchIndexResult, error) { + elements, err := response.Elements() + if err != nil { + return UpdateSearchIndexResult{}, err + } + usir := UpdateSearchIndexResult{} + for _, element := range elements { + switch element.Key() { + case "ok": + var ok bool + usir.Ok, ok = element.Value().AsInt32OK() + if !ok { + return usir, fmt.Errorf("response field 'ok' is type int32, but received BSON type %s", element.Value().Type) + } + } + } + return usir, nil +} + +// NewUpdateSearchIndex constructs and returns a new UpdateSearchIndex. +func NewUpdateSearchIndex(index string, definition bsoncore.Document) *UpdateSearchIndex { + return &UpdateSearchIndex{ + index: index, + definition: definition, + } +} + +// Result returns the result of executing this operation. +func (usi *UpdateSearchIndex) Result() UpdateSearchIndexResult { return usi.result } + +func (usi *UpdateSearchIndex) processResponse(info driver.ResponseInfo) error { + var err error + usi.result, err = buildUpdateSearchIndexResult(info.ServerResponse) + return err +} + +// Execute runs this operations and returns an error if the operation did not execute successfully. 
+func (usi *UpdateSearchIndex) Execute(ctx context.Context) error { + if usi.deployment == nil { + return errors.New("the UpdateSearchIndex operation must have a Deployment set before Execute can be called") + } + + return driver.Operation{ + CommandFn: usi.command, + ProcessResponseFn: usi.processResponse, + Client: usi.session, + Clock: usi.clock, + CommandMonitor: usi.monitor, + Crypt: usi.crypt, + Database: usi.database, + Deployment: usi.deployment, + Selector: usi.selector, + WriteConcern: usi.writeConcern, + ServerAPI: usi.serverAPI, + Timeout: usi.timeout, + }.Execute(ctx) + +} + +func (usi *UpdateSearchIndex) command(dst []byte, _ description.SelectedServer) ([]byte, error) { + dst = bsoncore.AppendStringElement(dst, "updateSearchIndex", usi.collection) + dst = bsoncore.AppendStringElement(dst, "name", usi.index) + dst = bsoncore.AppendDocumentElement(dst, "definition", usi.definition) + return dst, nil +} + +// Index specifies the index of the document being updated. +func (usi *UpdateSearchIndex) Index(name string) *UpdateSearchIndex { + if usi == nil { + usi = new(UpdateSearchIndex) + } + + usi.index = name + return usi +} + +// Definition specifies the definition for the document being created. +func (usi *UpdateSearchIndex) Definition(definition bsoncore.Document) *UpdateSearchIndex { + if usi == nil { + usi = new(UpdateSearchIndex) + } + + usi.definition = definition + return usi +} + +// Session sets the session for this operation. +func (usi *UpdateSearchIndex) Session(session *session.Client) *UpdateSearchIndex { + if usi == nil { + usi = new(UpdateSearchIndex) + } + + usi.session = session + return usi +} + +// ClusterClock sets the cluster clock for this operation. +func (usi *UpdateSearchIndex) ClusterClock(clock *session.ClusterClock) *UpdateSearchIndex { + if usi == nil { + usi = new(UpdateSearchIndex) + } + + usi.clock = clock + return usi +} + +// Collection sets the collection that this command will run against. 
+func (usi *UpdateSearchIndex) Collection(collection string) *UpdateSearchIndex { + if usi == nil { + usi = new(UpdateSearchIndex) + } + + usi.collection = collection + return usi +} + +// CommandMonitor sets the monitor to use for APM events. +func (usi *UpdateSearchIndex) CommandMonitor(monitor *event.CommandMonitor) *UpdateSearchIndex { + if usi == nil { + usi = new(UpdateSearchIndex) + } + + usi.monitor = monitor + return usi +} + +// Crypt sets the Crypt object to use for automatic encryption and decryption. +func (usi *UpdateSearchIndex) Crypt(crypt driver.Crypt) *UpdateSearchIndex { + if usi == nil { + usi = new(UpdateSearchIndex) + } + + usi.crypt = crypt + return usi +} + +// Database sets the database to run this operation against. +func (usi *UpdateSearchIndex) Database(database string) *UpdateSearchIndex { + if usi == nil { + usi = new(UpdateSearchIndex) + } + + usi.database = database + return usi +} + +// Deployment sets the deployment to use for this operation. +func (usi *UpdateSearchIndex) Deployment(deployment driver.Deployment) *UpdateSearchIndex { + if usi == nil { + usi = new(UpdateSearchIndex) + } + + usi.deployment = deployment + return usi +} + +// ServerSelector sets the selector used to retrieve a server. +func (usi *UpdateSearchIndex) ServerSelector(selector description.ServerSelector) *UpdateSearchIndex { + if usi == nil { + usi = new(UpdateSearchIndex) + } + + usi.selector = selector + return usi +} + +// WriteConcern sets the write concern for this operation. +func (usi *UpdateSearchIndex) WriteConcern(writeConcern *writeconcern.WriteConcern) *UpdateSearchIndex { + if usi == nil { + usi = new(UpdateSearchIndex) + } + + usi.writeConcern = writeConcern + return usi +} + +// ServerAPI sets the server API version for this operation. 
+func (usi *UpdateSearchIndex) ServerAPI(serverAPI *driver.ServerAPIOptions) *UpdateSearchIndex { + if usi == nil { + usi = new(UpdateSearchIndex) + } + + usi.serverAPI = serverAPI + return usi +} + +// Timeout sets the timeout for this operation. +func (usi *UpdateSearchIndex) Timeout(timeout *time.Duration) *UpdateSearchIndex { + if usi == nil { + usi = new(UpdateSearchIndex) + } + + usi.timeout = timeout + return usi +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/client_session.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/client_session.go index ba244b101..8dac0932d 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/client_session.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/client_session.go @@ -331,9 +331,10 @@ func (c *Client) ClearPinnedResources() error { return nil } -// UnpinConnection gracefully unpins the connection associated with the session if there is one. This is done via -// the pinned connection's UnpinFromTransaction function. -func (c *Client) UnpinConnection() error { +// unpinConnection gracefully unpins the connection associated with the session +// if there is one. This is done via the pinned connection's +// UnpinFromTransaction function. +func (c *Client) unpinConnection() error { if c == nil || c.PinnedConnection == nil { return nil } @@ -353,6 +354,12 @@ func (c *Client) EndSession() { return } c.Terminated = true + + // Ignore the error when unpinning the connection because we can't do + // anything about it if it doesn't work. Typically the only errors that can + // happen here indicate that something went wrong with the connection state, + // like it wasn't marked as pinned or attempted to return to the wrong pool. 
+ _ = c.unpinConnection() c.pool.ReturnSession(c.Server) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/server_session.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/server_session.go index 044cbd497..b1e45552a 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/server_session.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/server_session.go @@ -31,11 +31,11 @@ func (ss *Server) expired(topoDesc topologyDescription) bool { return false } - if topoDesc.timeoutMinutes <= 0 { + if topoDesc.timeoutMinutes == nil || *topoDesc.timeoutMinutes <= 0 { return true } timeUnused := time.Since(ss.LastUsed).Minutes() - return timeUnused > float64(topoDesc.timeoutMinutes-1) + return timeUnused > float64(*topoDesc.timeoutMinutes-1) } // update the last used time for this session. diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/session_pool.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/session_pool.go index 34b863c11..7336f5451 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/session_pool.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/session_pool.go @@ -25,7 +25,7 @@ type Node struct { // relevant for determining session expiration. type topologyDescription struct { kind description.TopologyKind - timeoutMinutes uint32 + timeoutMinutes *int64 } // Pool is a pool of server sessions that can be reused. 
@@ -65,7 +65,7 @@ func (p *Pool) updateTimeout() { case newDesc := <-p.descChan: p.latestTopology = topologyDescription{ kind: newDesc.Kind, - timeoutMinutes: newDesc.SessionTimeoutMinutes, + timeoutMinutes: newDesc.SessionTimeoutMinutesPtr, } default: // no new description waiting diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/DESIGN.md b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/DESIGN.md index 6594a85d0..8a67dd993 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/DESIGN.md +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/DESIGN.md @@ -1,7 +1,9 @@ # Topology Package Design + This document outlines the design for this package. ## Topology + The `Topology` type handles monitoring the state of a MongoDB deployment and selecting servers. Updating the description is handled by finite state machine which implements the server discovery and monitoring specification. A `Topology` can be connected and fully disconnected, which enables @@ -9,9 +11,11 @@ saving resources. The `Topology` type also handles server selection following th specification. ## Server + The `Server` type handles heartbeating a MongoDB server and holds a pool of connections. ## Connection + Connections are handled by two main types and an auxiliary type. The two main types are `connection` and `Connection`. The first holds most of the logic required to actually read and write wire messages. Instances can be created with the `newConnection` method. Inside the `newConnection` @@ -26,6 +30,7 @@ The connection implementations in this package are conduits for wire messages bu ability to encode, decode, or validate wire messages. That must be handled by consumers. ## Pool + The `pool` type implements a connection pool. It handles caching idle connections and dialing new ones, but it does not track a maximum number of connections. That is the responsibility of a wrapping type, like `Server`. 
@@ -37,4 +42,4 @@ There is a `close` method, but this is used to close a connection. There are three methods related to getting and putting connections: `get`, `close`, and `put`. The `get` method will either retrieve a connection from the cache or it will dial a new `connection`. The `close` method will close the underlying socket of a `connection`. The `put` method will put a -connection into the pool, placing it in the cahce if there is space, otherwise it will close it. +connection into the pool, placing it in the cache if there is space, otherwise it will close it. diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go index 6e8fd5297..af25b1f68 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go @@ -18,7 +18,6 @@ import ( "sync/atomic" "time" - "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/address" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -95,7 +94,7 @@ func newConnection(addr address.Address, opts ...ConnectionOption) *connection { connectDone: make(chan struct{}), config: cfg, connectContextMade: make(chan struct{}), - cancellationListener: internal.NewCancellationListener(), + cancellationListener: newCancellListener(), } // Connections to non-load balanced deployments should eagerly set the generation numbers so errors encountered // at any point during connection establishment can be processed without the connection being considered stale. @@ -840,3 +839,47 @@ func configureTLS(ctx context.Context, } return client, nil } + +// TODO: Naming? + +// cancellListener listens for context cancellation and notifies listeners via a +// callback function. 
+type cancellListener struct { + aborted bool + done chan struct{} +} + +// newCancellListener constructs a cancellListener. +func newCancellListener() *cancellListener { + return &cancellListener{ + done: make(chan struct{}), + } +} + +// Listen blocks until the provided context is cancelled or listening is aborted +// via the StopListening function. If this detects that the context has been +// cancelled (i.e. ctx.Err() == context.Canceled), the provided callback is +// called to abort in-progress work. Even if the context expires, this function +// will block until StopListening is called. +func (c *cancellListener) Listen(ctx context.Context, abortFn func()) { + c.aborted = false + + select { + case <-ctx.Done(): + if ctx.Err() == context.Canceled { + c.aborted = true + abortFn() + } + + <-c.done + case <-c.done: + } +} + +// StopListening stops the in-progress Listen call. This blocks if there is no +// in-progress Listen call. This function will return true if the provided abort +// callback was called when listening for cancellation on the previous context. 
+func (c *cancellListener) StopListening() bool { + c.done <- struct{}{} + return c.aborted +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_options.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_options.go index 6e6ea01d8..43e6f3f50 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_options.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection_options.go @@ -15,7 +15,7 @@ import ( "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/event" - "go.mongodb.org/mongo-driver/internal" + "go.mongodb.org/mongo-driver/internal/httputil" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/ocsp" ) @@ -72,7 +72,7 @@ func newConnectionConfig(opts ...ConnectionOption) *connectionConfig { connectTimeout: 30 * time.Second, dialer: nil, tlsConnectionSource: defaultTLSConnectionSource, - httpClient: internal.DefaultHTTPClient, + httpClient: httputil.DefaultHTTPClient, } for _, opt := range opts { @@ -83,6 +83,8 @@ func newConnectionConfig(opts ...ConnectionOption) *connectionConfig { } if cfg.dialer == nil { + // Use a zero value of net.Dialer when nothing is specified, so the Go driver applies default behaviors + // such as Timeout, KeepAlive, DNS resolving, etc. See https://golang.org/pkg/net/#Dialer for more information. 
cfg.dialer = &net.Dialer{} } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go index 4f7b48540..7ce41864e 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go @@ -9,6 +9,7 @@ package topology import ( "context" "fmt" + "time" "go.mongodb.org/mongo-driver/mongo/description" ) @@ -69,11 +70,17 @@ func (e ServerSelectionError) Unwrap() error { // WaitQueueTimeoutError represents a timeout when requesting a connection from the pool type WaitQueueTimeoutError struct { - Wrapped error - PinnedCursorConnections uint64 - PinnedTransactionConnections uint64 - maxPoolSize uint64 - totalConnectionCount int + Wrapped error + pinnedConnections *pinnedConnections + maxPoolSize uint64 + totalConnections int + availableConnections int + waitDuration time.Duration +} + +type pinnedConnections struct { + cursorConnections uint64 + transactionConnections uint64 } // Error implements the error interface. 
@@ -95,14 +102,19 @@ func (w WaitQueueTimeoutError) Error() string { ) } - return fmt.Sprintf( - "%s; maxPoolSize: %d, connections in use by cursors: %d"+ - ", connections in use by transactions: %d, connections in use by other operations: %d", - errorMsg, - w.maxPoolSize, - w.PinnedCursorConnections, - w.PinnedTransactionConnections, - uint64(w.totalConnectionCount)-w.PinnedCursorConnections-w.PinnedTransactionConnections) + msg := fmt.Sprintf("%s; total connections: %d, maxPoolSize: %d, ", errorMsg, w.totalConnections, w.maxPoolSize) + if pinnedConnections := w.pinnedConnections; pinnedConnections != nil { + openConnectionCount := uint64(w.totalConnections) - + pinnedConnections.cursorConnections - + pinnedConnections.transactionConnections + msg += fmt.Sprintf("connections in use by cursors: %d, connections in use by transactions: %d, connections in use by other operations: %d, ", + pinnedConnections.cursorConnections, + pinnedConnections.transactionConnections, + openConnectionCount, + ) + } + msg += fmt.Sprintf("idle connections: %d, wait duration: %s", w.availableConnections, w.waitDuration.String()) + return msg } // Unwrap returns the underlying error. diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/fsm.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/fsm.go index 1ee8dce49..2acf527b9 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/fsm.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/fsm.go @@ -12,6 +12,7 @@ import ( "sync/atomic" "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/internal/ptrutil" "go.mongodb.org/mongo-driver/mongo/address" "go.mongodb.org/mongo-driver/mongo/description" ) @@ -21,7 +22,7 @@ var ( MinSupportedMongoDBVersion = "3.6" // SupportedWireVersions is the range of wire versions supported by the driver. 
- SupportedWireVersions = description.NewVersionRange(6, 17) + SupportedWireVersions = description.NewVersionRange(6, 21) ) type fsm struct { @@ -38,6 +39,63 @@ func newFSM() *fsm { return &f } +// selectFSMSessionTimeout selects the timeout to return for the topology's +// finite state machine. If the logicalSessionTimeoutMinutes on the FSM exists +// and the server is data-bearing, then we determine this value by returning +// +// min{server timeout, FSM timeout} +// +// where a "nil" value is considered less than 0. +// +// Otherwise, if the FSM's logicalSessionTimeoutMinutes exist, then this +// function returns the FSM timeout. +// +// In the case where the FSM timeout DNE, we check all servers to see if any +// still do not have a timeout. This function chooses the lowest of the existing +// timeouts. +func selectFSMSessionTimeout(f *fsm, s description.Server) *int64 { + oldMinutes := f.SessionTimeoutMinutesPtr + comp := ptrutil.CompareInt64(oldMinutes, s.SessionTimeoutMinutesPtr) + + // If the server is data-bearing and the current timeout exists and is + // either: + // + // 1. larger than the server timeout, or + // 2. non-nil while the server timeout is nil + // + // then return the server timeout. + if s.DataBearing() && (comp == 1 || comp == 2) { + return s.SessionTimeoutMinutesPtr + } + + // If the current timeout exists and the server is not data-bearing OR + // min{server timeout, current timeout} = current timeout, then return + // the current timeout. + if oldMinutes != nil { + return oldMinutes + } + + timeout := s.SessionTimeoutMinutesPtr + for _, server := range f.Servers { + // If the server is not data-bearing, then we do not consider + // it's timeout whether set or not. 
+ if !server.DataBearing() { + continue + } + + srvTimeout := server.SessionTimeoutMinutesPtr + comp := ptrutil.CompareInt64(timeout, srvTimeout) + + if comp <= 0 { // timeout <= srvTimout + continue + } + + timeout = server.SessionTimeoutMinutesPtr + } + + return timeout +} + // apply takes a new server description and modifies the FSM's topology description based on it. It returns the // updated topology description as well as a server description. The returned server description is either the same // one that was passed in, or a new one in the case that it had to be changed. @@ -48,30 +106,20 @@ func (f *fsm) apply(s description.Server) (description.Topology, description.Ser newServers := make([]description.Server, len(f.Servers)) copy(newServers, f.Servers) - oldMinutes := f.SessionTimeoutMinutes + // Reset the logicalSessionTimeoutMinutes to the minimum of the FSM + // and the description.server/f.servers. + serverTimeoutMinutes := selectFSMSessionTimeout(f, s) + f.Topology = description.Topology{ Kind: f.Kind, Servers: newServers, SetName: f.SetName, } - // For data bearing servers, set SessionTimeoutMinutes to the lowest among them - if oldMinutes == 0 { - // If timeout currently 0, check all servers to see if any still don't have a timeout - // If they all have timeout, pick the lowest. 
- timeout := s.SessionTimeoutMinutes - for _, server := range f.Servers { - if server.DataBearing() && server.SessionTimeoutMinutes < timeout { - timeout = server.SessionTimeoutMinutes - } - } - f.SessionTimeoutMinutes = timeout - } else { - if s.DataBearing() && oldMinutes > s.SessionTimeoutMinutes { - f.SessionTimeoutMinutes = s.SessionTimeoutMinutes - } else { - f.SessionTimeoutMinutes = oldMinutes - } + f.Topology.SessionTimeoutMinutesPtr = serverTimeoutMinutes + + if serverTimeoutMinutes != nil { + f.SessionTimeoutMinutes = uint32(*serverTimeoutMinutes) } if _, ok := f.findServer(s.Addr); !ok { @@ -124,6 +172,7 @@ func (f *fsm) apply(s description.Server) (description.Topology, description.Ser f.compatible.Store(true) f.compatibilityErr = nil + return f.Topology, updatedDesc } @@ -234,7 +283,7 @@ func hasStalePrimary(fsm fsm, srv description.Server) bool { compRes := bytes.Compare(srv.ElectionID[:], fsm.maxElectionID[:]) if wireVersion := srv.WireVersion; wireVersion != nil && wireVersion.Max >= 17 { - // In the Post-6.0 case, a primary is considered "stale" if the server's election ID is greather than the + // In the Post-6.0 case, a primary is considered "stale" if the server's election ID is greater than the // topology's max election ID. In these versions, the primary is also considered "stale" if the server's // election ID is LTE to the topologies election ID and the server's "setVersion" is less than the topology's // max "setVersion". 
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go index b7a15e4ec..6e150344d 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go @@ -9,12 +9,14 @@ package topology import ( "context" "fmt" + "net" "sync" "sync/atomic" "time" "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/logger" "go.mongodb.org/mongo-driver/mongo/address" "go.mongodb.org/mongo-driver/x/mongo/driver" ) @@ -72,7 +74,9 @@ type poolConfig struct { MaxConnecting uint64 MaxIdleTime time.Duration MaintainInterval time.Duration + LoadBalanced bool PoolMonitor *event.PoolMonitor + Logger *logger.Logger handshakeErrFn func(error, uint64, *primitive.ObjectID) } @@ -90,7 +94,9 @@ type pool struct { minSize uint64 maxSize uint64 maxConnecting uint64 + loadBalanced bool monitor *event.PoolMonitor + logger *logger.Logger // handshakeErrFn is used to handle any errors that happen during connection establishment and // handshaking. @@ -129,18 +135,56 @@ func (p *pool) getState() int { return p.state } +func mustLogPoolMessage(pool *pool) bool { + return pool.logger != nil && pool.logger.LevelComponentEnabled( + logger.LevelDebug, logger.ComponentConnection) +} + +func logPoolMessage(pool *pool, msg string, keysAndValues ...interface{}) { + host, port, err := net.SplitHostPort(pool.address.String()) + if err != nil { + host = pool.address.String() + port = "" + } + + pool.logger.Print(logger.LevelDebug, + logger.ComponentConnection, + msg, + logger.SerializeConnection(logger.Connection{ + Message: msg, + ServerHost: host, + ServerPort: port, + }, keysAndValues...)...) + +} + +type reason struct { + loggerConn string + event string +} + // connectionPerished checks if a given connection is perished and should be removed from the pool. 
-func connectionPerished(conn *connection) (string, bool) { +func connectionPerished(conn *connection) (reason, bool) { switch { case conn.closed(): // A connection would only be closed if it encountered a network error during an operation and closed itself. - return event.ReasonError, true + return reason{ + loggerConn: logger.ReasonConnClosedError, + event: event.ReasonError, + }, true case conn.idleTimeoutExpired(): - return event.ReasonIdle, true + return reason{ + loggerConn: logger.ReasonConnClosedIdle, + event: event.ReasonIdle, + }, true case conn.pool.stale(conn): - return event.ReasonStale, true + return reason{ + loggerConn: logger.ReasonConnClosedStale, + event: event.ReasonStale, + }, true } - return "", false + + return reason{}, false } // newPool creates a new pool. It will use the provided options when creating connections. @@ -164,7 +208,9 @@ func newPool(config poolConfig, connOpts ...ConnectionOption) *pool { minSize: config.MinPoolSize, maxSize: config.MaxPoolSize, maxConnecting: maxConnecting, + loadBalanced: config.LoadBalanced, monitor: config.PoolMonitor, + logger: config.Logger, handshakeErrFn: config.handshakeErrFn, connOpts: connOpts, generation: newPoolGenerationMap(), @@ -202,6 +248,17 @@ func newPool(config poolConfig, connOpts ...ConnectionOption) *pool { go pool.maintain(ctx, pool.backgroundDone) } + if mustLogPoolMessage(pool) { + keysAndValues := logger.KeyValues{ + logger.KeyMaxIdleTimeMS, config.MaxIdleTime.Milliseconds(), + logger.KeyMinPoolSize, config.MinPoolSize, + logger.KeyMaxPoolSize, config.MaxPoolSize, + logger.KeyMaxConnecting, config.MaxConnecting, + } + + logPoolMessage(pool, logger.ConnectionPoolCreated, keysAndValues...) + } + if pool.monitor != nil { pool.monitor.Event(&event.PoolEvent{ Type: event.PoolCreated, @@ -239,12 +296,12 @@ func (p *pool) ready() error { p.state = poolReady p.stateMu.Unlock() - // Signal maintain() to wake up immediately when marking the pool "ready". 
- select { - case p.maintainReady <- struct{}{}: - default: + if mustLogPoolMessage(p) { + logPoolMessage(p, logger.ConnectionPoolReady) } + // Send event.PoolReady before resuming the maintain() goroutine to guarantee that the + // "pool ready" event is always sent before maintain() starts creating connections. if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.PoolReady, @@ -252,6 +309,12 @@ func (p *pool) ready() error { }) } + // Signal maintain() to wake up immediately when marking the pool "ready". + select { + case p.maintainReady <- struct{}{}: + default: + } + return nil } @@ -342,10 +405,17 @@ func (p *pool) close(ctx context.Context) { // Now that we're not holding any locks, remove all of the connections we collected from the // pool. for _, conn := range conns { - _ = p.removeConnection(conn, event.ReasonPoolClosed) + _ = p.removeConnection(conn, reason{ + loggerConn: logger.ReasonConnClosedPoolClosed, + event: event.ReasonPoolClosed, + }, nil) _ = p.closeConnection(conn) // We don't care about errors while closing the connection. } + if mustLogPoolMessage(p) { + logPoolMessage(p, logger.ConnectionPoolClosed) + } + if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.PoolClosedEvent, @@ -377,6 +447,10 @@ func (p *pool) unpinConnectionFromTransaction() { // ready, checkOut returns an error. // Based partially on https://cs.opensource.google/go/go/+/refs/tags/go1.16.6:src/net/http/transport.go;l=1324 func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { + if mustLogPoolMessage(p) { + logPoolMessage(p, logger.ConnectionCheckoutStarted) + } + // TODO(CSOT): If a Timeout was specified at any level, respect the Timeout is server selection, connection // TODO checkout. 
if p.monitor != nil { @@ -395,6 +469,15 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { switch p.state { case poolClosed: p.stateMu.RUnlock() + + if mustLogPoolMessage(p) { + keysAndValues := logger.KeyValues{ + logger.KeyReason, logger.ReasonConnCheckoutFailedPoolClosed, + } + + logPoolMessage(p, logger.ConnectionCheckoutFailed, keysAndValues...) + } + if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.GetFailed, @@ -406,11 +489,21 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { case poolPaused: err := poolClearedError{err: p.lastClearErr, address: p.address} p.stateMu.RUnlock() + + if mustLogPoolMessage(p) { + keysAndValues := logger.KeyValues{ + logger.KeyReason, logger.ReasonConnCheckoutFailedError, + } + + logPoolMessage(p, logger.ConnectionCheckoutFailed, keysAndValues...) + } + if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.GetFailed, Address: p.address.String(), Reason: event.ReasonConnectionErrored, + Error: err, }) } return nil, err @@ -440,16 +533,33 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { p.stateMu.RUnlock() if w.err != nil { + if mustLogPoolMessage(p) { + keysAndValues := logger.KeyValues{ + logger.KeyReason, logger.ReasonConnCheckoutFailedError, + } + + logPoolMessage(p, logger.ConnectionCheckoutFailed, keysAndValues...) + } + if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.GetFailed, Address: p.address.String(), Reason: event.ReasonConnectionErrored, + Error: w.err, }) } return nil, w.err } + if mustLogPoolMessage(p) { + keysAndValues := logger.KeyValues{ + logger.KeyDriverConnectionID, w.conn.driverConnectionID, + } + + logPoolMessage(p, logger.ConnectionCheckedOut, keysAndValues...) 
+ } + if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.GetSucceeded, @@ -457,6 +567,7 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { ConnectionID: w.conn.driverConnectionID, }) } + return w.conn, nil } @@ -466,19 +577,39 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { p.stateMu.RUnlock() // Wait for either the wantConn to be ready or for the Context to time out. + start := time.Now() select { case <-w.ready: if w.err != nil { + if mustLogPoolMessage(p) { + keysAndValues := logger.KeyValues{ + logger.KeyReason, logger.ReasonConnCheckoutFailedError, + logger.KeyError, w.err.Error(), + } + + logPoolMessage(p, logger.ConnectionCheckoutFailed, keysAndValues...) + } + if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.GetFailed, Address: p.address.String(), Reason: event.ReasonConnectionErrored, + Error: w.err, }) } + return nil, w.err } + if mustLogPoolMessage(p) { + keysAndValues := logger.KeyValues{ + logger.KeyDriverConnectionID, w.conn.driverConnectionID, + } + + logPoolMessage(p, logger.ConnectionCheckedOut, keysAndValues...) + } + if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.GetSucceeded, @@ -488,20 +619,39 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { } return w.conn, nil case <-ctx.Done(): + duration := time.Since(start) + + if mustLogPoolMessage(p) { + keysAndValues := logger.KeyValues{ + logger.KeyReason, logger.ReasonConnCheckoutFailedTimout, + } + + logPoolMessage(p, logger.ConnectionCheckoutFailed, keysAndValues...) 
+ } + if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.GetFailed, Address: p.address.String(), Reason: event.ReasonTimedOut, + Error: ctx.Err(), }) } - return nil, WaitQueueTimeoutError{ - Wrapped: ctx.Err(), - PinnedCursorConnections: atomic.LoadUint64(&p.pinnedCursorConnections), - PinnedTransactionConnections: atomic.LoadUint64(&p.pinnedTransactionConnections), - maxPoolSize: p.maxSize, - totalConnectionCount: p.totalConnectionCount(), + + err := WaitQueueTimeoutError{ + Wrapped: ctx.Err(), + maxPoolSize: p.maxSize, + totalConnections: p.totalConnectionCount(), + availableConnections: p.availableConnectionCount(), + waitDuration: duration, + } + if p.loadBalanced { + err.pinnedConnections = &pinnedConnections{ + cursorConnections: atomic.LoadUint64(&p.pinnedCursorConnections), + transactionConnections: atomic.LoadUint64(&p.pinnedTransactionConnections), + } } + return nil, err } } @@ -529,7 +679,7 @@ func (p *pool) getGenerationForNewConnection(serviceID *primitive.ObjectID) uint } // removeConnection removes a connection from the pool and emits a "ConnectionClosed" event. -func (p *pool) removeConnection(conn *connection, reason string) error { +func (p *pool) removeConnection(conn *connection, reason reason, err error) error { if conn == nil { return nil } @@ -559,12 +709,26 @@ func (p *pool) removeConnection(conn *connection, reason string) error { p.generation.removeConnection(conn.desc.ServiceID) } + if mustLogPoolMessage(p) { + keysAndValues := logger.KeyValues{ + logger.KeyDriverConnectionID, conn.driverConnectionID, + logger.KeyReason, reason.loggerConn, + } + + if err != nil { + keysAndValues.Add(logger.KeyError, err.Error()) + } + + logPoolMessage(p, logger.ConnectionClosed, keysAndValues...) 
+ } + if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.ConnectionClosed, Address: p.address.String(), ConnectionID: conn.driverConnectionID, - Reason: reason, + Reason: reason.event, + Error: err, }) } @@ -581,6 +745,14 @@ func (p *pool) checkIn(conn *connection) error { return ErrWrongPool } + if mustLogPoolMessage(p) { + keysAndValues := logger.KeyValues{ + logger.KeyDriverConnectionID, conn.driverConnectionID, + } + + logPoolMessage(p, logger.ConnectionCheckedIn, keysAndValues...) + } + if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.ConnectionReturned, @@ -611,7 +783,7 @@ func (p *pool) checkInNoEvent(conn *connection) error { conn.bumpIdleDeadline() if reason, perished := connectionPerished(conn); perished { - _ = p.removeConnection(conn, reason) + _ = p.removeConnection(conn, reason, nil) go func() { _ = p.closeConnection(conn) }() @@ -619,7 +791,11 @@ func (p *pool) checkInNoEvent(conn *connection) error { } if conn.pool.getState() == poolClosed { - _ = p.removeConnection(conn, event.ReasonPoolClosed) + _ = p.removeConnection(conn, reason{ + loggerConn: logger.ReasonConnClosedPoolClosed, + event: event.ReasonPoolClosed, + }, nil) + go func() { _ = p.closeConnection(conn) }() @@ -706,11 +882,20 @@ func (p *pool) clear(err error, serviceID *primitive.ObjectID) { p.createConnectionsCond.L.Unlock() } + if mustLogPoolMessage(p) { + keysAndValues := logger.KeyValues{ + logger.KeyServiceID, serviceID, + } + + logPoolMessage(p, logger.ConnectionPoolCleared, keysAndValues...) 
+ } + if sendEvent && p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.PoolCleared, Address: p.address.String(), ServiceID: serviceID, + Error: err, }) } } @@ -733,7 +918,7 @@ func (p *pool) getOrQueueForIdleConn(w *wantConn) bool { } if reason, perished := connectionPerished(conn); perished { - _ = conn.pool.removeConnection(conn, reason) + _ = conn.pool.removeConnection(conn, reason, nil) go func() { _ = conn.pool.closeConnection(conn) }() @@ -829,6 +1014,14 @@ func (p *pool) createConnections(ctx context.Context, wg *sync.WaitGroup) { continue } + if mustLogPoolMessage(p) { + keysAndValues := logger.KeyValues{ + logger.KeyDriverConnectionID, conn.driverConnectionID, + } + + logPoolMessage(p, logger.ConnectionCreated, keysAndValues...) + } + if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.ConnectionCreated, @@ -853,11 +1046,24 @@ func (p *pool) createConnections(ctx context.Context, wg *sync.WaitGroup) { p.handshakeErrFn(err, conn.generation, conn.desc.ServiceID) } - _ = p.removeConnection(conn, event.ReasonError) + _ = p.removeConnection(conn, reason{ + loggerConn: logger.ReasonConnClosedError, + event: event.ReasonError, + }, err) + _ = p.closeConnection(conn) + continue } + if mustLogPoolMessage(p) { + keysAndValues := logger.KeyValues{ + logger.KeyDriverConnectionID, conn.driverConnectionID, + } + + logPoolMessage(p, logger.ConnectionReady, keysAndValues...) 
+ } + if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ Type: event.ConnectionReady, @@ -976,7 +1182,7 @@ func (p *pool) removePerishedConns() { if reason, perished := connectionPerished(conn); perished { p.idleConns[i] = nil - _ = p.removeConnection(conn, reason) + _ = p.removeConnection(conn, reason, nil) go func() { _ = p.closeConnection(conn) }() @@ -1043,7 +1249,9 @@ func (w *wantConn) tryDeliver(conn *connection, err error) bool { if w.conn == nil && w.err == nil { panic("x/mongo/driver/topology: internal error: misuse of tryDeliver") } + close(w.ready) + return true } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go index 998d2a025..0934beed8 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go @@ -39,7 +39,12 @@ type rttConfig struct { } type rttMonitor struct { - mu sync.RWMutex // mu guards samples, offset, minRTT, averageRTT, and averageRTTSet + mu sync.RWMutex // mu guards samples, offset, minRTT, averageRTT, and averageRTTSet + + // connMu guards connecting and disconnecting. This is necessary since + // disconnecting will await the cancellation of a started connection. The + // use case for rttMonitor.connect needs to be goroutine safe. + connMu sync.Mutex samples []time.Duration offset int minRTT time.Duration @@ -51,6 +56,7 @@ type rttMonitor struct { cfg *rttConfig ctx context.Context cancelFn context.CancelFunc + started bool } var _ driver.RTTMonitor = &rttMonitor{} @@ -74,19 +80,34 @@ func newRTTMonitor(cfg *rttConfig) *rttMonitor { } func (r *rttMonitor) connect() { + r.connMu.Lock() + defer r.connMu.Unlock() + + r.started = true r.closeWg.Add(1) - go r.start() + + go func() { + defer r.closeWg.Done() + + r.start() + }() } func (r *rttMonitor) disconnect() { - // Signal for the routine to stop. 
+ r.connMu.Lock() + defer r.connMu.Unlock() + + if !r.started { + return + } + r.cancelFn() + + // Wait for the existing connection to complete. r.closeWg.Wait() } func (r *rttMonitor) start() { - defer r.closeWg.Done() - var conn *connection defer func() { if conn != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go index 95ca8e85b..5823d3d7a 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go @@ -17,9 +17,12 @@ import ( "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/driverutil" + "go.mongodb.org/mongo-driver/internal/logger" "go.mongodb.org/mongo-driver/mongo/address" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/mongo/driver" + "go.mongodb.org/mongo-driver/x/mongo/driver/connstring" "go.mongodb.org/mongo-driver/x/mongo/driver/operation" ) @@ -130,7 +133,12 @@ type updateTopologyCallback func(description.Server) description.Server // ConnectServer creates a new Server and then initializes it using the // Connect method. -func ConnectServer(addr address.Address, updateCallback updateTopologyCallback, topologyID primitive.ObjectID, opts ...ServerOption) (*Server, error) { +func ConnectServer( + addr address.Address, + updateCallback updateTopologyCallback, + topologyID primitive.ObjectID, + opts ...ServerOption, +) (*Server, error) { srvr := NewServer(addr, topologyID, opts...) 
err := srvr.Connect(updateCallback) if err != nil { @@ -176,7 +184,9 @@ func NewServer(addr address.Address, topologyID primitive.ObjectID, opts ...Serv MaxConnecting: cfg.maxConnecting, MaxIdleTime: cfg.poolMaxIdleTime, MaintainInterval: cfg.poolMaintainInterval, + LoadBalanced: cfg.loadBalanced, PoolMonitor: cfg.poolMonitor, + Logger: cfg.logger, handshakeErrFn: s.ProcessHandshakeError, } @@ -187,6 +197,39 @@ func NewServer(addr address.Address, topologyID primitive.ObjectID, opts ...Serv return s } +func mustLogServerMessage(srv *Server) bool { + return srv.cfg.logger != nil && srv.cfg.logger.LevelComponentEnabled( + logger.LevelDebug, logger.ComponentTopology) +} + +func logServerMessage(srv *Server, msg string, keysAndValues ...interface{}) { + serverHost, serverPort, err := net.SplitHostPort(srv.address.String()) + if err != nil { + serverHost = srv.address.String() + serverPort = "" + } + + var driverConnectionID uint64 + var serverConnectionID *int64 + + if srv.conn != nil { + driverConnectionID = srv.conn.driverConnectionID + serverConnectionID = srv.conn.serverConnectionID + } + + srv.cfg.logger.Print(logger.LevelDebug, + logger.ComponentTopology, + msg, + logger.SerializeServer(logger.Server{ + DriverConnectionID: driverConnectionID, + TopologyID: srv.topologyID, + Message: msg, + ServerConnectionID: serverConnectionID, + ServerHost: serverHost, + ServerPort: serverPort, + }, keysAndValues...)...) +} + // Connect initializes the Server by starting background monitoring goroutines. // This method must be called before a Server can be used. 
func (s *Server) Connect(updateCallback updateTopologyCallback) error { @@ -203,7 +246,6 @@ func (s *Server) Connect(updateCallback updateTopologyCallback) error { s.updateTopologyCallback.Store(updateCallback) if !s.cfg.monitoringDisabled && !s.cfg.loadBalanced { - s.rttMonitor.connect() s.closewg.Add(1) go s.update() } @@ -393,7 +435,7 @@ func (s *Server) ProcessError(err error, conn driver.Connection) driver.ProcessE } // Ignore errors from stale connections because the error came from a previous generation of the - // connection pool. The root cause of the error has aleady been handled, which is what caused + // connection pool. The root cause of the error has already been handled, which is what caused // the pool generation to increment. Processing errors for stale connections could result in // handling the same error root cause multiple times (e.g. a temporary network interrupt causing // all connections to the same server to return errors). @@ -496,7 +538,7 @@ func (s *Server) ProcessError(err error, conn driver.Connection) driver.ProcessE return driver.ConnectionPoolCleared } -// update handles performing heartbeats and updating any subscribers of the +// update handle performing heartbeats and updating any subscribers of the // newest description.Server retrieved. func (s *Server) update() { defer s.closewg.Done() @@ -612,12 +654,15 @@ func (s *Server) update() { // If the server supports streaming or we're already streaming, we want to move to streaming the next response // without waiting. If the server has transitioned to Unknown from a network error, we want to do another // check without waiting in case it was a transient error and the server isn't actually down. 
- serverSupportsStreaming := desc.Kind != description.Unknown && desc.TopologyVersion != nil connectionIsStreaming := s.conn != nil && s.conn.getCurrentlyStreaming() transitionedFromNetworkError := desc.LastError != nil && unwrapConnectionError(desc.LastError) != nil && previousDescription.Kind != description.Unknown - if serverSupportsStreaming || connectionIsStreaming || transitionedFromNetworkError { + if isStreamingEnabled(s) && isStreamable(s) && !s.rttMonitor.started { + s.rttMonitor.connect() + } + + if isStreamable(s) || connectionIsStreaming || transitionedFromNetworkError { continue } @@ -749,38 +794,55 @@ func (s *Server) createBaseOperation(conn driver.Connection) *operation.Hello { return operation. NewHello(). ClusterClock(s.cfg.clock). - Deployment(driver.SingleConnectionDeployment{conn}). + Deployment(driver.SingleConnectionDeployment{C: conn}). ServerAPI(s.cfg.serverAPI) } +func isStreamingEnabled(srv *Server) bool { + switch srv.cfg.serverMonitoringMode { + case connstring.ServerMonitoringModeStream: + return true + case connstring.ServerMonitoringModePoll: + return false + default: + return driverutil.GetFaasEnvName() == "" + } +} + +func isStreamable(srv *Server) bool { + return srv.Description().Kind != description.Unknown && srv.Description().TopologyVersion != nil +} + func (s *Server) check() (description.Server, error) { var descPtr *description.Server var err error - var durationNanos int64 + var duration time.Duration start := time.Now() + + // Create a new connection if this is the first check, the connection was closed after an error during the previous + // check, or the previous check was cancelled. if s.conn == nil || s.conn.closed() || s.checkWasCancelled() { - // Create a new connection if this is the first check, the connection was closed after an error during the previous - // check, or the previous check was cancelled. 
- isNilConn := s.conn == nil - if !isNilConn { - s.publishServerHeartbeatStartedEvent(s.conn.ID(), false) + connID := "0" + if s.conn != nil { + connID = s.conn.ID() } + s.publishServerHeartbeatStartedEvent(connID, false) // Create a new connection and add it's handshake RTT as a sample. err = s.setupHeartbeatConnection() - durationNanos = time.Since(start).Nanoseconds() + duration = time.Since(start) + connID = "0" + if s.conn != nil { + connID = s.conn.ID() + } if err == nil { // Use the description from the connection handshake as the value for this check. s.rttMonitor.addSample(s.conn.helloRTT) descPtr = &s.conn.desc - if !isNilConn { - s.publishServerHeartbeatSucceededEvent(s.conn.ID(), durationNanos, s.conn.desc, false) - } + s.publishServerHeartbeatSucceededEvent(connID, duration, s.conn.desc, false) } else { err = unwrapConnectionError(err) - if !isNilConn { - s.publishServerHeartbeatFailedEvent(s.conn.ID(), durationNanos, err, false) - } + s.publishServerHeartbeatFailedEvent(connID, duration, err, false) } } else { // An existing connection is being used. Use the server description properties to execute the right heartbeat. @@ -789,9 +851,10 @@ func (s *Server) check() (description.Server, error) { heartbeatConn := initConnection{s.conn} baseOperation := s.createBaseOperation(heartbeatConn) previousDescription := s.Description() - streamable := previousDescription.TopologyVersion != nil + streamable := isStreamingEnabled(s) && isStreamable(s) s.publishServerHeartbeatStartedEvent(s.conn.ID(), s.conn.getCurrentlyStreaming() || streamable) + switch { case s.conn.getCurrentlyStreaming(): // The connection is already in a streaming state, so we stream the next response. 
@@ -822,19 +885,27 @@ func (s *Server) check() (description.Server, error) { s.conn.setSocketTimeout(s.cfg.heartbeatTimeout) err = baseOperation.Execute(s.heartbeatCtx) } - durationNanos = time.Since(start).Nanoseconds() + + duration = time.Since(start) + + // We need to record an RTT sample in the polling case so that if the server + // is < 4.4, or if polling is specified by the user, then the + // RTT-short-circuit feature of CSOT is not disabled. + if !streamable { + s.rttMonitor.addSample(duration) + } if err == nil { tempDesc := baseOperation.Result(s.address) descPtr = &tempDesc - s.publishServerHeartbeatSucceededEvent(s.conn.ID(), durationNanos, tempDesc, s.conn.getCurrentlyStreaming() || streamable) + s.publishServerHeartbeatSucceededEvent(s.conn.ID(), duration, tempDesc, s.conn.getCurrentlyStreaming() || streamable) } else { // Close the connection here rather than below so we ensure we're not closing a connection that wasn't // successfully created. if s.conn != nil { _ = s.conn.close() } - s.publishServerHeartbeatFailedEvent(s.conn.ID(), durationNanos, err, s.conn.getCurrentlyStreaming() || streamable) + s.publishServerHeartbeatFailedEvent(s.conn.ID(), duration, err, s.conn.getCurrentlyStreaming() || streamable) } } @@ -947,6 +1018,10 @@ func (s *Server) publishServerOpeningEvent(addr address.Address) { if s.cfg.serverMonitor != nil && s.cfg.serverMonitor.ServerOpening != nil { s.cfg.serverMonitor.ServerOpening(serverOpening) } + + if mustLogServerMessage(s) { + logServerMessage(s, logger.TopologyServerOpening) + } } // publishes a ServerHeartbeatStartedEvent to indicate a hello command has started @@ -959,15 +1034,22 @@ func (s *Server) publishServerHeartbeatStartedEvent(connectionID string, await b if s != nil && s.cfg.serverMonitor != nil && s.cfg.serverMonitor.ServerHeartbeatStarted != nil { s.cfg.serverMonitor.ServerHeartbeatStarted(serverHeartbeatStarted) } + + if mustLogServerMessage(s) { + logServerMessage(s, 
logger.TopologyServerHeartbeatStarted, + logger.KeyAwaited, await) + } } // publishes a ServerHeartbeatSucceededEvent to indicate hello has succeeded func (s *Server) publishServerHeartbeatSucceededEvent(connectionID string, - durationNanos int64, + duration time.Duration, desc description.Server, - await bool) { + await bool, +) { serverHeartbeatSucceeded := &event.ServerHeartbeatSucceededEvent{ - DurationNanos: durationNanos, + DurationNanos: duration.Nanoseconds(), + Duration: duration, Reply: desc, ConnectionID: connectionID, Awaited: await, @@ -976,15 +1058,24 @@ func (s *Server) publishServerHeartbeatSucceededEvent(connectionID string, if s != nil && s.cfg.serverMonitor != nil && s.cfg.serverMonitor.ServerHeartbeatSucceeded != nil { s.cfg.serverMonitor.ServerHeartbeatSucceeded(serverHeartbeatSucceeded) } + + if mustLogServerMessage(s) { + logServerMessage(s, logger.TopologyServerHeartbeatStarted, + logger.KeyAwaited, await, + logger.KeyDurationMS, duration.Milliseconds(), + logger.KeyReply, desc) + } } // publishes a ServerHeartbeatFailedEvent to indicate hello has failed func (s *Server) publishServerHeartbeatFailedEvent(connectionID string, - durationNanos int64, + duration time.Duration, err error, - await bool) { + await bool, +) { serverHeartbeatFailed := &event.ServerHeartbeatFailedEvent{ - DurationNanos: durationNanos, + DurationNanos: duration.Nanoseconds(), + Duration: duration, Failure: err, ConnectionID: connectionID, Awaited: await, @@ -993,6 +1084,13 @@ func (s *Server) publishServerHeartbeatFailedEvent(connectionID string, if s != nil && s.cfg.serverMonitor != nil && s.cfg.serverMonitor.ServerHeartbeatFailed != nil { s.cfg.serverMonitor.ServerHeartbeatFailed(serverHeartbeatFailed) } + + if mustLogServerMessage(s) { + logServerMessage(s, logger.TopologyServerHeartbeatFailed, + logger.KeyAwaited, await, + logger.KeyDurationMS, duration.Milliseconds(), + logger.KeyFailure, err.Error()) + } } // unwrapConnectionError returns the connection error 
wrapped by err, or nil if err does not wrap a connection error. diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server_options.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server_options.go index 73819f9fc..4504a2535 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server_options.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server_options.go @@ -12,30 +12,34 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/logger" "go.mongodb.org/mongo-driver/x/mongo/driver" + "go.mongodb.org/mongo-driver/x/mongo/driver/connstring" "go.mongodb.org/mongo-driver/x/mongo/driver/session" ) var defaultRegistry = bson.NewRegistryBuilder().Build() type serverConfig struct { - clock *session.ClusterClock - compressionOpts []string - connectionOpts []ConnectionOption - appname string - heartbeatInterval time.Duration - heartbeatTimeout time.Duration - serverMonitor *event.ServerMonitor - registry *bsoncodec.Registry - monitoringDisabled bool - serverAPI *driver.ServerAPIOptions - loadBalanced bool + clock *session.ClusterClock + compressionOpts []string + connectionOpts []ConnectionOption + appname string + heartbeatInterval time.Duration + heartbeatTimeout time.Duration + serverMonitoringMode string + serverMonitor *event.ServerMonitor + registry *bsoncodec.Registry + monitoringDisabled bool + serverAPI *driver.ServerAPIOptions + loadBalanced bool // Connection pool options. maxConns uint64 minConns uint64 maxConnecting uint64 poolMonitor *event.PoolMonitor + logger *logger.Logger poolMaxIdleTime time.Duration poolMaintainInterval time.Duration } @@ -193,3 +197,24 @@ func WithServerLoadBalanced(fn func(bool) bool) ServerOption { cfg.loadBalanced = fn(cfg.loadBalanced) } } + +// withLogger configures the logger for the server to use. 
+func withLogger(fn func() *logger.Logger) ServerOption { + return func(cfg *serverConfig) { + cfg.logger = fn() + } +} + +// withServerMonitoringMode configures the mode (stream, poll, or auto) to use +// for monitoring. +func withServerMonitoringMode(mode *string) ServerOption { + return func(cfg *serverConfig) { + if mode != nil { + cfg.serverMonitoringMode = *mode + + return + } + + cfg.serverMonitoringMode = connstring.ServerMonitoringModeAuto + } +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go index eb0ac425a..bbffbd1da 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go @@ -16,6 +16,7 @@ import ( "fmt" "net" "net/url" + "strconv" "strings" "sync" "sync/atomic" @@ -23,6 +24,7 @@ import ( "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/logger" "go.mongodb.org/mongo-driver/internal/randutil" "go.mongodb.org/mongo-driver/mongo/address" "go.mongodb.org/mongo-driver/mongo/description" @@ -96,7 +98,7 @@ type Topology struct { subscriptionsClosed bool subLock sync.Mutex - // We should redesign how we Connect and handle individal servers. This is + // We should redesign how we Connect and handle individual servers. This is // too difficult to maintain and it's rather easy to accidentally access // the servers without acquiring the lock or checking if the servers are // closed. This lock should also be an RWMutex. 
@@ -107,8 +109,10 @@ type Topology struct { id primitive.ObjectID } -var _ driver.Deployment = &Topology{} -var _ driver.Subscriber = &Topology{} +var ( + _ driver.Deployment = &Topology{} + _ driver.Subscriber = &Topology{} +) type serverSelectionState struct { selector description.ServerSelector @@ -157,6 +161,114 @@ func New(cfg *Config) (*Topology, error) { return t, nil } +func mustLogTopologyMessage(topo *Topology, level logger.Level) bool { + return topo.cfg.logger != nil && topo.cfg.logger.LevelComponentEnabled( + level, logger.ComponentTopology) +} + +func logTopologyMessage(topo *Topology, level logger.Level, msg string, keysAndValues ...interface{}) { + topo.cfg.logger.Print(level, + logger.ComponentTopology, + msg, + logger.SerializeTopology(logger.Topology{ + ID: topo.id, + Message: msg, + }, keysAndValues...)...) +} + +func logTopologyThirdPartyUsage(topo *Topology, parsedHosts []string) { + thirdPartyMessages := [2]string{ + `You appear to be connected to a CosmosDB cluster. For more information regarding feature compatibility and support please visit https://www.mongodb.com/supportability/cosmosdb`, + `You appear to be connected to a DocumentDB cluster. 
For more information regarding feature compatibility and support please visit https://www.mongodb.com/supportability/documentdb`, + } + + thirdPartySuffixes := map[string]int{ + ".cosmos.azure.com": 0, + ".docdb.amazonaws.com": 1, + ".docdb-elastic.amazonaws.com": 1, + } + + hostSet := make([]bool, len(thirdPartyMessages)) + for _, host := range parsedHosts { + if h, _, err := net.SplitHostPort(host); err == nil { + host = h + } + for suffix, env := range thirdPartySuffixes { + if !strings.HasSuffix(host, suffix) { + continue + } + if hostSet[env] { + break + } + hostSet[env] = true + logTopologyMessage(topo, logger.LevelInfo, thirdPartyMessages[env]) + } + } +} + +func mustLogServerSelection(topo *Topology, level logger.Level) bool { + return topo.cfg.logger != nil && topo.cfg.logger.LevelComponentEnabled( + level, logger.ComponentServerSelection) +} + +func logServerSelection( + ctx context.Context, + topo *Topology, + level logger.Level, + msg string, + srvSelector description.ServerSelector, + keysAndValues ...interface{}, +) { + var srvSelectorString string + + selectorStringer, ok := srvSelector.(fmt.Stringer) + if ok { + srvSelectorString = selectorStringer.String() + } + + operationName, _ := logger.OperationName(ctx) + operationID, _ := logger.OperationID(ctx) + + topo.cfg.logger.Print(level, + logger.ComponentServerSelection, + msg, + logger.SerializeServerSelection(logger.ServerSelection{ + Selector: srvSelectorString, + Operation: operationName, + OperationID: &operationID, + TopologyDescription: topo.String(), + }, keysAndValues...)...) 
+} + +func logServerSelectionSucceeded( + ctx context.Context, + topo *Topology, + srvSelector description.ServerSelector, + server *SelectedServer, +) { + host, port, err := net.SplitHostPort(server.address.String()) + if err != nil { + host = server.address.String() + port = "" + } + + portInt64, _ := strconv.ParseInt(port, 10, 32) + + logServerSelection(ctx, topo, logger.LevelDebug, logger.ServerSelectionSucceeded, srvSelector, + logger.KeyServerHost, host, + logger.KeyServerPort, portInt64) +} + +func logServerSelectionFailed( + ctx context.Context, + topo *Topology, + srvSelector description.ServerSelector, + err error, +) { + logServerSelection(ctx, topo, logger.LevelDebug, logger.ServerSelectionFailed, srvSelector, + logger.KeyFailure, err.Error()) +} + // Connect initializes a Topology and starts the monitoring process. This function // must be called to properly monitor the topology. func (t *Topology) Connect() error { @@ -218,8 +330,12 @@ func (t *Topology) Connect() error { // server monitoring goroutines. newDesc := description.Topology{ - Kind: t.fsm.Kind, - Servers: t.fsm.Servers, + Kind: t.fsm.Kind, + Servers: t.fsm.Servers, + SessionTimeoutMinutesPtr: t.fsm.SessionTimeoutMinutesPtr, + + // TODO(GODRIVER-2885): This field can be removed once + // legacy SessionTimeoutMinutes is removed. 
SessionTimeoutMinutes: t.fsm.SessionTimeoutMinutes, } t.desc.Store(newDesc) @@ -235,13 +351,17 @@ func (t *Topology) Connect() error { } t.serversLock.Unlock() + uri, err := url.Parse(t.cfg.URI) + if err != nil { + return err + } + parsedHosts := strings.Split(uri.Host, ",") + if mustLogTopologyMessage(t, logger.LevelInfo) { + logTopologyThirdPartyUsage(t, parsedHosts) + } if t.pollingRequired { - uri, err := url.Parse(t.cfg.URI) - if err != nil { - return err - } // sanity check before passing the hostname to resolver - if parsedHosts := strings.Split(uri.Host, ","); len(parsedHosts) != 1 { + if len(parsedHosts) != 1 { return fmt.Errorf("URI with SRV must include one and only one hostname") } _, _, err = net.SplitHostPort(uri.Host) @@ -380,6 +500,10 @@ func (t *Topology) RequestImmediateCheck() { // parent context is done. func (t *Topology) SelectServer(ctx context.Context, ss description.ServerSelector) (driver.Server, error) { if atomic.LoadInt64(&t.state) != topologyConnected { + if mustLogServerSelection(t, logger.LevelDebug) { + logServerSelectionFailed(ctx, t, ss, ErrTopologyClosed) + } + return nil, ErrTopologyClosed } var ssTimeoutCh <-chan time.Time @@ -393,11 +517,18 @@ func (t *Topology) SelectServer(ctx context.Context, ss description.ServerSelect var doneOnce bool var sub *driver.Subscription selectionState := newServerSelectionState(ss, ssTimeoutCh) + + // Record the start time. + startTime := time.Now() for { var suitable []description.Server var selectErr error if !doneOnce { + if mustLogServerSelection(t, logger.LevelDebug) { + logServerSelection(ctx, t, logger.LevelDebug, logger.ServerSelectionStarted, ss) + } + // for the first pass, select a server from the current description. // this improves selection speed for up-to-date topology descriptions. 
suitable, selectErr = t.selectServerFromDescription(t.Description(), selectionState) @@ -409,6 +540,10 @@ func (t *Topology) SelectServer(ctx context.Context, ss description.ServerSelect var err error sub, err = t.Subscribe() if err != nil { + if mustLogServerSelection(t, logger.LevelDebug) { + logServerSelectionFailed(ctx, t, ss, err) + } + return nil, err } defer t.Unsubscribe(sub) @@ -417,11 +552,23 @@ func (t *Topology) SelectServer(ctx context.Context, ss description.ServerSelect suitable, selectErr = t.selectServerFromSubscription(ctx, sub.Updates, selectionState) } if selectErr != nil { + if mustLogServerSelection(t, logger.LevelDebug) { + logServerSelectionFailed(ctx, t, ss, selectErr) + } + return nil, selectErr } if len(suitable) == 0 { // try again if there are no servers available + if mustLogServerSelection(t, logger.LevelInfo) { + elapsed := time.Since(startTime) + remainingTimeMS := t.cfg.ServerSelectionTimeout - elapsed + + logServerSelection(ctx, t, logger.LevelInfo, logger.ServerSelectionWaiting, ss, + logger.KeyRemainingTimeMS, remainingTimeMS.Milliseconds()) + } + continue } @@ -430,11 +577,20 @@ func (t *Topology) SelectServer(ctx context.Context, ss description.ServerSelect if len(suitable) == 1 { server, err := t.FindServer(suitable[0]) if err != nil { + if mustLogServerSelection(t, logger.LevelDebug) { + logServerSelectionFailed(ctx, t, ss, err) + } + return nil, err } if server == nil { continue } + + if mustLogServerSelection(t, logger.LevelDebug) { + logServerSelectionSucceeded(ctx, t, ss, server) + } + return server, nil } @@ -443,10 +599,18 @@ func (t *Topology) SelectServer(ctx context.Context, ss description.ServerSelect desc1, desc2 := pick2(suitable) server1, err := t.FindServer(desc1) if err != nil { + if mustLogServerSelection(t, logger.LevelDebug) { + logServerSelectionFailed(ctx, t, ss, err) + } + return nil, err } server2, err := t.FindServer(desc2) if err != nil { + if mustLogServerSelection(t, logger.LevelDebug) { + 
logServerSelectionFailed(ctx, t, ss, err) + } + return nil, err } @@ -458,9 +622,18 @@ func (t *Topology) SelectServer(ctx context.Context, ss description.ServerSelect if server1 == nil && server2 == nil { continue } + if server1 != nil { + if mustLogServerSelection(t, logger.LevelDebug) { + logServerSelectionSucceeded(ctx, t, ss, server1) + } return server1, nil } + + if mustLogServerSelection(t, logger.LevelDebug) { + logServerSelectionSucceeded(ctx, t, ss, server2) + } + return server2, nil } @@ -468,8 +641,16 @@ func (t *Topology) SelectServer(ctx context.Context, ss description.ServerSelect // We use in-use connections as an analog for in-progress operations because they are almost // always the same value for a given server. if server1.OperationCount() < server2.OperationCount() { + if mustLogServerSelection(t, logger.LevelDebug) { + logServerSelectionSucceeded(ctx, t, ss, server1) + } + return server1, nil } + + if mustLogServerSelection(t, logger.LevelDebug) { + logServerSelectionSucceeded(ctx, t, ss, server2) + } return server2, nil } } @@ -558,13 +739,18 @@ func (t *Topology) selectServerFromDescription(desc description.Topology, return desc.Servers, nil } - var allowed []description.Server - for _, s := range desc.Servers { + allowedIndexes := make([]int, 0, len(desc.Servers)) + for i, s := range desc.Servers { if s.Kind != description.Unknown { - allowed = append(allowed, s) + allowedIndexes = append(allowedIndexes, i) } } + allowed := make([]description.Server, len(allowedIndexes)) + for i, idx := range allowedIndexes { + allowed[i] = desc.Servers[idx] + } + suitable, err := selectionState.selector.SelectServer(desc, allowed) if err != nil { return nil, ServerSelectionError{Wrapped: err, Desc: desc} @@ -674,10 +860,14 @@ func (t *Topology) processSRVResults(parsedHosts []string) bool { t.fsm.addServer(addr) } - //store new description + // store new description newDesc := description.Topology{ - Kind: t.fsm.Kind, - Servers: t.fsm.Servers, + Kind: 
t.fsm.Kind, + Servers: t.fsm.Servers, + SessionTimeoutMinutesPtr: t.fsm.SessionTimeoutMinutesPtr, + + // TODO(GODRIVER-2885): This field can be removed once legacy + // SessionTimeoutMinutes is removed. SessionTimeoutMinutes: t.fsm.SessionTimeoutMinutes, } t.desc.Store(newDesc) @@ -813,6 +1003,20 @@ func (t *Topology) publishServerClosedEvent(addr address.Address) { if t.cfg.ServerMonitor != nil && t.cfg.ServerMonitor.ServerClosed != nil { t.cfg.ServerMonitor.ServerClosed(serverClosed) } + + if mustLogTopologyMessage(t, logger.LevelDebug) { + serverHost, serverPort, err := net.SplitHostPort(addr.String()) + if err != nil { + serverHost = addr.String() + serverPort = "" + } + + portInt64, _ := strconv.ParseInt(serverPort, 10, 32) + + logTopologyMessage(t, logger.LevelDebug, logger.TopologyServerClosed, + logger.KeyServerHost, serverHost, + logger.KeyServerPort, portInt64) + } } // publishes a TopologyDescriptionChangedEvent to indicate the topology description has changed @@ -826,6 +1030,12 @@ func (t *Topology) publishTopologyDescriptionChangedEvent(prev description.Topol if t.cfg.ServerMonitor != nil && t.cfg.ServerMonitor.TopologyDescriptionChanged != nil { t.cfg.ServerMonitor.TopologyDescriptionChanged(topologyDescriptionChanged) } + + if mustLogTopologyMessage(t, logger.LevelDebug) { + logTopologyMessage(t, logger.LevelDebug, logger.TopologyDescriptionChanged, + logger.KeyPreviousDescription, prev.String(), + logger.KeyNewDescription, current.String()) + } } // publishes a TopologyOpeningEvent to indicate the topology is being initialized @@ -837,6 +1047,10 @@ func (t *Topology) publishTopologyOpeningEvent() { if t.cfg.ServerMonitor != nil && t.cfg.ServerMonitor.TopologyOpening != nil { t.cfg.ServerMonitor.TopologyOpening(topologyOpening) } + + if mustLogTopologyMessage(t, logger.LevelDebug) { + logTopologyMessage(t, logger.LevelDebug, logger.TopologyOpening) + } } // publishes a TopologyClosedEvent to indicate the topology has been closed @@ -848,4 +1062,8 @@ 
func (t *Topology) publishTopologyClosedEvent() { if t.cfg.ServerMonitor != nil && t.cfg.ServerMonitor.TopologyClosed != nil { t.cfg.ServerMonitor.TopologyClosed(topologyClosed) } + + if mustLogTopologyMessage(t, logger.LevelDebug) { + logTopologyMessage(t, logger.LevelDebug, logger.TopologyClosed) + } } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options.go index 98b71ea38..b5eb4a972 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology_options.go @@ -8,11 +8,13 @@ package topology import ( "crypto/tls" + "fmt" "net/http" "strings" "time" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/logger" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/x/mongo/driver" @@ -36,6 +38,7 @@ type Config struct { SRVMaxHosts int SRVServiceName string LoadBalanced bool + logger *logger.Logger } // ConvertToDriverAPIOptions converts a options.ServerAPIOptions instance to a driver.ServerAPIOptions. @@ -50,8 +53,26 @@ func ConvertToDriverAPIOptions(s *options.ServerAPIOptions) *driver.ServerAPIOpt return driverOpts } +func newLogger(opts *options.LoggerOptions) (*logger.Logger, error) { + if opts == nil { + opts = options.Logger() + } + + componentLevels := make(map[logger.Component]logger.Level) + for component, level := range opts.ComponentLevels { + componentLevels[logger.Component(component)] = logger.Level(level) + } + + log, err := logger.New(opts.Sink, opts.MaxDocumentLength, componentLevels) + if err != nil { + return nil, fmt.Errorf("error creating logger: %w", err) + } + + return log, nil +} + // NewConfig will translate data from client options into a topology config for building non-default deployments. 
-// Server and topoplogy options are not honored if a custom deployment is used. +// Server and topology options are not honored if a custom deployment is used. func NewConfig(co *options.ClientOptions, clock *session.ClusterClock) (*Config, error) { var serverAPI *driver.ServerAPIOptions @@ -62,7 +83,7 @@ func NewConfig(co *options.ClientOptions, clock *session.ClusterClock) (*Config, var connOpts []ConnectionOption var serverOpts []ServerOption - cfgp := new(Config) + cfgp := &Config{} // Set the default "ServerSelectionTimeout" to 30 seconds. cfgp.ServerSelectionTimeout = defaultServerSelectionTimeout @@ -224,7 +245,7 @@ func NewConfig(co *options.ClientOptions, clock *session.ClusterClock) (*Config, // MaxConIdleTime if co.MaxConnIdleTime != nil { - connOpts = append(connOpts, WithIdleTimeout( + serverOpts = append(serverOpts, WithConnectionPoolMaxIdleTime( func(time.Duration) time.Duration { return *co.MaxConnIdleTime }, )) } @@ -333,6 +354,19 @@ func NewConfig(co *options.ClientOptions, clock *session.ClusterClock) (*Config, ) } + lgr, err := newLogger(co.LoggerOptions) + if err != nil { + return nil, err + } + + serverOpts = append( + serverOpts, + withLogger(func() *logger.Logger { return lgr }), + withServerMonitoringMode(co.ServerMonitoringMode), + ) + + cfgp.logger = lgr + serverOpts = append( serverOpts, WithClock(func(*session.ClusterClock) *session.ClusterClock { return clock }), diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go index e3aa09673..abf09c15b 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go @@ -19,9 +19,6 @@ type WireMessage []byte var globalRequestID int32 -// CurrentRequestID returns the current request ID. 
-func CurrentRequestID() int32 { return atomic.LoadInt32(&globalRequestID) } - // NextRequestID returns the next request ID. func NextRequestID() int32 { return atomic.AddInt32(&globalRequestID, 1) } @@ -32,11 +29,12 @@ type OpCode int32 // supported by this library. The skipped OpCodes are historical OpCodes that // are no longer used. const ( - OpReply OpCode = 1 - _ OpCode = 1001 - OpUpdate OpCode = 2001 - OpInsert OpCode = 2002 - _ OpCode = 2003 + OpReply OpCode = 1 + _ OpCode = 1001 + OpUpdate OpCode = 2001 + OpInsert OpCode = 2002 + _ OpCode = 2003 + // Deprecated: Use OpMsg instead. OpQuery OpCode = 2004 OpGetMore OpCode = 2005 OpDelete OpCode = 2006 @@ -174,9 +172,6 @@ const ( DocumentSequence ) -// OpmsgWireVersion is the minimum wire version needed to use OP_MSG -const OpmsgWireVersion = 6 - // CompressorID is the ID for each type of Compressor. type CompressorID uint8 @@ -432,32 +427,50 @@ func ReadMsgChecksum(src []byte) (checksum uint32, rem []byte, ok bool) { } // ReadQueryFlags reads OP_QUERY flags from src. +// +// Deprecated: Construct wiremessages with OpMsg and use the ReadMsg* functions +// instead. func ReadQueryFlags(src []byte) (flags QueryFlag, rem []byte, ok bool) { i32, rem, ok := readi32(src) return QueryFlag(i32), rem, ok } // ReadQueryFullCollectionName reads the full collection name from src. +// +// Deprecated: Construct wiremessages with OpMsg and use the ReadMsg* functions +// instead. func ReadQueryFullCollectionName(src []byte) (collname string, rem []byte, ok bool) { return readcstring(src) } // ReadQueryNumberToSkip reads the number to skip from src. +// +// Deprecated: Construct wiremessages with OpMsg and use the ReadMsg* functions +// instead. func ReadQueryNumberToSkip(src []byte) (nts int32, rem []byte, ok bool) { return readi32(src) } // ReadQueryNumberToReturn reads the number to return from src. +// +// Deprecated: Construct wiremessages with OpMsg and use the ReadMsg* functions +// instead. 
func ReadQueryNumberToReturn(src []byte) (ntr int32, rem []byte, ok bool) { return readi32(src) } // ReadQueryQuery reads the query from src. +// +// Deprecated: Construct wiremessages with OpMsg and use the ReadMsg* functions +// instead. func ReadQueryQuery(src []byte) (query bsoncore.Document, rem []byte, ok bool) { return bsoncore.ReadDocument(src) } // ReadQueryReturnFieldsSelector reads a return fields selector document from src. +// +// Deprecated: Construct wiremessages with OpMsg and use the ReadMsg* functions +// instead. func ReadQueryReturnFieldsSelector(src []byte) (rfs bsoncore.Document, rem []byte, ok bool) { return bsoncore.ReadDocument(src) } diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go new file mode 100644 index 000000000..fbf1934a0 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/cmp.go @@ -0,0 +1,44 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// min is a version of the predeclared function from the Go 1.21 release. +func min[T constraints.Ordered](a, b T) T { + if a < b || isNaN(a) { + return a + } + return b +} + +// max is a version of the predeclared function from the Go 1.21 release. +func max[T constraints.Ordered](a, b T) T { + if a > b || isNaN(a) { + return a + } + return b +} + +// cmpLess is a copy of cmp.Less from the Go 1.21 release. +func cmpLess[T constraints.Ordered](x, y T) bool { + return (isNaN(x) && !isNaN(y)) || x < y +} + +// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. 
+func cmpCompare[T constraints.Ordered](x, y T) int { + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN && yNaN { + return 0 + } + if xNaN || x < y { + return -1 + } + if yNaN || x > y { + return +1 + } + return 0 +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index cff0cd49e..5e8158bba 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -3,23 +3,20 @@ // license that can be found in the LICENSE file. // Package slices defines various functions useful with slices of any type. -// Unless otherwise specified, these functions all apply to the elements -// of a slice at index 0 <= i < len(s). -// -// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a -// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), -// or the sorting may fail to sort correctly. A common case is when sorting slices of -// floating-point numbers containing NaN values. package slices -import "golang.org/x/exp/constraints" +import ( + "unsafe" + + "golang.org/x/exp/constraints" +) // Equal reports whether two slices are equal: the same length and all // elements equal. If the lengths are different, Equal returns false. // Otherwise, the elements are compared in increasing index order, and the // comparison stops at the first unequal pair. // Floating point NaNs are not considered equal. -func Equal[E comparable](s1, s2 []E) bool { +func Equal[S ~[]E, E comparable](s1, s2 S) bool { if len(s1) != len(s2) { return false } @@ -31,12 +28,12 @@ func Equal[E comparable](s1, s2 []E) bool { return true } -// EqualFunc reports whether two slices are equal using a comparison +// EqualFunc reports whether two slices are equal using an equality // function on each pair of elements. If the lengths are different, // EqualFunc returns false. 
Otherwise, the elements are compared in // increasing index order, and the comparison stops at the first index // for which eq returns false. -func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { +func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { if len(s1) != len(s2) { return false } @@ -49,45 +46,37 @@ func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { return true } -// Compare compares the elements of s1 and s2. -// The elements are compared sequentially, starting at index 0, +// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair +// of elements. The elements are compared sequentially, starting at index 0, // until one element is not equal to the other. // The result of comparing the first non-matching elements is returned. // If both slices are equal until one of them ends, the shorter slice is // considered less than the longer one. // The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. -// Comparisons involving floating point NaNs are ignored. -func Compare[E constraints.Ordered](s1, s2 []E) int { - s2len := len(s2) +func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { for i, v1 := range s1 { - if i >= s2len { + if i >= len(s2) { return +1 } v2 := s2[i] - switch { - case v1 < v2: - return -1 - case v1 > v2: - return +1 + if c := cmpCompare(v1, v2); c != 0 { + return c } } - if len(s1) < s2len { + if len(s1) < len(s2) { return -1 } return 0 } -// CompareFunc is like Compare but uses a comparison function -// on each pair of elements. The elements are compared in increasing -// index order, and the comparisons stop after the first time cmp -// returns non-zero. +// CompareFunc is like [Compare] but uses a custom comparison function on each +// pair of elements. // The result is the first non-zero result of cmp; if cmp always // returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), // and +1 if len(s1) > len(s2). 
-func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { - s2len := len(s2) +func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { for i, v1 := range s1 { - if i >= s2len { + if i >= len(s2) { return +1 } v2 := s2[i] @@ -95,7 +84,7 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { return c } } - if len(s1) < s2len { + if len(s1) < len(s2) { return -1 } return 0 @@ -103,9 +92,9 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { // Index returns the index of the first occurrence of v in s, // or -1 if not present. -func Index[E comparable](s []E, v E) int { - for i, vs := range s { - if v == vs { +func Index[S ~[]E, E comparable](s S, v E) int { + for i := range s { + if v == s[i] { return i } } @@ -114,9 +103,9 @@ func Index[E comparable](s []E, v E) int { // IndexFunc returns the first index i satisfying f(s[i]), // or -1 if none do. -func IndexFunc[E any](s []E, f func(E) bool) int { - for i, v := range s { - if f(v) { +func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { + for i := range s { + if f(s[i]) { return i } } @@ -124,39 +113,104 @@ func IndexFunc[E any](s []E, f func(E) bool) int { } // Contains reports whether v is present in s. -func Contains[E comparable](s []E, v E) bool { +func Contains[S ~[]E, E comparable](s S, v E) bool { return Index(s, v) >= 0 } // ContainsFunc reports whether at least one // element e of s satisfies f(e). -func ContainsFunc[E any](s []E, f func(E) bool) bool { +func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { return IndexFunc(s, f) >= 0 } // Insert inserts the values v... into s at index i, // returning the modified slice. -// In the returned slice r, r[i] == v[0]. +// The elements at s[i:] are shifted up to make room. +// In the returned slice r, r[i] == v[0], +// and r[i+len(v)] == value originally at r[i]. // Insert panics if i is out of range. // This function is O(len(s) + len(v)). 
func Insert[S ~[]E, E any](s S, i int, v ...E) S { - tot := len(s) + len(v) - if tot <= cap(s) { - s2 := s[:tot] - copy(s2[i+len(v):], s[i:]) + m := len(v) + if m == 0 { + return s + } + n := len(s) + if i == n { + return append(s, v...) + } + if n+m > cap(s) { + // Use append rather than make so that we bump the size of + // the slice up to the next storage class. + // This is what Grow does but we don't call Grow because + // that might copy the values twice. + s2 := append(s[:i], make(S, n+m-i)...) copy(s2[i:], v) + copy(s2[i+m:], s[i:]) return s2 } - s2 := make(S, tot) - copy(s2, s[:i]) - copy(s2[i:], v) - copy(s2[i+len(v):], s[i:]) - return s2 + s = s[:n+m] + + // before: + // s: aaaaaaaabbbbccccccccdddd + // ^ ^ ^ ^ + // i i+m n n+m + // after: + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // + // a are the values that don't move in s. + // v are the values copied in from v. + // b and c are the values from s that are shifted up in index. + // d are the values that get overwritten, never to be seen again. + + if !overlaps(v, s[i+m:]) { + // Easy case - v does not overlap either the c or d regions. + // (It might be in some of a or b, or elsewhere entirely.) + // The data we copy up doesn't write to v at all, so just do it. + + copy(s[i+m:], s[i:]) + + // Now we have + // s: aaaaaaaabbbbbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // Note the b values are duplicated. + + copy(s[i:], v) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s + } + + // The hard case - v overlaps c or d. We can't just shift up + // the data because we'd move or clobber the values we're trying + // to insert. + // So instead, write v on top of d, then rotate. + copy(s[n:], v) + + // Now we have + // s: aaaaaaaabbbbccccccccvvvv + // ^ ^ ^ ^ + // i i+m n n+m + + rotateRight(s[i:], m) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. 
+ return s } // Delete removes the elements s[i:j] from s, returning the modified slice. // Delete panics if s[i:j] is not a valid slice of s. -// Delete modifies the contents of the slice s; it does not create a new slice. // Delete is O(len(s)-j), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. // Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those @@ -168,22 +222,113 @@ func Delete[S ~[]E, E any](s S, i, j int) S { return append(s[:i], s[j:]...) } +// DeleteFunc removes any elements from s for which del returns true, +// returning the modified slice. +// When DeleteFunc removes m elements, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage +// collected. +func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := IndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + return s[:i] +} + // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { _ = s[i:j] // verify that i:j is a valid subslice + + if i == j { + return Insert(s, i, v...) + } + if j == len(s) { + return append(s[:i], v...) + } + tot := len(s[:i]) + len(v) + len(s[j:]) - if tot <= cap(s) { - s2 := s[:tot] - copy(s2[i+len(v):], s[j:]) + if tot > cap(s) { + // Too big to fit, allocate and copy over. + s2 := append(s[:i], make(S, tot-i)...) // See Insert copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) return s2 } - s2 := make(S, tot) - copy(s2, s[:i]) - copy(s2[i:], v) - copy(s2[i+len(v):], s[j:]) - return s2 + + r := s[:tot] + + if i+len(v) <= j { + // Easy, as v fits in the deleted portion. 
+ copy(r[i:], v) + if i+len(v) != j { + copy(r[i+len(v):], s[j:]) + } + return r + } + + // We are expanding (v is bigger than j-i). + // The situation is something like this: + // (example has i=4,j=8,len(s)=16,len(v)=6) + // s: aaaaxxxxbbbbbbbbyy + // ^ ^ ^ ^ + // i j len(s) tot + // a: prefix of s + // x: deleted range + // b: more of s + // y: area to expand into + + if !overlaps(r[i+len(v):], v) { + // Easy, as v is not clobbered by the first copy. + copy(r[i+len(v):], s[j:]) + copy(r[i:], v) + return r + } + + // This is a situation where we don't have a single place to which + // we can copy v. Parts of it need to go to two different places. + // We want to copy the prefix of v into y and the suffix into x, then + // rotate |y| spots to the right. + // + // v[2:] v[:2] + // | | + // s: aaaavvvvbbbbbbbbvv + // ^ ^ ^ ^ + // i j len(s) tot + // + // If either of those two destinations don't alias v, then we're good. + y := len(v) - (j - i) // length of y portion + + if !overlaps(r[i:j], v) { + copy(r[i:j], v[y:]) + copy(r[len(s):], v[:y]) + rotateRight(r[i:], y) + return r + } + if !overlaps(r[len(s):], v) { + copy(r[len(s):], v[:y]) + copy(r[i:j], v[y:]) + rotateRight(r[i:], y) + return r + } + + // Now we know that v overlaps both x and y. + // That means that the entirety of b is *inside* v. + // So we don't need to preserve b at all; instead we + // can copy v first, then copy the b part of v out of + // v to the right destination. + k := startIdx(v, s[j:]) + copy(r[i:], v) + copy(r[i+len(v):], r[i+k:]) + return r } // Clone returns a copy of the slice. @@ -198,7 +343,8 @@ func Clone[S ~[]E, E any](s S) S { // Compact replaces consecutive runs of equal elements with a single copy. // This is like the uniq command found on Unix. -// Compact modifies the contents of the slice s; it does not create a new slice. +// Compact modifies the contents of the slice s and returns the modified slice, +// which may have a smaller length. 
// When Compact discards m elements in total, it might not modify the elements // s[len(s)-m:len(s)]. If those elements contain pointers you might consider // zeroing those elements so that objects they reference can be garbage collected. @@ -207,29 +353,30 @@ func Compact[S ~[]E, E comparable](s S) S { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if v != last { - s[i] = v + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } i++ - last = v } } return s[:i] } -// CompactFunc is like Compact but uses a comparison function. +// CompactFunc is like [Compact] but uses an equality function to compare elements. +// For runs of elements that compare equal, CompactFunc keeps the first one. func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { if len(s) < 2 { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if !eq(v, last) { - s[i] = v + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } i++ - last = v } } return s[:i] @@ -256,3 +403,97 @@ func Grow[S ~[]E, E any](s S, n int) S { func Clip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } + +// Rotation algorithm explanation: +// +// rotate left by 2 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join first parts +// 89234567 01 +// recursively rotate first left part by 2 +// 23456789 01 +// join at the end +// 2345678901 +// +// rotate left by 8 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join last parts +// 89 23456701 +// recursively rotate second part left by 6 +// 89 01234567 +// join at the end +// 8901234567 + +// TODO: There are other rotate algorithms. +// This algorithm has the desirable property that it moves each element exactly twice. +// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. 
+// The follow-cycles algorithm can be 1-write but it is not very cache friendly. + +// rotateLeft rotates b left by n spaces. +// s_final[i] = s_orig[i+r], wrapping around. +func rotateLeft[E any](s []E, r int) { + for r != 0 && r != len(s) { + if r*2 <= len(s) { + swap(s[:r], s[len(s)-r:]) + s = s[:len(s)-r] + } else { + swap(s[:len(s)-r], s[r:]) + s, r = s[len(s)-r:], r*2-len(s) + } + } +} +func rotateRight[E any](s []E, r int) { + rotateLeft(s, len(s)-r) +} + +// swap swaps the contents of x and y. x and y must be equal length and disjoint. +func swap[E any](x, y []E) { + for i := 0; i < len(x); i++ { + x[i], y[i] = y[i], x[i] + } +} + +// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. +func overlaps[E any](a, b []E) bool { + if len(a) == 0 || len(b) == 0 { + return false + } + elemSize := unsafe.Sizeof(a[0]) + if elemSize == 0 { + return false + } + // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. + // Also see crypto/internal/alias/alias.go:AnyOverlap + return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && + uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) +} + +// startIdx returns the index in haystack where the needle starts. +// prerequisite: the needle must be aliased entirely inside the haystack. +func startIdx[E any](haystack, needle []E) int { + p := &needle[0] + for i := range haystack { + if p == &haystack[i] { + return i + } + } + // TODO: what if the overlap is by a non-integral number of Es? + panic("needle not found") +} + +// Reverse reverses the elements of the slice in place. 
+func Reverse[S ~[]E, E any](s S) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index f14f40da7..b67897f76 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp + package slices import ( @@ -11,57 +13,116 @@ import ( ) // Sort sorts a slice of any ordered type in ascending order. -// Sort may fail to sort correctly when sorting slices of floating-point -// numbers containing Not-a-number (NaN) values. -// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) -// instead if the input may contain NaNs. -func Sort[E constraints.Ordered](x []E) { +// When sorting floating-point numbers, NaNs are ordered before other values. +func Sort[S ~[]E, E constraints.Ordered](x S) { n := len(x) pdqsortOrdered(x, 0, n, bits.Len(uint(n))) } -// SortFunc sorts the slice x in ascending order as determined by the less function. -// This sort is not guaranteed to be stable. +// SortFunc sorts the slice x in ascending order as determined by the cmp +// function. This sort is not guaranteed to be stable. +// cmp(a, b) should return a negative number when a < b, a positive number when +// a > b and zero when a == b. // -// SortFunc requires that less is a strict weak ordering. +// SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. 
-func SortFunc[E any](x []E, less func(a, b E) bool) { +func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { n := len(x) - pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) + pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) } // SortStableFunc sorts the slice x while keeping the original order of equal -// elements, using less to compare elements. -func SortStableFunc[E any](x []E, less func(a, b E) bool) { - stableLessFunc(x, len(x), less) +// elements, using cmp to compare elements in the same way as [SortFunc]. +func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + stableCmpFunc(x, len(x), cmp) } // IsSorted reports whether x is sorted in ascending order. -func IsSorted[E constraints.Ordered](x []E) bool { +func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { for i := len(x) - 1; i > 0; i-- { - if x[i] < x[i-1] { + if cmpLess(x[i], x[i-1]) { return false } } return true } -// IsSortedFunc reports whether x is sorted in ascending order, with less as the -// comparison function. -func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { +// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the +// comparison function as defined by [SortFunc]. +func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { for i := len(x) - 1; i > 0; i-- { - if less(x[i], x[i-1]) { + if cmp(x[i], x[i-1]) < 0 { return false } } return true } +// Min returns the minimal value in x. It panics if x is empty. +// For floating-point numbers, Min propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Min[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Min: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = min(m, x[i]) + } + return m +} + +// MinFunc returns the minimal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one minimal element +// according to the cmp function, MinFunc returns the first one. 
+func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MinFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) < 0 { + m = x[i] + } + } + return m +} + +// Max returns the maximal value in x. It panics if x is empty. +// For floating-point E, Max propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Max[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Max: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = max(m, x[i]) + } + return m +} + +// MaxFunc returns the maximal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one maximal element +// according to the cmp function, MaxFunc returns the first one. +func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MaxFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) > 0 { + m = x[i] + } + } + return m +} + // BinarySearch searches for target in a sorted slice and returns the position // where target is found, or the position where target would appear in the // sort order; it also returns a bool saying whether the target is really found // in the slice. The slice must be sorted in increasing order. -func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { +func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { // Inlining is faster than calling BinarySearchFunc with a lambda. n := len(x) // Define x[-1] < target and x[n] >= target. @@ -70,22 +131,24 @@ func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { for i < j { h := int(uint(i+j) >> 1) // avoid overflow when computing h // i ≤ h < j - if x[h] < target { + if cmpLess(x[h], target) { i = h + 1 // preserves x[i-1] < target } else { j = h // preserves x[j] >= target } } // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. 
- return i, i < n && x[i] == target + return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) } -// BinarySearchFunc works like BinarySearch, but uses a custom comparison -// function. The slice must be sorted in increasing order, where "increasing" is -// defined by cmp. cmp(a, b) is expected to return an integer comparing the two -// parameters: 0 if a == b, a negative number if a < b and a positive number if -// a > b. -func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { +// BinarySearchFunc works like [BinarySearch], but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { n := len(x) // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. @@ -124,3 +187,9 @@ func (r *xorshift) Next() uint64 { func nextPowerOfTwo(length int) uint { return 1 << bits.Len(uint(length)) } + +// isNaN reports whether x is a NaN without requiring the math package. +// This will always return false if T is not floating-point. 
+func isNaN[T constraints.Ordered](x T) bool { + return x != x +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go similarity index 64% rename from vendor/golang.org/x/exp/slices/zsortfunc.go rename to vendor/golang.org/x/exp/slices/zsortanyfunc.go index 2a632476c..06f2c7a24 100644 --- a/vendor/golang.org/x/exp/slices/zsortfunc.go +++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go @@ -6,28 +6,28 @@ package slices -// insertionSortLessFunc sorts data[a:b] using insertion sort. -func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +// insertionSortCmpFunc sorts data[a:b] using insertion sort. +func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { for i := a + 1; i < b; i++ { - for j := i; j > a && less(data[j], data[j-1]); j-- { + for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { data[j], data[j-1] = data[j-1], data[j] } } } -// siftDownLessFunc implements the heap property on data[lo:hi]. +// siftDownCmpFunc implements the heap property on data[lo:hi]. // first is an offset into the array where the root of the heap lies. 
-func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { +func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { root := lo for { child := 2*root + 1 if child >= hi { break } - if child+1 < hi && less(data[first+child], data[first+child+1]) { + if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { child++ } - if !less(data[first+root], data[first+child]) { + if !(cmp(data[first+root], data[first+child]) < 0) { return } data[first+root], data[first+child] = data[first+child], data[first+root] @@ -35,30 +35,30 @@ func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool } } -func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { first := a lo := 0 hi := b - a // Build heap with greatest element at top. for i := (hi - 1) / 2; i >= 0; i-- { - siftDownLessFunc(data, i, hi, first, less) + siftDownCmpFunc(data, i, hi, first, cmp) } // Pop elements, largest first, into end of data. for i := hi - 1; i >= 0; i-- { data[first], data[first+i] = data[first+i], data[first] - siftDownLessFunc(data, lo, i, first, less) + siftDownCmpFunc(data, lo, i, first, cmp) } } -// pdqsortLessFunc sorts data[a:b]. +// pdqsortCmpFunc sorts data[a:b]. // The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. // pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf // C++ implementation: https://github.com/orlp/pdqsort // Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ // limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. 
-func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { +func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { const maxInsertion = 12 var ( @@ -70,25 +70,25 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { length := b - a if length <= maxInsertion { - insertionSortLessFunc(data, a, b, less) + insertionSortCmpFunc(data, a, b, cmp) return } // Fall back to heapsort if too many bad choices were made. if limit == 0 { - heapSortLessFunc(data, a, b, less) + heapSortCmpFunc(data, a, b, cmp) return } // If the last partitioning was imbalanced, we need to breaking patterns. if !wasBalanced { - breakPatternsLessFunc(data, a, b, less) + breakPatternsCmpFunc(data, a, b, cmp) limit-- } - pivot, hint := choosePivotLessFunc(data, a, b, less) + pivot, hint := choosePivotCmpFunc(data, a, b, cmp) if hint == decreasingHint { - reverseRangeLessFunc(data, a, b, less) + reverseRangeCmpFunc(data, a, b, cmp) // The chosen pivot was pivot-a elements after the start of the array. // After reversing it is pivot-a elements before the end of the array. // The idea came from Rust's implementation. @@ -98,48 +98,48 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { // The slice is likely already sorted. if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortLessFunc(data, a, b, less) { + if partialInsertionSortCmpFunc(data, a, b, cmp) { return } } // Probably the slice contains many duplicate elements, partition the slice into // elements equal to and elements greater than the pivot. 
- if a > 0 && !less(data[a-1], data[pivot]) { - mid := partitionEqualLessFunc(data, a, b, pivot, less) + if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { + mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) a = mid continue } - mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) wasPartitioned = alreadyPartitioned leftLen, rightLen := mid-a, b-mid balanceThreshold := length / 8 if leftLen < rightLen { wasBalanced = leftLen >= balanceThreshold - pdqsortLessFunc(data, a, mid, limit, less) + pdqsortCmpFunc(data, a, mid, limit, cmp) a = mid + 1 } else { wasBalanced = rightLen >= balanceThreshold - pdqsortLessFunc(data, mid+1, b, limit, less) + pdqsortCmpFunc(data, mid+1, b, limit, cmp) b = mid } } } -// partitionLessFunc does one quicksort partition. +// partitionCmpFunc does one quicksort partition. // Let p = data[pivot] // Moves elements in data[a:b] around, so that data[i]

=p for inewpivot. // On return, data[newpivot] = p -func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { +func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) { data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - for i <= j && less(data[i], data[a]) { + for i <= j && (cmp(data[i], data[a]) < 0) { i++ } - for i <= j && !less(data[j], data[a]) { + for i <= j && !(cmp(data[j], data[a]) < 0) { j-- } if i > j { @@ -151,10 +151,10 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) j-- for { - for i <= j && less(data[i], data[a]) { + for i <= j && (cmp(data[i], data[a]) < 0) { i++ } - for i <= j && !less(data[j], data[a]) { + for i <= j && !(cmp(data[j], data[a]) < 0) { j-- } if i > j { @@ -168,17 +168,17 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) return j, false } -// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. // It assumed that data[a:b] does not contain elements smaller than the data[pivot]. 
-func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { +func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) { data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned for { - for i <= j && !less(data[a], data[i]) { + for i <= j && !(cmp(data[a], data[i]) < 0) { i++ } - for i <= j && less(data[a], data[j]) { + for i <= j && (cmp(data[a], data[j]) < 0) { j-- } if i > j { @@ -191,15 +191,15 @@ func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) return i } -// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { +// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool { const ( maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted shortestShifting = 50 // don't shift any elements on short arrays ) i := a + 1 for j := 0; j < maxSteps; j++ { - for i < b && !less(data[i], data[i-1]) { + for i < b && !(cmp(data[i], data[i-1]) < 0) { i++ } @@ -216,7 +216,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b // Shift the smaller one to the left. if i-a >= 2 { for j := i - 1; j >= 1; j-- { - if !less(data[j], data[j-1]) { + if !(cmp(data[j], data[j-1]) < 0) { break } data[j], data[j-1] = data[j-1], data[j] @@ -225,7 +225,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b // Shift the greater one to the right. 
if b-i >= 2 { for j := i + 1; j < b; j++ { - if !less(data[j], data[j-1]) { + if !(cmp(data[j], data[j-1]) < 0) { break } data[j], data[j-1] = data[j-1], data[j] @@ -235,9 +235,9 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b return false } -// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns +// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns // that might cause imbalanced partitions in quicksort. -func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { length := b - a if length >= 8 { random := xorshift(length) @@ -253,12 +253,12 @@ func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { } } -// choosePivotLessFunc chooses a pivot in data[a:b]. +// choosePivotCmpFunc chooses a pivot in data[a:b]. // // [0,8): chooses a static pivot. // [8,shortestNinther): uses the simple median-of-three method. // [shortestNinther,∞): uses the Tukey ninther method. -func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { +func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { const ( shortestNinther = 50 maxSwaps = 4 * 3 @@ -276,12 +276,12 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv if l >= 8 { if l >= shortestNinther { // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentLessFunc(data, i, &swaps, less) - j = medianAdjacentLessFunc(data, j, &swaps, less) - k = medianAdjacentLessFunc(data, k, &swaps, less) + i = medianAdjacentCmpFunc(data, i, &swaps, cmp) + j = medianAdjacentCmpFunc(data, j, &swaps, cmp) + k = medianAdjacentCmpFunc(data, k, &swaps, cmp) } // Find the median among i, j, k and stores it into j. 
- j = medianLessFunc(data, i, j, k, &swaps, less) + j = medianCmpFunc(data, i, j, k, &swaps, cmp) } switch swaps { @@ -294,29 +294,29 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv } } -// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { - if less(data[b], data[a]) { +// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { + if cmp(data[b], data[a]) < 0 { *swaps++ return b, a } return a, b } -// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { - a, b = order2LessFunc(data, a, b, swaps, less) - b, c = order2LessFunc(data, b, c, swaps, less) - a, b = order2LessFunc(data, a, b, swaps, less) +// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { + a, b = order2CmpFunc(data, a, b, swaps, cmp) + b, c = order2CmpFunc(data, b, c, swaps, cmp) + a, b = order2CmpFunc(data, a, b, swaps, cmp) return b } -// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { - return medianLessFunc(data, a-1, a, a+1, swaps, less) +// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. 
+func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { + return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) } -func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { i := a j := b - 1 for i < j { @@ -326,37 +326,37 @@ func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { } } -func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { +func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { for i := 0; i < n; i++ { data[a+i], data[b+i] = data[b+i], data[a+i] } } -func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { +func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { blockSize := 20 // must be > 0 a, b := 0, blockSize for b <= n { - insertionSortLessFunc(data, a, b, less) + insertionSortCmpFunc(data, a, b, cmp) a = b b += blockSize } - insertionSortLessFunc(data, a, n, less) + insertionSortCmpFunc(data, a, n, cmp) for blockSize < n { a, b = 0, 2*blockSize for b <= n { - symMergeLessFunc(data, a, a+blockSize, b, less) + symMergeCmpFunc(data, a, a+blockSize, b, cmp) a = b b += 2 * blockSize } if m := a + blockSize; m < n { - symMergeLessFunc(data, a, m, n, less) + symMergeCmpFunc(data, a, m, n, cmp) } blockSize *= 2 } } -// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using // the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum // Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz // Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in @@ -375,7 +375,7 @@ func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { // symMerge assumes non-degenerate arguments: a < m && m < b. 
// Having the caller check this condition eliminates many leaf recursion calls, // which improves performance. -func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { +func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { // Avoid unnecessary recursions of symMerge // by direct insertion of data[a] into data[m:b] // if data[a:m] only contains one element. @@ -387,7 +387,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { j := b for i < j { h := int(uint(i+j) >> 1) - if less(data[h], data[a]) { + if cmp(data[h], data[a]) < 0 { i = h + 1 } else { j = h @@ -411,7 +411,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { j := m for i < j { h := int(uint(i+j) >> 1) - if !less(data[m], data[h]) { + if !(cmp(data[m], data[h]) < 0) { i = h + 1 } else { j = h @@ -438,7 +438,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { for start < r { c := int(uint(start+r) >> 1) - if !less(data[p-c], data[c]) { + if !(cmp(data[p-c], data[c]) < 0) { start = c + 1 } else { r = c @@ -447,33 +447,33 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { end := n - start if start < m && m < end { - rotateLessFunc(data, start, m, end, less) + rotateCmpFunc(data, start, m, end, cmp) } if a < start && start < mid { - symMergeLessFunc(data, a, start, mid, less) + symMergeCmpFunc(data, a, start, mid, cmp) } if mid < end && end < b { - symMergeLessFunc(data, mid, end, b, less) + symMergeCmpFunc(data, mid, end, b, cmp) } } -// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: // Data of the form 'x u v y' is changed to 'x v u y'. // rotate performs at most b-a many calls to data.Swap, // and it assumes non-degenerate arguments: a < m && m < b. 
-func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { +func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { i := m - a j := b - m for i != j { if i > j { - swapRangeLessFunc(data, m-i, m, j, less) + swapRangeCmpFunc(data, m-i, m, j, cmp) i -= j } else { - swapRangeLessFunc(data, m-i, m+j-i, i, less) + swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) j -= i } } // i == j - swapRangeLessFunc(data, m-i, m, i, less) + swapRangeCmpFunc(data, m-i, m, i, cmp) } diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go index efaa1c8b7..99b47c398 100644 --- a/vendor/golang.org/x/exp/slices/zsortordered.go +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -11,7 +11,7 @@ import "golang.org/x/exp/constraints" // insertionSortOrdered sorts data[a:b] using insertion sort. func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { for i := a + 1; i < b; i++ { - for j := i; j > a && (data[j] < data[j-1]); j-- { + for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { data[j], data[j-1] = data[j-1], data[j] } } @@ -26,10 +26,10 @@ func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { if child >= hi { break } - if child+1 < hi && (data[first+child] < data[first+child+1]) { + if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) { child++ } - if !(data[first+root] < data[first+child]) { + if !cmpLess(data[first+root], data[first+child]) { return } data[first+root], data[first+child] = data[first+child], data[first+root] @@ -107,7 +107,7 @@ func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { // Probably the slice contains many duplicate elements, partition the slice into // elements equal to and elements greater than the pivot. 
- if a > 0 && !(data[a-1] < data[pivot]) { + if a > 0 && !cmpLess(data[a-1], data[pivot]) { mid := partitionEqualOrdered(data, a, b, pivot) a = mid continue @@ -138,10 +138,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - for i <= j && (data[i] < data[a]) { + for i <= j && cmpLess(data[i], data[a]) { i++ } - for i <= j && !(data[j] < data[a]) { + for i <= j && !cmpLess(data[j], data[a]) { j-- } if i > j { @@ -153,10 +153,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo j-- for { - for i <= j && (data[i] < data[a]) { + for i <= j && cmpLess(data[i], data[a]) { i++ } - for i <= j && !(data[j] < data[a]) { + for i <= j && !cmpLess(data[j], data[a]) { j-- } if i > j { @@ -177,10 +177,10 @@ func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (ne i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned for { - for i <= j && !(data[a] < data[i]) { + for i <= j && !cmpLess(data[a], data[i]) { i++ } - for i <= j && (data[a] < data[j]) { + for i <= j && cmpLess(data[a], data[j]) { j-- } if i > j { @@ -201,7 +201,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool ) i := a + 1 for j := 0; j < maxSteps; j++ { - for i < b && !(data[i] < data[i-1]) { + for i < b && !cmpLess(data[i], data[i-1]) { i++ } @@ -218,7 +218,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool // Shift the smaller one to the left. if i-a >= 2 { for j := i - 1; j >= 1; j-- { - if !(data[j] < data[j-1]) { + if !cmpLess(data[j], data[j-1]) { break } data[j], data[j-1] = data[j-1], data[j] @@ -227,7 +227,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool // Shift the greater one to the right. 
if b-i >= 2 { for j := i + 1; j < b; j++ { - if !(data[j] < data[j-1]) { + if !cmpLess(data[j], data[j-1]) { break } data[j], data[j-1] = data[j-1], data[j] @@ -298,7 +298,7 @@ func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, h // order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { - if data[b] < data[a] { + if cmpLess(data[b], data[a]) { *swaps++ return b, a } @@ -389,7 +389,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { j := b for i < j { h := int(uint(i+j) >> 1) - if data[h] < data[a] { + if cmpLess(data[h], data[a]) { i = h + 1 } else { j = h @@ -413,7 +413,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { j := m for i < j { h := int(uint(i+j) >> 1) - if !(data[m] < data[h]) { + if !cmpLess(data[m], data[h]) { i = h + 1 } else { j = h @@ -440,7 +440,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { for start < r { c := int(uint(start+r) >> 1) - if !(data[p-c] < data[c]) { + if !cmpLess(data[p-c], data[c]) { start = c + 1 } else { r = c diff --git a/vendor/golang.org/x/exp/slog/attr.go b/vendor/golang.org/x/exp/slog/attr.go index 29431cb7a..a180d0e1d 100644 --- a/vendor/golang.org/x/exp/slog/attr.go +++ b/vendor/golang.org/x/exp/slog/attr.go @@ -58,14 +58,26 @@ func Duration(key string, v time.Duration) Attr { } // Group returns an Attr for a Group Value. -// The caller must not subsequently mutate the -// argument slice. +// The first argument is the key; the remaining arguments +// are converted to Attrs as in [Logger.Log]. // -// Use Group to collect several Attrs under a single +// Use Group to collect several key-value pairs under a single // key on a log line, or as the result of LogValue // in order to log a single value as multiple Attrs. 
-func Group(key string, as ...Attr) Attr { - return Attr{key, GroupValue(as...)} +func Group(key string, args ...any) Attr { + return Attr{key, GroupValue(argsToAttrSlice(args)...)} +} + +func argsToAttrSlice(args []any) []Attr { + var ( + attr Attr + attrs []Attr + ) + for len(args) > 0 { + attr, args = argsToAttr(args) + attrs = append(attrs, attr) + } + return attrs } // Any returns an Attr for the supplied value. @@ -82,3 +94,9 @@ func (a Attr) Equal(b Attr) bool { func (a Attr) String() string { return fmt.Sprintf("%s=%s", a.Key, a.Value) } + +// isEmpty reports whether a has an empty key and a nil value. +// That can be written as Attr{} or Any("", nil). +func (a Attr) isEmpty() bool { + return a.Key == "" && a.Value.num == 0 && a.Value.any == nil +} diff --git a/vendor/golang.org/x/exp/slog/doc.go b/vendor/golang.org/x/exp/slog/doc.go index 3b37eec0d..4beaf8674 100644 --- a/vendor/golang.org/x/exp/slog/doc.go +++ b/vendor/golang.org/x/exp/slog/doc.go @@ -44,7 +44,7 @@ For more control over the output format, create a logger with a different handle This statement uses [New] to create a new logger with a TextHandler that writes structured records in text form to standard error: - logger := slog.New(slog.NewTextHandler(os.Stderr)) + logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) [TextHandler] output is a sequence of key=value pairs, easily and unambiguously parsed by machine. This statement: @@ -57,14 +57,14 @@ produces this output: The package also provides [JSONHandler], whose output is line-delimited JSON: - logger := slog.New(slog.NewJSONHandler(os.Stdout)) + logger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) logger.Info("hello", "count", 3) produces this output: {"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3} -Both [TextHandler] and [JSONHandler] can be configured with a [HandlerOptions]. +Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions]. 
There are options for setting the minimum level (see Levels, below), displaying the source file and line of the log call, and modifying attributes before they are logged. @@ -78,38 +78,6 @@ will cause the top-level functions like [Info] to use it. so that existing applications that use [log.Printf] and related functions will send log records to the logger's handler without needing to be rewritten. -# Attrs and Values - -An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as -alternating keys and values. The statement - - slog.Info("hello", slog.Int("count", 3)) - -behaves the same as - - slog.Info("hello", "count", 3) - -There are convenience constructors for [Attr] such as [Int], [String], and [Bool] -for common types, as well as the function [Any] for constructing Attrs of any -type. - -The value part of an Attr is a type called [Value]. -Like an [any], a Value can hold any Go value, -but it can represent typical values, including all numbers and strings, -without an allocation. - -For the most efficient log output, use [Logger.LogAttrs]. -It is similar to [Logger.Log] but accepts only Attrs, not alternating -keys and values; this allows it, too, to avoid allocation. - -The call - - logger.LogAttrs(nil, slog.LevelInfo, "hello", slog.Int("count", 3)) - -is the most efficient way to achieve the same output as - - slog.Info("hello", "count", 3) - Some attributes are common to many log calls. For example, you may wish to include the URL or trace identifier of a server request with all log events arising from the request. 
@@ -149,7 +117,7 @@ a global LevelVar: Then use the LevelVar to construct a handler, and make it the default: - h := slog.HandlerOptions{Level: programLevel}.NewJSONHandler(os.Stderr) + h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel}) slog.SetDefault(slog.New(h)) Now the program can change its logging level with a single statement: @@ -164,11 +132,11 @@ How this qualification is displayed depends on the handler. [TextHandler] separates the group and attribute names with a dot. [JSONHandler] treats each group as a separate JSON object, with the group name as the key. -Use [Group] to create a Group Attr from a name and a list of Attrs: +Use [Group] to create a Group attribute from a name and a list of key-value pairs: slog.Group("request", - slog.String("method", r.Method), - slog.Any("url", r.URL)) + "method", r.Method, + "url", r.URL) TextHandler would display this group as @@ -199,19 +167,51 @@ so even if it uses the common key "id", the log line will have distinct keys. Some handlers may wish to include information from the [context.Context] that is available at the call site. One example of such information -is the identifier for the current span when tracing is is enabled. +is the identifier for the current span when tracing is enabled. The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first argument, as do their corresponding top-level functions. Although the convenience methods on Logger (Info and so on) and the corresponding top-level functions do not take a context, the alternatives ending -in "Ctx" do. For example, +in "Context" do. For example, - slog.InfoCtx(ctx, "message") + slog.InfoContext(ctx, "message") It is recommended to pass a context to an output method if one is available. +# Attrs and Values + +An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as +alternating keys and values. 
The statement + + slog.Info("hello", slog.Int("count", 3)) + +behaves the same as + + slog.Info("hello", "count", 3) + +There are convenience constructors for [Attr] such as [Int], [String], and [Bool] +for common types, as well as the function [Any] for constructing Attrs of any +type. + +The value part of an Attr is a type called [Value]. +Like an [any], a Value can hold any Go value, +but it can represent typical values, including all numbers and strings, +without an allocation. + +For the most efficient log output, use [Logger.LogAttrs]. +It is similar to [Logger.Log] but accepts only Attrs, not alternating +keys and values; this allows it, too, to avoid allocation. + +The call + + logger.LogAttrs(nil, slog.LevelInfo, "hello", slog.Int("count", 3)) + +is the most efficient way to achieve the same output as + + slog.Info("hello", "count", 3) + # Customizing a type's logging behavior If a type implements the [LogValuer] interface, the [Value] returned from its LogValue diff --git a/vendor/golang.org/x/exp/slog/handler.go b/vendor/golang.org/x/exp/slog/handler.go index c1b9037b4..bd635cb81 100644 --- a/vendor/golang.org/x/exp/slog/handler.go +++ b/vendor/golang.org/x/exp/slog/handler.go @@ -8,6 +8,7 @@ import ( "context" "fmt" "io" + "reflect" "strconv" "sync" "time" @@ -41,7 +42,7 @@ type Handler interface { Enabled(context.Context, Level) bool // Handle handles the Record. - // It will only be called Enabled returns true. + // It will only be called when Enabled returns true. // The Context argument is as for Enabled. // It is present solely to provide Handlers access to the context's values. // Canceling the context should not affect record processing. @@ -51,8 +52,9 @@ type Handler interface { // Handle methods that produce output should observe the following rules: // - If r.Time is the zero time, ignore the time. // - If r.PC is zero, ignore it. - // - If an Attr's key is the empty string and the value is not a group, - // ignore the Attr. 
+ // - Attr's values should be resolved. + // - If an Attr's key and value are both the zero value, ignore the Attr. + // This can be tested with attr.Equal(Attr{}). // - If a group's key is empty, inline the group's Attrs. // - If a group has no Attrs (even if it has a non-empty key), // ignore it. @@ -61,7 +63,6 @@ type Handler interface { // WithAttrs returns a new Handler whose attributes consist of // both the receiver's attributes and the arguments. // The Handler owns the slice: it may retain, modify or discard it. - // [Logger.With] will resolve the Attrs. WithAttrs(attrs []Attr) Handler // WithGroup returns a new Handler with the given group appended to @@ -130,10 +131,8 @@ func (h *defaultHandler) WithGroup(name string) Handler { // HandlerOptions are options for a TextHandler or JSONHandler. // A zero HandlerOptions consists entirely of default values. type HandlerOptions struct { - // When AddSource is true, the handler adds a ("source", "file:line") - // attribute to the output indicating the source code position of the log - // statement. AddSource is false by default to skip the cost of computing - // this information. + // AddSource causes the handler to compute the source code position + // of the log statement and add a SourceKey attribute to the output. AddSource bool // Level reports the minimum record level that will be logged. @@ -285,22 +284,7 @@ func (h *commonHandler) handle(r Record) error { } // source if h.opts.AddSource { - frame := r.frame() - if frame.File != "" { - key := SourceKey - if rep == nil { - state.appendKey(key) - state.appendSource(frame.File, frame.Line) - } else { - buf := buffer.New() - buf.WriteString(frame.File) // TODO: escape? 
- buf.WriteByte(':') - buf.WritePosInt(frame.Line) - s := buf.String() - buf.Free() - state.appendAttr(String(key, s)) - } - } + state.appendAttr(Any(SourceKey, r.source())) } key = MessageKey msg := r.Message @@ -333,8 +317,9 @@ func (s *handleState) appendNonBuiltIns(r Record) { defer s.prefix.Free() s.prefix.WriteString(s.h.groupPrefix) s.openGroups() - r.Attrs(func(a Attr) { + r.Attrs(func(a Attr) bool { s.appendAttr(a) + return true }) if s.h.json { // Close all open groups. @@ -420,7 +405,6 @@ func (s *handleState) openGroup(name string) { if s.groups != nil { *s.groups = append(*s.groups, name) } - } // closeGroup ends the group with the given name. @@ -440,26 +424,32 @@ func (s *handleState) closeGroup(name string) { // It handles replacement and checking for an empty key. // after replacement). func (s *handleState) appendAttr(a Attr) { - v := a.Value - // Elide a non-group with an empty key. - if a.Key == "" && v.Kind() != KindGroup { - return - } - if rep := s.h.opts.ReplaceAttr; rep != nil && v.Kind() != KindGroup { + if rep := s.h.opts.ReplaceAttr; rep != nil && a.Value.Kind() != KindGroup { var gs []string if s.groups != nil { gs = *s.groups } - a = rep(gs, Attr{a.Key, v}) - if a.Key == "" { - return + // Resolve before calling ReplaceAttr, so the user doesn't have to. + a.Value = a.Value.Resolve() + a = rep(gs, a) + } + a.Value = a.Value.Resolve() + // Elide empty Attrs. + if a.isEmpty() { + return + } + // Special case: Source. + if v := a.Value; v.Kind() == KindAny { + if src, ok := v.Any().(*Source); ok { + if s.h.json { + a.Value = src.group() + } else { + a.Value = StringValue(fmt.Sprintf("%s:%d", src.File, src.Line)) + } } - // Although all attributes in the Record are already resolved, - // This one came from the user, so it may not have been. - v = a.Value.Resolve() } - if v.Kind() == KindGroup { - attrs := v.Group() + if a.Value.Kind() == KindGroup { + attrs := a.Value.Group() // Output only non-empty groups. 
if len(attrs) > 0 { // Inline a group with an empty key. @@ -475,7 +465,7 @@ func (s *handleState) appendAttr(a Attr) { } } else { s.appendKey(a.Key) - s.appendValue(v) + s.appendValue(a.Value) } } @@ -499,26 +489,6 @@ func (s *handleState) appendKey(key string) { s.sep = s.h.attrSep() } -func (s *handleState) appendSource(file string, line int) { - if s.h.json { - s.buf.WriteByte('"') - *s.buf = appendEscapedJSONString(*s.buf, file) - s.buf.WriteByte(':') - s.buf.WritePosInt(line) - s.buf.WriteByte('"') - } else { - // text - if needsQuoting(file) { - s.appendString(file + ":" + strconv.Itoa(line)) - } else { - // common case: no quoting needed. - s.appendString(file) - s.buf.WriteByte(':') - s.buf.WritePosInt(line) - } - } -} - func (s *handleState) appendString(str string) { if s.h.json { s.buf.WriteByte('"') @@ -535,6 +505,23 @@ func (s *handleState) appendString(str string) { } func (s *handleState) appendValue(v Value) { + defer func() { + if r := recover(); r != nil { + // If it panics with a nil pointer, the most likely cases are + // an encoding.TextMarshaler or error fails to guard against nil, + // in which case "" seems to be the feasible choice. + // + // Adapted from the code in fmt/print.go. + if v := reflect.ValueOf(v.any); v.Kind() == reflect.Pointer && v.IsNil() { + s.appendString("") + return + } + + // Otherwise just print the original panic message. + s.appendString(fmt.Sprintf("!PANIC: %v", r)) + } + }() + var err error if s.h.json { err = appendJSONValue(s, v) diff --git a/vendor/golang.org/x/exp/slog/json_handler.go b/vendor/golang.org/x/exp/slog/json_handler.go index 1cd1f5921..157ada869 100644 --- a/vendor/golang.org/x/exp/slog/json_handler.go +++ b/vendor/golang.org/x/exp/slog/json_handler.go @@ -11,7 +11,6 @@ import ( "errors" "fmt" "io" - "math" "strconv" "time" "unicode/utf8" @@ -26,18 +25,17 @@ type JSONHandler struct { } // NewJSONHandler creates a JSONHandler that writes to w, -// using the default options. 
-func NewJSONHandler(w io.Writer) *JSONHandler { - return (HandlerOptions{}).NewJSONHandler(w) -} - -// NewJSONHandler creates a JSONHandler with the given options that writes to w. -func (opts HandlerOptions) NewJSONHandler(w io.Writer) *JSONHandler { +// using the given options. +// If opts is nil, the default options are used. +func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler { + if opts == nil { + opts = &HandlerOptions{} + } return &JSONHandler{ &commonHandler{ json: true, w: w, - opts: opts, + opts: *opts, }, } } @@ -77,12 +75,16 @@ func (h *JSONHandler) WithGroup(name string) Handler { // To modify these or other attributes, or remove them from the output, use // [HandlerOptions.ReplaceAttr]. // -// Values are formatted as with encoding/json.Marshal, with the following -// exceptions: -// - Floating-point NaNs and infinities are formatted as one of the strings -// "NaN", "+Inf" or "-Inf". -// - Levels are formatted as with Level.String. -// - HTML characters are not escaped. +// Values are formatted as with an [encoding/json.Encoder] with SetEscapeHTML(false), +// with two exceptions. +// +// First, an Attr whose Value is of type error is formatted as a string, by +// calling its Error method. Only errors in Attrs receive this special treatment, +// not errors embedded in structs, slices, maps or other data structures that +// are processed by the encoding/json package. +// +// Second, an encoding failure does not cause Handle to return an error. +// Instead, the error message is formatted as a string. // // Each call to Handle results in a single serialized call to io.Writer.Write. func (h *JSONHandler) Handle(_ context.Context, r Record) error { @@ -110,22 +112,11 @@ func appendJSONValue(s *handleState, v Value) error { case KindUint64: *s.buf = strconv.AppendUint(*s.buf, v.Uint64(), 10) case KindFloat64: - f := v.Float64() - // json.Marshal fails on special floats, so handle them here. 
- switch { - case math.IsInf(f, 1): - s.buf.WriteString(`"+Inf"`) - case math.IsInf(f, -1): - s.buf.WriteString(`"-Inf"`) - case math.IsNaN(f): - s.buf.WriteString(`"NaN"`) - default: - // json.Marshal is funny about floats; it doesn't - // always match strconv.AppendFloat. So just call it. - // That's expensive, but floats are rare. - if err := appendJSONMarshal(s.buf, f); err != nil { - return err - } + // json.Marshal is funny about floats; it doesn't + // always match strconv.AppendFloat. So just call it. + // That's expensive, but floats are rare. + if err := appendJSONMarshal(s.buf, v.Float64()); err != nil { + return err } case KindBool: *s.buf = strconv.AppendBool(*s.buf, v.Bool()) @@ -136,13 +127,14 @@ func appendJSONValue(s *handleState, v Value) error { s.appendTime(v.Time()) case KindAny: a := v.Any() - if err, ok := a.(error); ok { + _, jm := a.(json.Marshaler) + if err, ok := a.(error); ok && !jm { s.appendString(err.Error()) } else { return appendJSONMarshal(s.buf, a) } default: - panic(fmt.Sprintf("bad kind: %d", v.Kind())) + panic(fmt.Sprintf("bad kind: %s", v.Kind())) } return nil } @@ -164,9 +156,7 @@ func appendJSONMarshal(buf *buffer.Buffer, v any) error { // It does not surround the string in quotation marks. // // Modified from encoding/json/encode.go:encodeState.string, -// with escapeHTML set to true. -// -// TODO: review whether HTML escaping is necessary. +// with escapeHTML set to false. func appendEscapedJSONString(buf []byte, s string) []byte { char := func(b byte) { buf = append(buf, b) } str := func(s string) { buf = append(buf, s...) } diff --git a/vendor/golang.org/x/exp/slog/list.go b/vendor/golang.org/x/exp/slog/list.go deleted file mode 100644 index 7b505ea00..000000000 --- a/vendor/golang.org/x/exp/slog/list.go +++ /dev/null @@ -1,85 +0,0 @@ -package slog - -// A list[T] is an immutable sequence. -// It supports three operations: append, len and indexing (at). -// The zero value is an empty list. 
-// -// Repeated calls to append happen in amortized O(1) space and time. (Appending -// an element allocates one node directly, and the normalize operation always -// doubles the front slice, so we can charge two slots to each element.) -// -// The len method takes constant time. -// -// The at method requires a normalized list, and then takes constant time. -// -// It is possible to obtain quadratic behavior by alternating append and at: -// the normalize required by at is called for each appended element, causing -// front to be copied each time. -type list[T any] struct { - front []T - back *node[T] // reversed - lenBack int -} - -type node[T any] struct { - el T - next *node[T] -} - -// append returns a new list consisting of the receiver with x appended. -func (l list[T]) append(x T) list[T] { - if l.front == nil { - // Empty list; return one with one element. - return list[T]{ - front: []T{x}, - back: nil, - lenBack: 0, - } - } - if l.lenBack == len(l.front) { - // When there are as many elements in back as in front, grow - // front and move all of back to it. - l = l.normalize() - } - // Push a new node with the element onto back, which is stored in - // reverse order. - return list[T]{ - front: l.front, - back: &node[T]{el: x, next: l.back}, - lenBack: l.lenBack + 1, - } -} - -// len returns the number of elements in the list. -func (l list[T]) len() int { - return len(l.front) + l.lenBack -} - -// at returns the ith element of the list. -// The list must be normalized. -func (l list[T]) at(i int) T { - if l.back != nil { - panic("not normalized") - } - return l.front[i] -} - -// normalize returns a list whose back is nil and whose front contains all the -// receiver's elements. 
-func (l list[T]) normalize() list[T] { - if l.back == nil { - return l - } - newFront := make([]T, len(l.front)+l.lenBack) - copy(newFront, l.front) - i := len(newFront) - 1 - for b := l.back; b != nil; b = b.next { - newFront[i] = b.el - i-- - } - return list[T]{ - front: newFront, - back: nil, - lenBack: 0, - } -} diff --git a/vendor/golang.org/x/exp/slog/logger.go b/vendor/golang.org/x/exp/slog/logger.go index 8f021e6f6..e87ec9936 100644 --- a/vendor/golang.org/x/exp/slog/logger.go +++ b/vendor/golang.org/x/exp/slog/logger.go @@ -89,27 +89,21 @@ func (l *Logger) clone() *Logger { func (l *Logger) Handler() Handler { return l.handler } // With returns a new Logger that includes the given arguments, converted to -// Attrs as in [Logger.Log] and resolved. +// Attrs as in [Logger.Log]. // The Attrs will be added to each output from the Logger. // The new Logger shares the old Logger's context. // The new Logger's handler is the result of calling WithAttrs on the receiver's // handler. func (l *Logger) With(args ...any) *Logger { - var ( - attr Attr - attrs []Attr - ) - for len(args) > 0 { - attr, args = argsToAttr(args) - attrs = append(attrs, attr) - } c := l.clone() - c.handler = l.handler.WithAttrs(attrs) + c.handler = l.handler.WithAttrs(argsToAttrSlice(args)) return c } // WithGroup returns a new Logger that starts a group. The keys of all // attributes added to the Logger will be qualified by the given name. +// (How that qualification happens depends on the [Handler.WithGroup] +// method of the Logger's Handler.) // The new Logger shares the old Logger's context. // // The new Logger's handler is the result of calling WithGroup on the receiver's @@ -173,7 +167,13 @@ func (l *Logger) Debug(msg string, args ...any) { l.log(nil, LevelDebug, msg, args...) } +// DebugContext logs at LevelDebug with the given context. +func (l *Logger) DebugContext(ctx context.Context, msg string, args ...any) { + l.log(ctx, LevelDebug, msg, args...) 
+} + // DebugCtx logs at LevelDebug with the given context. +// Deprecated: Use Logger.DebugContext. func (l *Logger) DebugCtx(ctx context.Context, msg string, args ...any) { l.log(ctx, LevelDebug, msg, args...) } @@ -183,7 +183,13 @@ func (l *Logger) Info(msg string, args ...any) { l.log(nil, LevelInfo, msg, args...) } +// InfoContext logs at LevelInfo with the given context. +func (l *Logger) InfoContext(ctx context.Context, msg string, args ...any) { + l.log(ctx, LevelInfo, msg, args...) +} + // InfoCtx logs at LevelInfo with the given context. +// Deprecated: Use Logger.InfoContext. func (l *Logger) InfoCtx(ctx context.Context, msg string, args ...any) { l.log(ctx, LevelInfo, msg, args...) } @@ -193,7 +199,13 @@ func (l *Logger) Warn(msg string, args ...any) { l.log(nil, LevelWarn, msg, args...) } +// WarnContext logs at LevelWarn with the given context. +func (l *Logger) WarnContext(ctx context.Context, msg string, args ...any) { + l.log(ctx, LevelWarn, msg, args...) +} + // WarnCtx logs at LevelWarn with the given context. +// Deprecated: Use Logger.WarnContext. func (l *Logger) WarnCtx(ctx context.Context, msg string, args ...any) { l.log(ctx, LevelWarn, msg, args...) } @@ -203,7 +215,13 @@ func (l *Logger) Error(msg string, args ...any) { l.log(nil, LevelError, msg, args...) } +// ErrorContext logs at LevelError with the given context. +func (l *Logger) ErrorContext(ctx context.Context, msg string, args ...any) { + l.log(ctx, LevelError, msg, args...) +} + // ErrorCtx logs at LevelError with the given context. +// Deprecated: Use Logger.ErrorContext. func (l *Logger) ErrorCtx(ctx context.Context, msg string, args ...any) { l.log(ctx, LevelError, msg, args...) } @@ -255,8 +273,8 @@ func Debug(msg string, args ...any) { Default().log(nil, LevelDebug, msg, args...) } -// DebugCtx calls Logger.DebugCtx on the default logger. -func DebugCtx(ctx context.Context, msg string, args ...any) { +// DebugContext calls Logger.DebugContext on the default logger. 
+func DebugContext(ctx context.Context, msg string, args ...any) { Default().log(ctx, LevelDebug, msg, args...) } @@ -265,8 +283,8 @@ func Info(msg string, args ...any) { Default().log(nil, LevelInfo, msg, args...) } -// InfoCtx calls Logger.InfoCtx on the default logger. -func InfoCtx(ctx context.Context, msg string, args ...any) { +// InfoContext calls Logger.InfoContext on the default logger. +func InfoContext(ctx context.Context, msg string, args ...any) { Default().log(ctx, LevelInfo, msg, args...) } @@ -275,8 +293,8 @@ func Warn(msg string, args ...any) { Default().log(nil, LevelWarn, msg, args...) } -// WarnCtx calls Logger.WarnCtx on the default logger. -func WarnCtx(ctx context.Context, msg string, args ...any) { +// WarnContext calls Logger.WarnContext on the default logger. +func WarnContext(ctx context.Context, msg string, args ...any) { Default().log(ctx, LevelWarn, msg, args...) } @@ -285,7 +303,31 @@ func Error(msg string, args ...any) { Default().log(nil, LevelError, msg, args...) } -// ErrorCtx calls Logger.ErrorCtx on the default logger. +// ErrorContext calls Logger.ErrorContext on the default logger. +func ErrorContext(ctx context.Context, msg string, args ...any) { + Default().log(ctx, LevelError, msg, args...) +} + +// DebugCtx calls Logger.DebugContext on the default logger. +// Deprecated: call DebugContext. +func DebugCtx(ctx context.Context, msg string, args ...any) { + Default().log(ctx, LevelDebug, msg, args...) +} + +// InfoCtx calls Logger.InfoContext on the default logger. +// Deprecated: call InfoContext. +func InfoCtx(ctx context.Context, msg string, args ...any) { + Default().log(ctx, LevelInfo, msg, args...) +} + +// WarnCtx calls Logger.WarnContext on the default logger. +// Deprecated: call WarnContext. +func WarnCtx(ctx context.Context, msg string, args ...any) { + Default().log(ctx, LevelWarn, msg, args...) +} + +// ErrorCtx calls Logger.ErrorContext on the default logger. +// Deprecated: call ErrorContext. 
func ErrorCtx(ctx context.Context, msg string, args ...any) { Default().log(ctx, LevelError, msg, args...) } diff --git a/vendor/golang.org/x/exp/slog/record.go b/vendor/golang.org/x/exp/slog/record.go index 6911c6c30..38b3440f7 100644 --- a/vendor/golang.org/x/exp/slog/record.go +++ b/vendor/golang.org/x/exp/slog/record.go @@ -64,15 +64,6 @@ func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record { } } -// frame returns the runtime.Frame of the log event. -// If the Record was created without the necessary information, -// or if the location is unavailable, it returns a zero Frame. -func (r Record) frame() runtime.Frame { - fs := runtime.CallersFrames([]uintptr{r.PC}) - f, _ := fs.Next() - return f -} - // Clone returns a copy of the record with no shared state. // The original record and the clone can both be modified // without interfering with each other. @@ -87,27 +78,29 @@ func (r Record) NumAttrs() int { } // Attrs calls f on each Attr in the Record. -// The Attrs are already resolved. -func (r Record) Attrs(f func(Attr)) { +// Iteration stops if f returns false. +func (r Record) Attrs(f func(Attr) bool) { for i := 0; i < r.nFront; i++ { - f(r.front[i]) + if !f(r.front[i]) { + return + } } for _, a := range r.back { - f(a) + if !f(a) { + return + } } } // AddAttrs appends the given Attrs to the Record's list of Attrs. -// It resolves the Attrs before doing so. func (r *Record) AddAttrs(attrs ...Attr) { - resolveAttrs(attrs) n := copy(r.front[r.nFront:], attrs) r.nFront += n // Check if a copy was modified by slicing past the end // and seeing if the Attr there is non-zero. if cap(r.back) > len(r.back) { end := r.back[:len(r.back)+1][len(r.back)] - if end != (Attr{}) { + if !end.isEmpty() { panic("copies of a slog.Record were both modified") } } @@ -116,7 +109,6 @@ func (r *Record) AddAttrs(attrs ...Attr) { // Add converts the args to Attrs as described in [Logger.Log], // then appends the Attrs to the Record's list of Attrs. 
-// It resolves the Attrs before doing so. func (r *Record) Add(args ...any) { var a Attr for len(args) > 0 { @@ -150,7 +142,7 @@ const badKey = "!BADKEY" // argsToAttr turns a prefix of the nonempty args slice into an Attr // and returns the unconsumed portion of the slice. -// If args[0] is an Attr, it returns it, resolved. +// If args[0] is an Attr, it returns it. // If args[0] is a string, it treats the first two elements as // a key-value pair. // Otherwise, it treats args[0] as a value with a missing key. @@ -160,15 +152,56 @@ func argsToAttr(args []any) (Attr, []any) { if len(args) == 1 { return String(badKey, x), nil } - a := Any(x, args[1]) - a.Value = a.Value.Resolve() - return a, args[2:] + return Any(x, args[1]), args[2:] case Attr: - x.Value = x.Value.Resolve() return x, args[1:] default: return Any(badKey, x), args[1:] } } + +// Source describes the location of a line of source code. +type Source struct { + // Function is the package path-qualified function name containing the + // source line. If non-empty, this string uniquely identifies a single + // function in the program. This may be the empty string if not known. + Function string `json:"function"` + // File and Line are the file name and line number (1-based) of the source + // line. These may be the empty string and zero, respectively, if not known. + File string `json:"file"` + Line int `json:"line"` +} + +// attrs returns the non-zero fields of s as a slice of attrs. +// It is similar to a LogValue method, but we don't want Source +// to implement LogValuer because it would be resolved before +// the ReplaceAttr function was called. +func (s *Source) group() Value { + var as []Attr + if s.Function != "" { + as = append(as, String("function", s.Function)) + } + if s.File != "" { + as = append(as, String("file", s.File)) + } + if s.Line != 0 { + as = append(as, Int("line", s.Line)) + } + return GroupValue(as...) +} + +// source returns a Source for the log event. 
+// If the Record was created without the necessary information, +// or if the location is unavailable, it returns a non-nil *Source +// with zero fields. +func (r Record) source() *Source { + fs := runtime.CallersFrames([]uintptr{r.PC}) + f, _ := fs.Next() + return &Source{ + Function: f.Function, + File: f.File, + Line: f.Line, + } +} diff --git a/vendor/golang.org/x/exp/slog/text_handler.go b/vendor/golang.org/x/exp/slog/text_handler.go index 0faa36704..75b66b716 100644 --- a/vendor/golang.org/x/exp/slog/text_handler.go +++ b/vendor/golang.org/x/exp/slog/text_handler.go @@ -22,18 +22,17 @@ type TextHandler struct { } // NewTextHandler creates a TextHandler that writes to w, -// using the default options. -func NewTextHandler(w io.Writer) *TextHandler { - return (HandlerOptions{}).NewTextHandler(w) -} - -// NewTextHandler creates a TextHandler with the given options that writes to w. -func (opts HandlerOptions) NewTextHandler(w io.Writer) *TextHandler { +// using the given options. +// If opts is nil, the default options are used. +func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler { + if opts == nil { + opts = &HandlerOptions{} + } return &TextHandler{ &commonHandler{ json: false, w: w, - opts: opts, + opts: *opts, }, } } @@ -68,7 +67,7 @@ func (h *TextHandler) WithGroup(name string) Handler { // If the AddSource option is set and source information is available, // the key is "source" and the value is output as FILE:LINE. // -// The message's key "msg". +// The message's key is "msg". // // To modify these or other attributes, or remove them from the output, use // [HandlerOptions.ReplaceAttr]. @@ -80,9 +79,13 @@ func (h *TextHandler) WithGroup(name string) Handler { // characters, non-printing characters, '"' or '='. // // Keys inside groups consist of components (keys or group names) separated by -// dots. No further escaping is performed. 
If it is necessary to reconstruct the -// group structure of a key even in the presence of dots inside components, use -// [HandlerOptions.ReplaceAttr] to escape the keys. +// dots. No further escaping is performed. +// Thus there is no way to determine from the key "a.b.c" whether there +// are two groups "a" and "b" and a key "c", or a single group "a.b" and a key "c", +// or single group "a" and a key "b.c". +// If it is necessary to reconstruct the group structure of a key +// even in the presence of dots inside components, use +// [HandlerOptions.ReplaceAttr] to encode that information in the key. // // Each call to Handle results in a single serialized call to // io.Writer.Write. @@ -134,10 +137,15 @@ func byteSlice(a any) ([]byte, bool) { } func needsQuoting(s string) bool { + if len(s) == 0 { + return true + } for i := 0; i < len(s); { b := s[i] if b < utf8.RuneSelf { - if needsQuotingSet[b] { + // Quote anything except a backslash that would need quoting in a + // JSON string, as well as space and '=' + if b != '\\' && (b == ' ' || b == '=' || !safeSet[b]) { return true } i++ @@ -151,17 +159,3 @@ func needsQuoting(s string) bool { } return false } - -var needsQuotingSet = [utf8.RuneSelf]bool{ - '"': true, - '=': true, -} - -func init() { - for i := 0; i < utf8.RuneSelf; i++ { - r := rune(i) - if unicode.IsSpace(r) || !unicode.IsPrint(r) { - needsQuotingSet[i] = true - } - } -} diff --git a/vendor/golang.org/x/exp/slog/value.go b/vendor/golang.org/x/exp/slog/value.go index f331945d7..3550c46fc 100644 --- a/vendor/golang.org/x/exp/slog/value.go +++ b/vendor/golang.org/x/exp/slog/value.go @@ -7,14 +7,34 @@ package slog import ( "fmt" "math" + "runtime" "strconv" + "strings" "time" + "unsafe" "golang.org/x/exp/slices" ) -// Definitions for Value. -// The Value type itself can be found in value_{safe,unsafe}.go. +// A Value can represent any Go value, but unlike type any, +// it can represent most small values without an allocation. 
+// The zero Value corresponds to nil. +type Value struct { + _ [0]func() // disallow == + // num holds the value for Kinds Int64, Uint64, Float64, Bool and Duration, + // the string length for KindString, and nanoseconds since the epoch for KindTime. + num uint64 + // If any is of type Kind, then the value is in num as described above. + // If any is of type *time.Location, then the Kind is Time and time.Time value + // can be constructed from the Unix nanos in num and the location (monotonic time + // is not preserved). + // If any is of type stringptr, then the Kind is String and the string value + // consists of the length in num and the pointer in any. + // Otherwise, the Kind is Any and any is the value. + // (This implies that Attrs cannot store values of type Kind, *time.Location + // or stringptr.) + any any +} // Kind is the kind of a Value. type Kind int @@ -59,6 +79,26 @@ func (k Kind) String() string { // (No user-provided value has this type.) type kind Kind +// Kind returns v's Kind. +func (v Value) Kind() Kind { + switch x := v.any.(type) { + case Kind: + return x + case stringptr: + return KindString + case timeLocation: + return KindTime + case groupptr: + return KindGroup + case LogValuer: + return KindLogValuer + case kind: // a kind is just a wrapper for a Kind + return KindAny + default: + return KindAny + } +} + //////////////// Constructors // IntValue returns a Value for an int. @@ -112,12 +152,6 @@ func DurationValue(v time.Duration) Value { return Value{num: uint64(v.Nanoseconds()), any: KindDuration} } -// GroupValue returns a new Value for a list of Attrs. -// The caller must not subsequently mutate the argument slice. -func GroupValue(as ...Attr) Value { - return groupValue(as) -} - // AnyValue returns a Value for the supplied value. 
// // If the supplied value is of type Value, it is returned @@ -193,7 +227,7 @@ func (v Value) Any() any { case KindLogValuer: return v.any case KindGroup: - return v.uncheckedGroup() + return v.group() case KindInt64: return int64(v.num) case KindUint64: @@ -240,22 +274,22 @@ func (v Value) Bool() bool { return v.bool() } -func (a Value) bool() bool { - return a.num == 1 +func (v Value) bool() bool { + return v.num == 1 } // Duration returns v's value as a time.Duration. It panics // if v is not a time.Duration. -func (a Value) Duration() time.Duration { - if g, w := a.Kind(), KindDuration; g != w { +func (v Value) Duration() time.Duration { + if g, w := v.Kind(), KindDuration; g != w { panic(fmt.Sprintf("Value kind is %s, not %s", g, w)) } - return a.duration() + return v.duration() } -func (a Value) duration() time.Duration { - return time.Duration(int64(a.num)) +func (v Value) duration() time.Duration { + return time.Duration(int64(v.num)) } // Float64 returns v's value as a float64. It panics @@ -268,8 +302,8 @@ func (v Value) Float64() float64 { return v.float() } -func (a Value) float() float64 { - return math.Float64frombits(a.num) +func (v Value) float() float64 { + return math.Float64frombits(v.num) } // Time returns v's value as a time.Time. It panics @@ -298,12 +332,19 @@ func (v Value) LogValuer() LogValuer { // Group returns v's value as a []Attr. // It panics if v's Kind is not KindGroup. func (v Value) Group() []Attr { - return v.group() + if sp, ok := v.any.(groupptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + panic("Group: bad kind") +} + +func (v Value) group() []Attr { + return unsafe.Slice((*Attr)(v.any.(groupptr)), v.num) } //////////////// Other -// Equal reports whether v and w have equal keys and values. +// Equal reports whether v and w represent the same Go value. 
func (v Value) Equal(w Value) bool { k1 := v.Kind() k2 := w.Kind() @@ -322,7 +363,7 @@ func (v Value) Equal(w Value) bool { case KindAny, KindLogValuer: return v.any == w.any // may panic if non-comparable case KindGroup: - return slices.EqualFunc(v.uncheckedGroup(), w.uncheckedGroup(), Attr.Equal) + return slices.EqualFunc(v.group(), w.group(), Attr.Equal) default: panic(fmt.Sprintf("bad kind: %s", k1)) } @@ -346,8 +387,10 @@ func (v Value) append(dst []byte) []byte { return append(dst, v.duration().String()...) case KindTime: return append(dst, v.time().String()...) - case KindAny, KindGroup, KindLogValuer: - return append(dst, fmt.Sprint(v.any)...) + case KindGroup: + return fmt.Append(dst, v.group()) + case KindAny, KindLogValuer: + return fmt.Append(dst, v.any) default: panic(fmt.Sprintf("bad kind: %s", v.Kind())) } @@ -365,20 +408,19 @@ const maxLogValues = 100 // Resolve repeatedly calls LogValue on v while it implements LogValuer, // and returns the result. -// If v resolves to a group, the group's attributes' values are also resolved. +// If v resolves to a group, the group's attributes' values are not recursively +// resolved. // If the number of LogValue calls exceeds a threshold, a Value containing an // error is returned. // Resolve's return value is guaranteed not to be of Kind KindLogValuer. -func (v Value) Resolve() Value { - v = v.resolve() - if v.Kind() == KindGroup { - resolveAttrs(v.Group()) - } - return v -} - -func (v Value) resolve() Value { +func (v Value) Resolve() (rv Value) { orig := v + defer func() { + if r := recover(); r != nil { + rv = AnyValue(fmt.Errorf("LogValue panicked\n%s", stack(3, 5))) + } + }() + for i := 0; i < maxLogValues; i++ { if v.Kind() != KindLogValuer { return v @@ -389,10 +431,26 @@ func (v Value) resolve() Value { return AnyValue(err) } -// resolveAttrs replaces the values of the given Attrs with their -// resolutions. 
-func resolveAttrs(as []Attr) { - for i, a := range as { - as[i].Value = a.Value.Resolve() +func stack(skip, nFrames int) string { + pcs := make([]uintptr, nFrames+1) + n := runtime.Callers(skip+1, pcs) + if n == 0 { + return "(no stack)" + } + frames := runtime.CallersFrames(pcs[:n]) + var b strings.Builder + i := 0 + for { + frame, more := frames.Next() + fmt.Fprintf(&b, "called from %s (%s:%d)\n", frame.Function, frame.File, frame.Line) + if !more { + break + } + i++ + if i >= nFrames { + fmt.Fprintf(&b, "(rest of stack elided)\n") + break + } } + return b.String() } diff --git a/vendor/golang.org/x/exp/slog/value_119.go b/vendor/golang.org/x/exp/slog/value_119.go new file mode 100644 index 000000000..29b0d7329 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/value_119.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 && !go1.20 + +package slog + +import ( + "reflect" + "unsafe" +) + +type ( + stringptr unsafe.Pointer // used in Value.any when the Value is a string + groupptr unsafe.Pointer // used in Value.any when the Value is a []Attr +) + +// StringValue returns a new Value for a string. +func StringValue(value string) Value { + hdr := (*reflect.StringHeader)(unsafe.Pointer(&value)) + return Value{num: uint64(hdr.Len), any: stringptr(hdr.Data)} +} + +func (v Value) str() string { + var s string + hdr := (*reflect.StringHeader)(unsafe.Pointer(&s)) + hdr.Data = uintptr(v.any.(stringptr)) + hdr.Len = int(v.num) + return s +} + +// String returns Value's value as a string, formatted like fmt.Sprint. Unlike +// the methods Int64, Float64, and so on, which panic if v is of the +// wrong kind, String never panics. +func (v Value) String() string { + if sp, ok := v.any.(stringptr); ok { + // Inlining this code makes a huge difference. 
+ var s string + hdr := (*reflect.StringHeader)(unsafe.Pointer(&s)) + hdr.Data = uintptr(sp) + hdr.Len = int(v.num) + return s + } + return string(v.append(nil)) +} + +// GroupValue returns a new Value for a list of Attrs. +// The caller must not subsequently mutate the argument slice. +func GroupValue(as ...Attr) Value { + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&as)) + return Value{num: uint64(hdr.Len), any: groupptr(hdr.Data)} +} diff --git a/vendor/golang.org/x/exp/slog/value_120.go b/vendor/golang.org/x/exp/slog/value_120.go new file mode 100644 index 000000000..f7d4c0932 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/value_120.go @@ -0,0 +1,39 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 + +package slog + +import "unsafe" + +type ( + stringptr *byte // used in Value.any when the Value is a string + groupptr *Attr // used in Value.any when the Value is a []Attr +) + +// StringValue returns a new Value for a string. +func StringValue(value string) Value { + return Value{num: uint64(len(value)), any: stringptr(unsafe.StringData(value))} +} + +// GroupValue returns a new Value for a list of Attrs. +// The caller must not subsequently mutate the argument slice. +func GroupValue(as ...Attr) Value { + return Value{num: uint64(len(as)), any: groupptr(unsafe.SliceData(as))} +} + +// String returns Value's value as a string, formatted like fmt.Sprint. Unlike +// the methods Int64, Float64, and so on, which panic if v is of the +// wrong kind, String never panics. 
+func (v Value) String() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + return string(v.append(nil)) +} + +func (v Value) str() string { + return unsafe.String(v.any.(stringptr), v.num) +} diff --git a/vendor/golang.org/x/exp/slog/value_safe.go b/vendor/golang.org/x/exp/slog/value_safe.go deleted file mode 100644 index 8b0a92d8f..000000000 --- a/vendor/golang.org/x/exp/slog/value_safe.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build safe_values - -package slog - -// This file defines the most portable representation of Value. - -// A Value can represent any Go value, but unlike type any, -// it can represent most small values without an allocation. -// The zero Value corresponds to nil. -type Value struct { - // num holds the value for Kinds Int64, Uint64, Float64, Bool and Duration, - // and nanoseconds since the epoch for KindTime. - num uint64 - // s holds the value for KindString. - s string - // If any is of type Kind, then the value is in num or s as described above. - // If any is of type *time.Location, then the Kind is Time and time.Time - // value can be constructed from the Unix nanos in num and the location - // (monotonic time is not preserved). - // Otherwise, the Kind is Any and any is the value. - // (This implies that Values cannot store Kinds or *time.Locations.) - any any -} - -// Kind returns v's Kind. -func (v Value) Kind() Kind { - switch k := v.any.(type) { - case Kind: - return k - case timeLocation: - return KindTime - case []Attr: - return KindGroup - case LogValuer: - return KindLogValuer - case kind: // a kind is just a wrapper for a Kind - return KindAny - default: - return KindAny - } -} - -func (v Value) str() string { - return v.s -} - -// StringValue returns a new Value for a string. 
-func StringValue(value string) Value { - return Value{s: value, any: KindString} -} - -// String returns Value's value as a string, formatted like fmt.Sprint. Unlike -// the methods Int64, Float64, and so on, which panic if v is of the -// wrong kind, String never panics. -func (v Value) String() string { - if v.Kind() == KindString { - return v.str() - } - var buf []byte - return string(v.append(buf)) -} - -func groupValue(as []Attr) Value { - return Value{any: as} -} - -func (v Value) group() []Attr { - return v.any.([]Attr) -} - -func (v Value) uncheckedGroup() []Attr { return v.group() } diff --git a/vendor/golang.org/x/exp/slog/value_unsafe.go b/vendor/golang.org/x/exp/slog/value_unsafe.go deleted file mode 100644 index f4276f988..000000000 --- a/vendor/golang.org/x/exp/slog/value_unsafe.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !safe_values - -package slog - -// This file defines the most compact representation of Value. - -import ( - "reflect" - "unsafe" -) - -// A Value can represent any Go value, but unlike type any, -// it can represent most small values without an allocation. -// The zero Value corresponds to nil. -type Value struct { - // num holds the value for Kinds Int64, Uint64, Float64, Bool and Duration, - // the string length for KindString, and nanoseconds since the epoch for KindTime. - num uint64 - // If any is of type Kind, then the value is in num as described above. - // If any is of type *time.Location, then the Kind is Time and time.Time value - // can be constructed from the Unix nanos in num and the location (monotonic time - // is not preserved). - // If any is of type stringptr, then the Kind is String and the string value - // consists of the length in num and the pointer in any. - // Otherwise, the Kind is Any and any is the value. 
- // (This implies that Attrs cannot store values of type Kind, *time.Location - // or stringptr.) - any any -} - -type ( - stringptr unsafe.Pointer // used in Value.any when the Value is a string - groupptr unsafe.Pointer // used in Value.any when the Value is a []Attr -) - -// Kind returns v's Kind. -func (v Value) Kind() Kind { - switch x := v.any.(type) { - case Kind: - return x - case stringptr: - return KindString - case timeLocation: - return KindTime - case groupptr: - return KindGroup - case LogValuer: - return KindLogValuer - case kind: // a kind is just a wrapper for a Kind - return KindAny - default: - return KindAny - } -} - -// StringValue returns a new Value for a string. -func StringValue(value string) Value { - hdr := (*reflect.StringHeader)(unsafe.Pointer(&value)) - return Value{num: uint64(hdr.Len), any: stringptr(hdr.Data)} -} - -func (v Value) str() string { - var s string - hdr := (*reflect.StringHeader)(unsafe.Pointer(&s)) - hdr.Data = uintptr(v.any.(stringptr)) - hdr.Len = int(v.num) - return s -} - -// String returns Value's value as a string, formatted like fmt.Sprint. Unlike -// the methods Int64, Float64, and so on, which panic if v is of the -// wrong kind, String never panics. -func (v Value) String() string { - if sp, ok := v.any.(stringptr); ok { - // Inlining this code makes a huge difference. - var s string - hdr := (*reflect.StringHeader)(unsafe.Pointer(&s)) - hdr.Data = uintptr(sp) - hdr.Len = int(v.num) - return s - } - var buf []byte - return string(v.append(buf)) -} - -func groupValue(as []Attr) Value { - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&as)) - return Value{num: uint64(hdr.Len), any: groupptr(hdr.Data)} -} - -// group returns the Value's value as a []Attr. -// It panics if the Value's Kind is not KindGroup. 
-func (v Value) group() []Attr { - if sp, ok := v.any.(groupptr); ok { - return unsafe.Slice((*Attr)(sp), v.num) - } - panic("Group: bad kind") -} - -func (v Value) uncheckedGroup() []Attr { - return unsafe.Slice((*Attr)(v.any.(groupptr)), v.num) -} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index cbee7a4e2..b18efb743 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -20,7 +20,7 @@ type token struct{} // A zero Group is valid, has no limit on the number of active goroutines, // and does not cancel on error. type Group struct { - cancel func() + cancel func(error) wg sync.WaitGroup @@ -43,7 +43,7 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := withCancelCause(ctx) return &Group{cancel: cancel}, ctx } @@ -52,7 +52,7 @@ func WithContext(ctx context.Context) (*Group, context.Context) { func (g *Group) Wait() error { g.wg.Wait() if g.cancel != nil { - g.cancel() + g.cancel(g.err) } return g.err } @@ -76,7 +76,7 @@ func (g *Group) Go(f func() error) { g.errOnce.Do(func() { g.err = err if g.cancel != nil { - g.cancel() + g.cancel(g.err) } }) } @@ -105,7 +105,7 @@ func (g *Group) TryGo(f func() error) bool { g.errOnce.Do(func() { g.err = err if g.cancel != nil { - g.cancel() + g.cancel(g.err) } }) } diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go new file mode 100644 index 000000000..7d419d376 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/go120.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.20 +// +build go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + return context.WithCancelCause(parent) +} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go new file mode 100644 index 000000000..1795c18ac --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/pre_go120.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.20 +// +build !go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + ctx, cancel := context.WithCancel(parent) + return ctx, func(error) { cancel() } +} diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go new file mode 100644 index 000000000..8473fb792 --- /dev/null +++ b/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -0,0 +1,205 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight // import "golang.org/x/sync/singleflight" + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "runtime/debug" + "sync" +) + +// errGoexit indicates the runtime.Goexit was called in +// the user given function. +var errGoexit = errors.New("runtime.Goexit was called") + +// A panicError is an arbitrary value recovered from a panic +// with the stack trace during the execution of given function. +type panicError struct { + value interface{} + stack []byte +} + +// Error implements error interface. 
+func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func newPanicError(v interface{}) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches Do the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line. + if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. 
+func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +// +// The returned channel will not be closed. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + normalReturn := false + recovered := false + + // use double-defer to distinguish panic from runtime.Goexit, + // more details see https://golang.org/cl/134395 + defer func() { + // the given function invoked runtime.Goexit + if !normalReturn && !recovered { + c.err = errGoexit + } + + g.mu.Lock() + defer g.mu.Unlock() + c.wg.Done() + if g.m[key] == c { + delete(g.m, key) + } + + if e, ok := c.err.(*panicError); ok { + // In order to prevent the waiting channels from being blocked forever, + // needs to ensure that this panic cannot be recovered. + if len(c.chans) > 0 { + go panic(e) + select {} // Keep this goroutine around so that it will appear in the crash dump. 
+ } else { + panic(e) + } + } else if c.err == errGoexit { + // Already in the process of goexit, no need to call again + } else { + // Normal return + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + } + }() + + func() { + defer func() { + if !normalReturn { + // Ideally, we would wait to take a stack trace until we've determined + // whether this is a panic or a runtime.Goexit. + // + // Unfortunately, the only way we can distinguish the two is to see + // whether the recover stopped the goroutine from terminating, and by + // the time we know that, the part of the stack trace relevant to the + // panic has been discarded. + if r := recover(); r != nil { + c.err = newPanicError(r) + } + } + }() + + c.val, c.err = fn() + normalReturn = true + }() + + if !normalReturn { + recovered = true + } +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index abc89c104..e7d3df4bd 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos -// +build go1.9 package unix diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s index db9171c2e..269e173ca 100644 --- a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s index e0fcd9b3d..a4fcef0e0 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc -// +build freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s index 2b99c349a..1e63615c5 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc -// +build darwin dragonfly freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s index d702d4adc..6496c3100 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc -// +build freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s index fe36a7391..4fd1f54da 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s index e5b9a8489..42f7eb9e4 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s index d560019ea..f8902667e 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s index 8fd101d07..3b4734870 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_386.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s index 7ed38e43c..67e29f317 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s index 8ef1d5140..d6ae269ce 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s index 98ae02760..01e5e253c 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && arm64 && gc -// +build linux -// +build arm64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s index 565357288..2abf12f6e 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && loong64 && gc -// +build linux -// +build loong64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s index 21231d2ce..f84bae712 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && (mips64 || mips64le) && gc -// +build linux -// +build mips64 mips64le -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s index 6783b26c6..f08f62807 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips || mipsle) && gc -// +build linux -// +build mips mipsle -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s index 19d498934..bdfc024d2 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) && gc -// +build linux -// +build ppc64 ppc64le -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s index e42eb81d5..2e8c99612 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && gc -// +build riscv64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s index c46aab339..2c394b11e 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && s390x && gc -// +build linux -// +build s390x -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s index 5e7a1169c..fab586a2c 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s index f8c5394c1..f949ec547 100644 --- a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s index 3b54e1858..2f67ba86d 100644 --- a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x && gc -// +build zos -// +build s390x -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go index 0b7c6adb8..a08657890 100644 --- a/vendor/golang.org/x/sys/unix/cap_freebsd.go +++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build freebsd -// +build freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go index 394a3965b..6fb7cb77d 100644 --- a/vendor/golang.org/x/sys/unix/constants.go +++ b/vendor/golang.org/x/sys/unix/constants.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go index 65a998508..d78513461 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc -// +build aix,ppc // Functions to access/create device major and minor numbers matching the // encoding used by AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go index 8fc08ad0a..623a5e697 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc64 -// +build aix,ppc64 // Functions to access/create device major and minor numbers matching the // encoding used AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_zos.go b/vendor/golang.org/x/sys/unix/dev_zos.go index a388e59a0..bb6a64fe9 100644 --- a/vendor/golang.org/x/sys/unix/dev_zos.go +++ b/vendor/golang.org/x/sys/unix/dev_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Functions to access/create device major and minor numbers matching the // encoding used by z/OS. diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index 2499f977b..1ebf11782 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go index a52026557..1095fd31d 100644 --- a/vendor/golang.org/x/sys/unix/endian_big.go +++ b/vendor/golang.org/x/sys/unix/endian_big.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. // //go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 -// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index b0f2bc4ae..b9f0e277b 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. // //go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh -// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go index 29ccc4d13..a96da71f4 100644 --- a/vendor/golang.org/x/sys/unix/env_unix.go +++ b/vendor/golang.org/x/sys/unix/env_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Unix environment variables. 
diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go index cedaf7e02..7753fddea 100644 --- a/vendor/golang.org/x/sys/unix/epoll_zos.go +++ b/vendor/golang.org/x/sys/unix/epoll_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index e9b991258..58c6bfc70 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build dragonfly || freebsd || linux || netbsd || openbsd -// +build dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go index 29d44808b..13b4acd5c 100644 --- a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc) -// +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go index a8068f94f..9e83d18cd 100644 --- a/vendor/golang.org/x/sys/unix/fdset.go +++ b/vendor/golang.org/x/sys/unix/fdset.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go index e377cc9f4..c8bde601e 100644 --- a/vendor/golang.org/x/sys/unix/fstatfs_zos.go +++ b/vendor/golang.org/x/sys/unix/fstatfs_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index b06f52d74..aca5721dd 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && !aix && !hurd -// +build gccgo,!aix,!hurd package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index f98a1c542..d468b7b47 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && !aix && !hurd -// +build gccgo,!aix,!hurd #include #include diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go index e60e49a3d..972d61bd7 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && linux && amd64 -// +build gccgo,linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go index 15721a510..848840ae4 100644 --- a/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux -// +build linux package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_signed.go b/vendor/golang.org/x/sys/unix/ioctl_signed.go index 7def9580e..5b0759bd8 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_signed.go +++ b/vendor/golang.org/x/sys/unix/ioctl_signed.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || solaris -// +build aix solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go index 649913d1e..20f470b9d 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go +++ b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd -// +build darwin dragonfly freebsd hurd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go index cdc21bf76..c8b2a750f 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 47fa6a7eb..cbe24150a 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -663,7 +663,6 @@ echo '// mkerrors.sh' "$@" echo '// Code generated by the command above; see README.md. DO NOT EDIT.' 
echo echo "//go:build ${GOARCH} && ${GOOS}" -echo "// +build ${GOARCH},${GOOS}" echo go tool cgo -godefs -- "$@" _const.go >_error.out cat _error.out | grep -vf _error.grep | grep -vf _signal.grep diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go index ca0513632..4b68e5978 100644 --- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || openbsd || solaris -// +build aix darwin dragonfly freebsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fa93d0aa9..fd45fe529 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux || netbsd -// +build linux netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index 53f1b4c5b..4d0a3430e 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/pledge_openbsd.go b/vendor/golang.org/x/sys/unix/pledge_openbsd.go index eb48294b2..6a09af53e 100644 --- a/vendor/golang.org/x/sys/unix/pledge_openbsd.go +++ b/vendor/golang.org/x/sys/unix/pledge_openbsd.go @@ -8,54 +8,31 @@ import ( "errors" "fmt" "strconv" - "syscall" - "unsafe" ) // Pledge implements the pledge syscall. // -// The pledge syscall does not accept execpromises on OpenBSD releases -// before 6.3. 
-// -// execpromises must be empty when Pledge is called on OpenBSD -// releases predating 6.3, otherwise an error will be returned. +// This changes both the promises and execpromises; use PledgePromises or +// PledgeExecpromises to only change the promises or execpromises +// respectively. // // For more information see pledge(2). func Pledge(promises, execpromises string) error { - maj, min, err := majmin() - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - err = pledgeAvailable(maj, min, execpromises) + pptr, err := BytePtrFromString(promises) if err != nil { return err } - pptr, err := syscall.BytePtrFromString(promises) + exptr, err := BytePtrFromString(execpromises) if err != nil { return err } - // This variable will hold either a nil unsafe.Pointer or - // an unsafe.Pointer to a string (execpromises). - var expr unsafe.Pointer - - // If we're running on OpenBSD > 6.2, pass execpromises to the syscall. - if maj > 6 || (maj == 6 && min > 2) { - exptr, err := syscall.BytePtrFromString(execpromises) - if err != nil { - return err - } - expr = unsafe.Pointer(exptr) - } - - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) - if e != 0 { - return e - } - - return nil + return pledge(pptr, exptr) } // PledgePromises implements the pledge syscall. @@ -64,30 +41,16 @@ func Pledge(promises, execpromises string) error { // // For more information see pledge(2). func PledgePromises(promises string) error { - maj, min, err := majmin() - if err != nil { - return err - } - - err = pledgeAvailable(maj, min, "") - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - // This variable holds the execpromises and is always nil. 
- var expr unsafe.Pointer - - pptr, err := syscall.BytePtrFromString(promises) + pptr, err := BytePtrFromString(promises) if err != nil { return err } - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) - if e != 0 { - return e - } - - return nil + return pledge(pptr, nil) } // PledgeExecpromises implements the pledge syscall. @@ -96,30 +59,16 @@ func PledgePromises(promises string) error { // // For more information see pledge(2). func PledgeExecpromises(execpromises string) error { - maj, min, err := majmin() - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - err = pledgeAvailable(maj, min, execpromises) + exptr, err := BytePtrFromString(execpromises) if err != nil { return err } - // This variable holds the promises and is always nil. - var pptr unsafe.Pointer - - exptr, err := syscall.BytePtrFromString(execpromises) - if err != nil { - return err - } - - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(pptr), uintptr(unsafe.Pointer(exptr)), 0) - if e != 0 { - return e - } - - return nil + return pledge(nil, exptr) } // majmin returns major and minor version number for an OpenBSD system. @@ -147,16 +96,15 @@ func majmin() (major int, minor int, err error) { // pledgeAvailable checks for availability of the pledge(2) syscall // based on the running OpenBSD version. -func pledgeAvailable(maj, min int, execpromises string) error { - // If OpenBSD <= 5.9, pledge is not available. - if (maj == 5 && min != 9) || maj < 5 { - return fmt.Errorf("pledge syscall is not available on OpenBSD %d.%d", maj, min) +func pledgeAvailable() error { + maj, min, err := majmin() + if err != nil { + return err } - // If OpenBSD <= 6.2 and execpromises is not empty, - // return an error - execpromises is not available before 6.3 - if (maj < 6 || (maj == 6 && min <= 2)) && execpromises != "" { - return fmt.Errorf("cannot use execpromises on OpenBSD %d.%d", maj, min) + // Require OpenBSD 6.4 as a minimum. 
+ if maj < 6 || (maj == 6 && min <= 3) { + return fmt.Errorf("cannot call Pledge on OpenBSD %d.%d", maj, min) } return nil diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 463c3eff7..3f0975f3d 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios -// +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index ed0509a01..a4d35db5d 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ios -// +build ios package unix diff --git a/vendor/golang.org/x/sys/unix/race.go b/vendor/golang.org/x/sys/unix/race.go index 6f6c5fec5..714d2aae7 100644 --- a/vendor/golang.org/x/sys/unix/race.go +++ b/vendor/golang.org/x/sys/unix/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin && race) || (linux && race) || (freebsd && race) -// +build darwin,race linux,race freebsd,race package unix diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go index 706e1322a..4a9f6634c 100644 --- a/vendor/golang.org/x/sys/unix/race0.go +++ b/vendor/golang.org/x/sys/unix/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || (darwin && !race) || (linux && !race) || (freebsd && !race) || netbsd || openbsd || solaris || dragonfly || zos -// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly zos package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdents.go b/vendor/golang.org/x/sys/unix/readdirent_getdents.go index 4d6257569..dbd2b6ccb 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdents.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdents.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd -// +build aix dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index 2a4ba47c4..130398b6b 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin -// +build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 3865943f6..c3a62dbb1 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Socket control messages diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go index 0840fe4a5..4a1eab37e 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index 63e8c8383..5ea74da98 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Package unix contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index e94e6cdac..67ce6cef2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix -// +build aix // Aix system calls. // This file is compiled as ordinary Go code, @@ -107,7 +106,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index f2871fa95..1fdaa4760 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix && ppc -// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 75718ec0f..c87f9a9f4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc64 -// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 4217de518..6f328e3a5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd // BSD system call wrappers shared by *BSD based systems // including OS X (Darwin) and FreeBSD. Like the other diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index b37310ce9..0eaecf5fc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index d51ec9963..f36c6707c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 53c96641f..16dc69937 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && go1.12 -// +build darwin,go1.12 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index 4e2d32120..14bab6b2d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index b8da51004..3967bca77 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 47155c483..eff19ada2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 08932093f..4f24b517a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index d151a0d0e..ac30759ec 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go index d5cd64b37..aab725ca7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index 381fd4673..ba46651f8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build hurd -// +build hurd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go index 7cf54a3e4..df89f9e6b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build 386 && hurd -// +build 386,hurd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 87db5a6a8..a863f7052 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -5,7 +5,6 @@ // illumos system calls not present on Solaris. //go:build amd64 && illumos -// +build amd64,illumos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index fb4e50224..a5e1c10e3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -417,7 +417,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- @@ -2482,3 +2483,5 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } return attr, nil } + +//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index c7d9945ea..506dafa7b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && linux -// +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go index 08086ac6a..38d55641b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && (386 || amd64 || mips || mipsle || mips64 || mipsle || ppc64 || ppc64le || ppc || s390x || sparc64) -// +build linux -// +build 386 amd64 mips mipsle mips64 mipsle ppc64 ppc64le ppc s390x sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 70601ce36..d557cf8de 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && linux -// +build amd64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go index 8b0f0f3aa..facdb83b2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && linux && gc -// +build amd64,linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index da2986415..cd2dd797f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && linux -// +build arm,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index f5266689a..cf2ee6c75 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build arm64 && linux -// +build arm64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go index 2b1168d7d..ffc4c2b63 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gc -// +build linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go index 9843fb489..9ebfdcf44 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gc && 386 -// +build linux,gc,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go index a6008fccd..5f2b57c4c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && gc && linux -// +build arm,gc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go index 7740af242..d1a3ad826 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && gccgo && 386 -// +build linux,gccgo,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go index e16a12299..f2f67423e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gccgo && arm -// +build linux,gccgo,arm package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index f6ab02ec1..3d0e98451 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build loong64 && linux -// +build loong64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 93fe59d25..70963a95a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) -// +build linux -// +build mips64 mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index aae7f0ffd..c218ebd28 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && (mips || mipsle) -// +build linux -// +build mips mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index 66eff19a3..e6c48500c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && ppc -// +build linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 806aa2574..7286a9aa8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 5e6ceee12..6f5a28894 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && linux -// +build riscv64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 2f89e8f5d..66f31210d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build s390x && linux -// +build s390x,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 7ca064ae7..11d1f1698 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build sparc64 && linux -// +build sparc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index 5199d282f..7a5eb5743 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 70a9c52e9..62d8957ae 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index 3eb5942f9..ce6a06885 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go index fc6ccfd81..d46d689d1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 6f34479b5..d2882ee04 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -137,18 +137,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { - var _p0 unsafe.Pointer + var bufptr *Statfs_t var bufsize uintptr if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + bufptr = &buf[0] bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } - r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = e1 - } - return + return getfsstat(bufptr, bufsize, flags) } //sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) @@ -326,4 +321,7 @@ func Uname(uname *Utsname) error { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) +//sys getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) +//sys pledge(promises *byte, execpromises *byte) (err error) +//sys unveil(path *byte, flags *byte) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index 6baabcdcb..9ddc89f4f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index bab25360e..70a3c96ee 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 8eed3c4d4..265caa87f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go index 483dde99d..ac4fda171 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go index 04aa43f41..0a451e6dd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build openbsd -// +build openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go index c2796139c..30a308cbb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go index 23199a7ff..ea954330f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index b99cfa134..60c8142d4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -128,7 +128,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index 0bd25ef81..e02d8ceae 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build amd64 && solaris -// +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index f6eda2705..77081de8c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index b6919ca58..05c95bccf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || (linux && !ppc64 && !ppc64le) || netbsd || openbsd || solaris) && gc -// +build darwin dragonfly freebsd linux,!ppc64,!ppc64le netbsd openbsd solaris -// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go index f6f707acf..23f39b7af 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64le || ppc64) && gc -// +build linux -// +build ppc64le ppc64 -// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 4596d041c..d99d05f1b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_linux.go b/vendor/golang.org/x/sys/unix/sysvshm_linux.go index 2c3a4437f..4fcd38de2 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_linux.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux -// +build linux package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 5bb41d17b..79a84f18b 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin && !ios) || linux -// +build darwin,!ios linux package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go index 71bddefdb..9eb0db664 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios -// +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index 616b1b284..7997b1902 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/unveil_openbsd.go b/vendor/golang.org/x/sys/unix/unveil_openbsd.go index 168d5ae77..cb7e598ce 100644 --- a/vendor/golang.org/x/sys/unix/unveil_openbsd.go +++ b/vendor/golang.org/x/sys/unix/unveil_openbsd.go @@ -4,39 +4,48 @@ package unix -import ( - "syscall" - "unsafe" -) +import "fmt" // Unveil implements the unveil syscall. // For more information see unveil(2). // Note that the special case of blocking further // unveil calls is handled by UnveilBlock. func Unveil(path string, flags string) error { - pathPtr, err := syscall.BytePtrFromString(path) - if err != nil { + if err := supportsUnveil(); err != nil { return err } - flagsPtr, err := syscall.BytePtrFromString(flags) + pathPtr, err := BytePtrFromString(path) if err != nil { return err } - _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(flagsPtr)), 0) - if e != 0 { - return e + flagsPtr, err := BytePtrFromString(flags) + if err != nil { + return err } - return nil + return unveil(pathPtr, flagsPtr) } // UnveilBlock blocks future unveil calls. // For more information see unveil(2). func UnveilBlock() error { - // Both pointers must be nil. - var pathUnsafe, flagsUnsafe unsafe.Pointer - _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(pathUnsafe), uintptr(flagsUnsafe), 0) - if e != 0 { - return e + if err := supportsUnveil(); err != nil { + return err } + return unveil(nil, nil) +} + +// supportsUnveil checks for availability of the unveil(2) system call based +// on the running OpenBSD version. 
+func supportsUnveil() error { + maj, min, err := majmin() + if err != nil { + return err + } + + // unveil is not available before 6.4 + if maj < 6 || (maj == 6 && min <= 3) { + return fmt.Errorf("cannot call Unveil on OpenBSD %d.%d", maj, min) + } + return nil } diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index f5f8e9f36..e16879396 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build freebsd || netbsd -// +build freebsd netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go index ca9799b79..2fb219d78 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix -// +build ppc,aix // Created by cgo -godefs - DO NOT EDIT // cgo -godefs -- -maix32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go index 200c8c26f..b0e6f5c85 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix -// +build ppc64,aix // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -maix64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 143007627..e40fa8524 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && darwin -// +build amd64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index ab044a742..bb02aa6c0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index 17bba0e44..c0e0f8694 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index f8c2c5138..6c6923906 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 96310c3be..dd9163f8e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && freebsd -// +build amd64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 777b69def..493a2a793 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index c557ac2db..8b437b307 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go index 341b4d962..67c02dd57 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index f9c7f479b..9c00cbf51 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux -// +build linux package unix @@ -481,10 +480,14 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_AFTER = 0x10 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_KPROBE_MULTI_RETURN = 0x1 + BPF_F_BEFORE = 0x8 + BPF_F_ID = 0x20 + BPF_F_LINK = 0x2000 + BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 @@ -521,6 +524,7 @@ const ( BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 BPF_MEM = 0x60 + BPF_MEMSX = 0x80 BPF_MEMWORDS = 0x10 BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 @@ -776,6 +780,8 @@ const ( DEVLINK_GENL_MCGRP_CONFIG_NAME = "config" DEVLINK_GENL_NAME = "devlink" DEVLINK_GENL_VERSION = 0x1 + DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO = 0x4 + DEVLINK_PORT_FN_CAP_IPSEC_PACKET = 0x8 DEVLINK_PORT_FN_CAP_MIGRATABLE = 0x2 DEVLINK_PORT_FN_CAP_ROCE = 0x1 DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14 @@ -1698,6 +1704,7 @@ const ( KEXEC_ON_CRASH = 0x1 KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 + KEXEC_UPDATE_ELFCOREHDR = 0x4 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CAPABILITIES = 0x1f KEYCTL_CAPS0_BIG_KEY = 0x10 @@ -2275,6 +2282,7 @@ const ( PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd PERF_MEM_LVLNUM_SHIFT = 0x21 + PERF_MEM_LVLNUM_UNC = 0x8 PERF_MEM_LVL_HIT = 0x2 PERF_MEM_LVL_IO = 0x1000 PERF_MEM_LVL_L1 = 0x8 @@ -3461,6 +3469,7 @@ const ( XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 + XDP_PKT_CONTD = 0x1 XDP_RING_NEED_WAKEUP = 0x1 XDP_RX_RING = 0x2 XDP_SHARED_UMEM = 0x1 @@ -3473,6 +3482,7 @@ const ( XDP_UMEM_REG = 0x4 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 + XDP_USE_SG = 0x10 XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 30aee00a5..4920821cf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/386/include -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 8ebfa5127..a0c1e4112 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/amd64/include -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 271a21cdc..c63985560 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/arm/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 910c330a3..47cc62e25 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index a640798c9..27ac4a09e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/loong64/include _const.go @@ -119,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 LASX_CTX_MAGIC = 0x41535801 + LBT_CTX_MAGIC = 0x42540001 LSX_CTX_MAGIC = 0x53580001 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 0d5925d34..54694642a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index d72a00e0b..3adb81d75 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/mips64/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 02ba129f8..2dfe98f0d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8daa6dd96..f5398f84f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mipsle/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 63c8fa2f7..c54f152d6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 930799ec1..76057dc72 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. 
DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 8605a7dd7..e0c3725e2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64le/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 95a016f1c..18f2813ed 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/riscv64/include _const.go @@ -228,6 +227,9 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_GETFDPIC = 0x21 + PTRACE_GETFDPIC_EXEC = 0x0 + PTRACE_GETFDPIC_INTERP = 0x1 RLIMIT_AS = 0x9 RLIMIT_MEMLOCK = 0x8 RLIMIT_NOFILE = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 1ae0108f5..11619d4ec 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 1bb7c6333..396d994da 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/sparc64/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 72f7420d2..130085df4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 8d4eb0c08..84769a1a3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index 9eef9749f..602ded003 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && netbsd -// +build arm,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -marm _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index 3b62ba192..efc0406ee 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index af20e474b..5a6500f83 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 6015fcb2b..a5aeeb979 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 8d44955e4..0e9748a72 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && openbsd -// +build arm,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index ae16fe754..4f4449abc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go index 03d90fe35..76a363f0f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go index 8e2c51b1e..43ca0cdfd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go index 13d403031..b1b8bb200 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && openbsd -// +build riscv64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 1afee6a08..d2ddd3176 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && solaris -// +build amd64,solaris // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index fc7d0506f..4dfd2e051 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Hand edited based on zerrors_linux_s390x.go // TODO: auto-generate. diff --git a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go index 97f20ca28..586317c78 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT. //go:build linux && (arm || arm64) -// +build linux -// +build arm arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go index 0b5f79430..d7c881be7 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT. 
//go:build linux && (mips || mips64) -// +build linux -// +build mips mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go index 2807f7e64..2d2de5d29 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT. //go:build linux && (mipsle || mips64le) -// +build linux -// +build mipsle mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go index 281ea64e3..5adc79fb5 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT. //go:build linux && (386 || amd64) -// +build linux -// +build 386 amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index d1d1d2331..6ea64a3c0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc -// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index f99a18adc..99ee4399a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build aix && ppc64 -// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index c4d50ae50..b68a78362 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 && gc -// +build aix,ppc64,gc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index 6903d3b09..0a87450bf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 && gccgo -// +build aix,ppc64,gccgo package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 1cad561e9..ccb02f240 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build darwin && amd64 -// +build darwin,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index b18edbd0e..1b40b997b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build darwin && arm64 -// +build darwin,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 0c67df64a..aad65fc79 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build dragonfly && amd64 -// +build dragonfly,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index e6e05d145..c0096391a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && 386 -// +build freebsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 7508accac..7664df749 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && amd64 -// +build freebsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 7b56aead4..ae099182c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build freebsd && arm -// +build freebsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index cc623dcaa..11fd5d45b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && arm64 -// +build freebsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index 581849197..c3d2d6530 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && riscv64 -// +build freebsd,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index 6be25cd19..c698cbc01 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build illumos && amd64 -// +build illumos,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1ff3aec74..faca7a557 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux -// +build linux package unix @@ -2195,3 +2194,13 @@ func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) { + _, _, e1 := Syscall6(SYS_CACHESTAT, uintptr(fd), uintptr(unsafe.Pointer(crange)), uintptr(unsafe.Pointer(cstat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 07b549cc2..4def3e9fc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && 386 -// +build linux,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 5f481bf83..fef2bc8ba 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && amd64 -// +build linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 824cd52c7..a9fd76a88 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && arm -// +build linux,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index e77aecfe9..460065028 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && arm64 -// +build linux,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go index 806ffd1e1..c8987d264 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && loong64 -// +build linux,loong64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 961a3afb7..921f43061 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips -// +build linux,mips package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index ed05005e9..44f067829 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && mips64 -// +build linux,mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index d365b718f..e7fa0abf0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64le -// +build linux,mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index c3f1b8bbd..8c5125675 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mipsle -// +build linux,mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index a6574cf98..7392fd45e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc -// +build linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index f40990264..41180434e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && ppc64 -// +build linux,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 9dfcc2997..40c6ce7ae 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc64le -// +build linux,ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 0ab4f2ed7..2cfe34adb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && riscv64 -// +build linux,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 6cde32237..61e6f0709 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && s390x -// +build linux,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 5253d65bf..834b84204 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && sparc64 -// +build linux,sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 2df3c5bac..e91ebc14a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && 386 -// +build netbsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index a60556bab..be28babbc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && amd64 -// +build netbsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 9f788917a..fb587e826 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && arm -// +build netbsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 82a4cb2dc..d576438bb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build netbsd && arm64 -// +build netbsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 66b3b6456..88bfc2885 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && 386 -// +build openbsd,386 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := 
syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 3dcacd30d..4cbeff171 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index c5c4cc112..b8a67b99a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && amd64 -// +build openbsd,amd64 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 2763620b0..1123f2757 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 93bfbb328..af50a65c0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm -// +build openbsd,arm package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index c92231404..82badae39 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index a107b8fda..8fb4ff36a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm64 -// +build openbsd,arm64 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index a6bc32c92..24d7eecb9 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index c427de509..f469a83ee 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && mips64 -// +build openbsd,mips64 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index b4e7bceab..9a498a067 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 60c1a99ae..c26ca2e1a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && ppc64 -// +build openbsd,ppc64 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index ca3f76600..1f224aa41 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -801,8 +801,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getfsstat(SB) + RET +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_utimensat(SB) RET GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pledge(SB) + RET +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unveil(SB) + RET +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 52eba360f..bcc920dd2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && riscv64 -// +build openbsd,riscv64 package unix @@ -2213,6 +2212,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2243,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index 477a7d5b2..87a79c709 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -668,7 +668,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index b40189464..829b87feb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build solaris && amd64 -// +build solaris,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 1d8fe1d4b..94f011238 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 55e048471..3a58ae819 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index d2243cf83..dcb7a0eb7 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index 82dc51bd8..db5a7bf13 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go index cbdda1a4a..7be575a77 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. 
//go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go index f55eae1a8..d6e3174c6 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go index e44054470..ee97157d0 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go index a0db82fce..35c3b91d0 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go index f8298ff9b..5edda7687 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go index 5eb433bbf..0dc9e8b4d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go index 703675c0c..308ddf3a1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 4e0d96107..418664e3d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 01636b838..34d0b86d7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index ad99bc106..b71cf45e2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 89dcc4274..e32df1c1e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go index ee37aaa0c..15ad6111f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 9862853d3..fcf3ecbdd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && linux -// +build 386,linux package unix @@ -448,4 +447,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 8901f0f4e..f56dc2504 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux package unix @@ -370,4 +369,6 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 6902c37ee..974bf2467 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux package unix @@ -412,4 +411,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index a6d3dff81..39a2739e2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm64 && linux -// +build arm64,linux package unix @@ -315,4 +314,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index b18f3f710..cf9c9d77e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux package unix @@ -309,4 +308,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 0302e5e3d..10b7362ef 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux package unix @@ -432,4 +431,5 @@ const ( SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 + SYS_FCHMODAT2 = 4452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 6693ba4a0..cd4d8b4fd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64 && linux -// +build mips64,linux package unix @@ -362,4 +361,5 @@ const ( SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 + SYS_FCHMODAT2 = 5452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index fd93f4987..2c0efca81 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux package unix @@ -362,4 +361,5 @@ const ( SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 + SYS_FCHMODAT2 = 5452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 760ddcadc..a72e31d39 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux package unix @@ -432,4 +431,5 @@ const ( SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 + SYS_FCHMODAT2 = 4452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index cff2b2555..c7d1e3747 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc && linux -// +build ppc,linux package unix @@ -439,4 +438,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index a4b2405d0..f4d4838c8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux package unix @@ -411,4 +410,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index aca54b4e3..b64f0e591 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux package unix @@ -411,4 +410,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 9d1738d64..95711195a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && linux -// +build riscv64,linux package unix @@ -316,4 +315,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 022878dc8..f94e943bc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux package unix @@ -377,4 +376,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 4100a761c..ba0c2bc51 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux package unix @@ -390,4 +389,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go index 3a6699eba..b2aa8cd49 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go index 5677cd4f1..524a1b1c9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go index e784cb6db..d59b943ac 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go index bd4952efa..31e771d53 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 597733813..9fd77c6cb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index 16af29189..af10af28c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index f59b18a97..cc2028af4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index 721ef5910..c06dd4415 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go index 01c43a01f..9ddbf3e08 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go index f258cfa24..19a6ee413 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go index 07919e0ec..05192a782 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go index 073daad43..b2e308581 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index 7a8161c1d..3e6d57cae 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc && aix -// +build ppc,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index 07ed733c5..3a219bdce 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix -// +build ppc64,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 690cefc3d..091d107f3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 5bffc10ea..28ff4ef74 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index d0ba8e9b8..30e405bb4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 29dc48337..6cbd094a3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 0a89b2890..7c03b6ee7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index c8666bb15..422107ee8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 88fb48a88..505a12acf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index 698dc975e..cc986c790 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 18aa70b42..997bcd55a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. //go:build linux -// +build linux package unix @@ -5883,3 +5882,15 @@ type SchedAttr struct { } const SizeofSchedAttr = 0x38 + +type Cachestat_t struct { + Cache uint64 + Dirty uint64 + Writeback uint64 + Evicted uint64 + Recently_evicted uint64 +} +type CachestatRange struct { + Off uint64 + Len uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 6d8acbcc5..438a30aff 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 59293c688..adceca355 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && linux -// +build amd64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 40cfa38c2..eeaa00a37 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 055bc4216..6739aa91d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index f28affbc6..9920ef631 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 9d71e7ccd..2923b799a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips && linux -// +build mips,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index fd5ccd332..ce2750ee4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 7704de77a..3038811d7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index df00b8757..efc6fed18 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 0942840db..9a654b75a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc && linux -// +build ppc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 034874395..40d358e33 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index bad067047..148c6ceb8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 1b4c97c32..72ba81543 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index aa268d025..71e765508 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build s390x && linux -// +build s390x,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 444045b6c..4abbdb9de 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 9bc4c8f9d..f22e7947d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index bb05f655d..066a7d83d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index db40e3a19..439548ec9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 11121151c..16085d3bb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 26eba23b7..afd13a3af 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 5a5479886..5d97f1f9b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index be58c4e1f..34871cdc1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 52338266c..5911bceb3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index 605cfdb12..e4f24f3bc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go index d6724c010..ca50a7930 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go index ddfd27a43..d7d7f7902 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 0400747c6..14160576d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && solaris -// +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index aec1efcb3..54f31be63 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Hand edited based on ztypes_linux_s390x.go // TODO: auto-generate. diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go index a20ebea63..ce2d713d6 100644 --- a/vendor/golang.org/x/sys/windows/aliases.go +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && go1.9 -// +build windows,go1.9 package windows diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s index fdbbbcd31..ba64caca5 100644 --- a/vendor/golang.org/x/sys/windows/empty.s +++ b/vendor/golang.org/x/sys/windows/empty.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.12 -// +build !go1.12 // This file is here to allow bodyless functions with go:linkname for Go 1.11 // and earlier (see https://golang.org/issue/23311). 
diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go index 2cd60645e..6c366955d 100644 --- a/vendor/golang.org/x/sys/windows/eventlog.go +++ b/vendor/golang.org/x/sys/windows/eventlog.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go index 8563f79c5..dbcdb090c 100644 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build generate -// +build generate package windows diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go index 9196b089c..0f1bdc386 100644 --- a/vendor/golang.org/x/sys/windows/race.go +++ b/vendor/golang.org/x/sys/windows/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && race -// +build windows,race package windows diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go index 7bae4817a..0c78da78b 100644 --- a/vendor/golang.org/x/sys/windows/race0.go +++ b/vendor/golang.org/x/sys/windows/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && !race -// +build windows,!race package windows diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index c44a1b963..a9dc6308d 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go index 4fc01434e..6a4f9ce6a 100644 --- a/vendor/golang.org/x/sys/windows/str.go +++ b/vendor/golang.org/x/sys/windows/str.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index 8732cdb95..e85ed6b9c 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows // Package windows contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 35cfc57ca..fb6cfd046 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -233,6 +233,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock //sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock //sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 +//sys GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW @@ -969,7 +970,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { if n > 0 
{ sl += int32(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index b88dc7c85..359780f6a 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1094,7 +1094,33 @@ const ( SOMAXCONN = 0x7fffffff - TCP_NODELAY = 1 + TCP_NODELAY = 1 + TCP_EXPEDITED_1122 = 2 + TCP_KEEPALIVE = 3 + TCP_MAXSEG = 4 + TCP_MAXRT = 5 + TCP_STDURG = 6 + TCP_NOURG = 7 + TCP_ATMARK = 8 + TCP_NOSYNRETRIES = 9 + TCP_TIMESTAMPS = 10 + TCP_OFFLOAD_PREFERENCE = 11 + TCP_CONGESTION_ALGORITHM = 12 + TCP_DELAY_FIN_ACK = 13 + TCP_MAXRTMS = 14 + TCP_FASTOPEN = 15 + TCP_KEEPCNT = 16 + TCP_KEEPIDLE = TCP_KEEPALIVE + TCP_KEEPINTVL = 17 + TCP_FAIL_CONNECT_ON_ICMP_ERROR = 18 + TCP_ICMP_ERROR_INFO = 19 + + UDP_NOCHECKSUM = 1 + UDP_SEND_MSG_SIZE = 2 + UDP_RECV_MAX_COALESCED_SIZE = 3 + UDP_CHECKSUM_COVERAGE = 20 + + UDP_COALESCED_INFO = 3 SHUT_RD = 0 SHUT_WR = 1 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 8b1688de4..db6282e00 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -253,6 +253,7 @@ var ( procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetFileTime = modkernel32.NewProc("GetFileTime") procGetFileType = modkernel32.NewProc("GetFileType") procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") @@ -2185,6 +2186,14 
@@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, return } +func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetFileType(filehandle Handle) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) n = uint32(r0) diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 0e6ae69a5..ab0fbb79b 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,8 +1,8 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) The [Go][] implementation of [gRPC][]: A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the @@ -14,21 +14,14 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the ## Installation -With [Go module][] support (Go 1.11+), simply add the following import +Simply add the following import to your code, and then `go [build|run|test]` +will automatically fetch the necessary dependencies: + ```go import "google.golang.org/grpc" ``` -to your code, and then `go [build|run|test]` will automatically fetch the -necessary dependencies. 
- -Otherwise, to install the `grpc-go` package, run the following command: - -```console -$ go get -u google.golang.org/grpc -``` - > **Note:** If you are trying to access `grpc-go` from **China**, see the > [FAQ](#FAQ) below. @@ -56,15 +49,6 @@ To build Go code, there are several options: - Set up a VPN and access google.golang.org through that. -- Without Go module support: `git clone` the repo manually: - - ```sh - git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc - ``` - - You will need to do the same for all of grpc's dependencies in `golang.org`, - e.g. `golang.org/x/net`. - - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: @@ -76,33 +60,13 @@ To build Go code, there are several options: ``` Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). + golang.org as well. For details, refer to [golang/go issue + #28652](https://github.com/golang/go/issues/28652). ### Compiling error, undefined: grpc.SupportPackageIsVersion -#### If you are using Go modules: - -Ensure your gRPC-Go version is `require`d at the appropriate version in -the same module containing the generated `.pb.go` files. For example, -`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: - -```go -module - -require ( - google.golang.org/grpc v1.27.0 -) -``` - -#### If you are *not* using Go modules: - -Update the `proto` package, gRPC package, and rebuild the `.proto` files: - -```sh -go get -u github.com/golang/protobuf/{proto,protoc-gen-go} -go get -u google.golang.org/grpc -protoc --go_out=plugins=grpc:. *.proto -``` +Please update to the latest version of gRPC-Go using +`go get google.golang.org/grpc`. ### How to turn on logging @@ -121,9 +85,11 @@ possible reasons, including: 1. 
mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown - 1. Keepalive parameters caused connection shutdown, for example if you have configured - your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). - If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + 1. Keepalive parameters caused connection shutdown, for example if you have + configured your server to terminate connections regularly to [trigger DNS + lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your + [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), to allow longer RPC calls to finish. It can be tricky to debug this because the error happens on the client side but diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 3efca4591..52d530d7a 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -34,26 +34,26 @@ import ( // key/value pairs. Keys must be hashable, and users should define their own // types for keys. Values should not be modified after they are added to an // Attributes or if they were received from one. If values implement 'Equal(o -// interface{}) bool', it will be called by (*Attributes).Equal to determine -// whether two values with the same key should be considered equal. +// any) bool', it will be called by (*Attributes).Equal to determine whether +// two values with the same key should be considered equal. 
type Attributes struct { - m map[interface{}]interface{} + m map[any]any } // New returns a new Attributes containing the key/value pair. -func New(key, value interface{}) *Attributes { - return &Attributes{m: map[interface{}]interface{}{key: value}} +func New(key, value any) *Attributes { + return &Attributes{m: map[any]any{key: value}} } // WithValue returns a new Attributes containing the previous keys and values // and the new key/value pair. If the same key appears multiple times, the // last value overwrites all previous values for that key. To remove an // existing key, use a nil value. value should not be modified later. -func (a *Attributes) WithValue(key, value interface{}) *Attributes { +func (a *Attributes) WithValue(key, value any) *Attributes { if a == nil { return New(key, value) } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} + n := &Attributes{m: make(map[any]any, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } @@ -63,20 +63,19 @@ func (a *Attributes) WithValue(key, value interface{}) *Attributes { // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. The returned value should not be modified. -func (a *Attributes) Value(key interface{}) interface{} { +func (a *Attributes) Value(key any) any { if a == nil { return nil } return a.m[key] } -// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) -// bool' is implemented for a value in the attributes, it is called to -// determine if the value matches the one stored in the other attributes. If -// Equal is not implemented, standard equality is used to determine if the two -// values are equal. Note that some types (e.g. maps) aren't comparable by -// default, so they must be wrapped in a struct, or in an alias type, with Equal -// defined. +// Equal returns whether a and o are equivalent. 
If 'Equal(o any) bool' is +// implemented for a value in the attributes, it is called to determine if the +// value matches the one stored in the other attributes. If Equal is not +// implemented, standard equality is used to determine if the two values are +// equal. Note that some types (e.g. maps) aren't comparable by default, so +// they must be wrapped in a struct, or in an alias type, with Equal defined. func (a *Attributes) Equal(o *Attributes) bool { if a == nil && o == nil { return true @@ -93,7 +92,7 @@ func (a *Attributes) Equal(o *Attributes) bool { // o missing element of a return false } - if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if eq, ok := v.(interface{ Equal(o any) bool }); ok { if !eq.Equal(ov) { return false } @@ -112,19 +111,31 @@ func (a *Attributes) String() string { sb.WriteString("{") first := true for k, v := range a.m { - var key, val string - if str, ok := k.(interface{ String() string }); ok { - key = str.String() - } - if str, ok := v.(interface{ String() string }); ok { - val = str.String() - } if !first { sb.WriteString(", ") } - sb.WriteString(fmt.Sprintf("%q: %q, ", key, val)) + sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) first = false } sb.WriteString("}") return sb.String() } + +func str(x any) (s string) { + if v, ok := x.(fmt.Stringer); ok { + return fmt.Sprint(v) + } else if v, ok := x.(string); ok { + return v + } + return fmt.Sprintf("<%p>", x) +} + +// MarshalJSON helps implement the json.Marshaler interface, thereby rendering +// the Attributes correctly when printing (via pretty.JSON) structs containing +// Attributes as fields. +// +// Is it impossible to unmarshal attributes from a JSON representation and this +// method is meant only for debugging purposes. 
+func (a *Attributes) MarshalJSON() ([]byte, error) { + return []byte(a.String()), nil +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 8f00523c0..d79560a2e 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -39,6 +40,8 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) + + logger = grpclog.Component("balancer") ) // Register registers the balancer builder to the balancer map. b.Name @@ -51,6 +54,12 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { + if strings.ToLower(b.Name()) != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } m[strings.ToLower(b.Name())] = b } @@ -70,6 +79,12 @@ func init() { // Note that the compare is done in a case-insensitive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { + if strings.ToLower(name) != name { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer retrieved for name %q. 
grpc-go will be switching to case sensitive balancer registries soon", name) + } if b, ok := m[strings.ToLower(name)]; ok { return b } @@ -105,8 +120,8 @@ type SubConn interface { // // This will trigger a state transition for the SubConn. // - // Deprecated: This method is now part of the ClientConn interface and will - // eventually be removed from here. + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() @@ -115,6 +130,13 @@ type SubConn interface { // creates a new one and returns it. Returns a close function which must // be called when the Producer is no longer needed. GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() } // NewSubConnOptions contains options to create new SubConn. @@ -129,6 +151,11 @@ type NewSubConnOptions struct { // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn HealthCheckEnabled bool + // StateListener is called when the state of the subconn changes. If nil, + // Balancer.UpdateSubConnState will be called instead. Will never be + // invoked until after Connect() is called on the SubConn created with + // these options. + StateListener func(SubConnState) } // State contains the balancer's state relevant to the gRPC ClientConn. @@ -150,16 +177,24 @@ type ClientConn interface { // NewSubConn is called by balancer to create a new SubConn. // It doesn't block and wait for the connections to be established. 
// Behaviors of the SubConn can be controlled by options. + // + // Deprecated: please be aware that in a future version, SubConns will only + // support one address per SubConn. NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. + // + // Deprecated: use SubConn.Shutdown instead. RemoveSubConn(SubConn) // UpdateAddresses updates the addresses used in the passed in SubConn. // gRPC checks if the currently connected address is still in the new list. // If so, the connection will be kept. Else, the connection will be // gracefully closed, and a new connection will be created. // - // This will trigger a state transition for the SubConn. + // This may trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has @@ -250,7 +285,7 @@ type DoneInfo struct { // trailing metadata. // // The only supported type now is *orca_v3.LoadReport. - ServerLoad interface{} + ServerLoad any } var ( @@ -343,9 +378,13 @@ type Balancer interface { ResolverError(error) // UpdateSubConnState is called by gRPC when the state of a SubConn // changes. + // + // Deprecated: Use NewSubConnOptions.StateListener when creating the + // SubConn instead. UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. + // Close closes the balancer. The balancer is not currently required to + // call SubConn.Shutdown for its existing SubConns; however, this will be + // required in a future release, so it is recommended. Close() } @@ -390,15 +429,14 @@ var ErrBadResolverState = errors.New("bad resolver state") type ProducerBuilder interface { // Build creates a Producer. 
The first parameter is always a // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as interface{} to avoid a - // dependency cycle. Should also return a close function that will be - // called when all references to the Producer have been given up. - Build(grpcClientConnInterface interface{}) (p Producer, close func()) + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Should also return a close function that will be called when all + // references to the Producer have been given up. + Build(grpcClientConnInterface any) (p Producer, close func()) } // A Producer is a type shared among potentially many consumers. It is // associated with a SubConn, and an implementation will typically contain // other methods to provide additional functionality, e.g. configuration or // subscription registration. -type Producer interface { -} +type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 3929c26d3..a7f1eeec8 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -105,7 +105,12 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). 
- sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: b.config.HealthCheck, + StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue @@ -121,10 +126,10 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { sc := sci.(balancer.SubConn) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { - b.cc.RemoveSubConn(sc) + sc.Shutdown() b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in UpdateSubConnState. + // The entry will be deleted in updateSubConnState. } } // If resolver state contains no addresses, return an error so ClientConn @@ -177,7 +182,12 @@ func (b *baseBalancer) regeneratePicker() { b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } +// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState if logger.V(2) { logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) @@ -204,8 +214,8 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. 
+ // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) case connectivity.TransientFailure: // Save error to be reported via picker. @@ -226,7 +236,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } // Close is a nop because base balancer doesn't have internal state to clean up, -// and it doesn't need to call RemoveSubConn for the SubConns. +// and it doesn't need to call Shutdown for the SubConns. func (b *baseBalancer) Close() { } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 04b9ad411..a4411c22b 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -99,20 +99,6 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat // lock held. But the lock guards only the scheduling part. The actual // callback is called asynchronously without the lock being held. ok := ccb.serializer.Schedule(func(_ context.Context) { - // If the addresses specified in the update contain addresses of type - // "grpclb" and the selected LB policy is not "grpclb", these addresses - // will be filtered out and ccs will be modified with the updated - // address list. 
- if ccb.curBalancerName != grpclbName { - var addrs []resolver.Address - for _, addr := range ccs.ResolverState.Addresses { - if addr.Type == resolver.GRPCLB { - continue - } - addrs = append(addrs, addr) - } - ccs.ResolverState.Addresses = addrs - } errCh <- ccb.balancer.UpdateClientConnState(*ccs) }) if !ok { @@ -139,7 +125,9 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { ccb.mu.Lock() ccb.serializer.Schedule(func(_ context.Context) { - ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) }) ccb.mu.Unlock() } @@ -221,7 +209,7 @@ func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { } ccb.mode = m - done := ccb.serializer.Done + done := ccb.serializer.Done() b := ccb.balancer ok := ccb.serializer.Schedule(func(_ context.Context) { // Close the serializer to ensure that no more calls from gRPC are sent @@ -238,11 +226,9 @@ func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { } ccb.mu.Unlock() - // Give enqueued callbacks a chance to finish. + // Give enqueued callbacks a chance to finish before closing the balancer. <-done - // Spawn a goroutine to close the balancer (since it may block trying to - // cleanup all allocated resources) and return early. 
- go b.Close() + b.Close() } // exitIdleMode is invoked by grpc when the channel exits idle mode either @@ -314,29 +300,19 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } - acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} + acbw := &acBalancerWrapper{ + ccb: ccb, + ac: ac, + producers: make(map[balancer.ProducerBuilder]*refCountedProducer), + stateListener: opts.StateListener, + } ac.acbw = acbw return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - if ccb.isIdleOrClosed() { - // It it safe to ignore this call when the balancer is closed or in idle - // because the ClientConn takes care of closing the connections. - // - // Not returning early from here when the balancer is closed or in idle - // leads to a deadlock though, because of the following sequence of - // calls when holding cc.mu: - // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> - // ccb.RemoveAddrConn --> cc.removeAddrConn - return - } - - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - ccb.cc.removeAddrConn(acbw.ac, errConnDrain) + // The graceful switch balancer will never call this. + logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { @@ -380,7 +356,9 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. 
type acBalancerWrapper struct { - ac *addrConn // read-only + ac *addrConn // read-only + ccb *ccBalancerWrapper // read-only + stateListener func(balancer.SubConnState) mu sync.Mutex producers map[balancer.ProducerBuilder]*refCountedProducer @@ -398,6 +376,23 @@ func (acbw *acBalancerWrapper) Connect() { go acbw.ac.connect() } +func (acbw *acBalancerWrapper) Shutdown() { + ccb := acbw.ccb + if ccb.isIdleOrClosed() { + // It is safe to ignore this call when the balancer is closed or in idle + // because the ClientConn takes care of closing the connections. + // + // Not returning early from here when the balancer is closed or in idle + // leads to a deadlock though, because of the following sequence of + // calls when holding cc.mu: + // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> + // ccb.RemoveAddrConn --> cc.removeAddrConn + return + } + + ccb.cc.removeAddrConn(acbw.ac, errConnDrain) +} + // NewStream begins a streaming RPC on the addrConn. If the addrConn is not // ready, blocks until it is or ctx expires. Returns an error when the context // expires or the addrConn is shut down. @@ -411,7 +406,7 @@ func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, // Invoke performs a unary RPC. If the addrConn is not ready, returns // errSubConnNotReady. -func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
if err != nil { return err diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index ec2c2fa14..595480112 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index e6a1dc5d7..788c89c16 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -26,12 +26,7 @@ import ( // received. This is typically called by generated code. // // All errors returned by Invoke are compatible with the status package. -func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { - if err := cc.idlenessMgr.onCallBegin(); err != nil { - return err - } - defer cc.idlenessMgr.onCallEnd() - +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -61,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption { // received. This is typically called by generated code. // // DEPRECATED: Use ClientConn.Invoke instead. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { return cc.Invoke(ctx, method, args, reply, opts...) 
} var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} -func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 95a7459b0..429c389e4 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -34,9 +34,12 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/idle" + "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -53,8 +56,6 @@ import ( const ( // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second - // must match grpclbName in grpclb/grpclb.go - grpclbName = "grpclb" ) var ( @@ -137,7 +138,6 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, - csMgr: &connectivityStateManager{}, conns: make(map[*addrConn]struct{}), dopts: defaultDialOptions(), czData: new(channelzData), @@ -190,6 +190,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // Register ClientConn with channelz. 
cc.channelzRegistration(target) + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + if err := cc.validateTransportCredentials(); err != nil { return nil, err } @@ -265,7 +267,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // Configure idleness support with configured idle timeout or default idle // timeout duration. Idleness can be explicitly disabled by the user, by // setting the dial option to 0. - cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) + cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) // Return early for non-blocking dials. if !cc.dopts.block { @@ -316,6 +318,16 @@ func (cc *ClientConn) addTraceEvent(msg string) { channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) } +type idler ClientConn + +func (i *idler) EnterIdleMode() error { + return (*ClientConn)(i).enterIdleMode() +} + +func (i *idler) ExitIdleMode() error { + return (*ClientConn)(i).exitIdleMode() +} + // exitIdleMode moves the channel out of idle mode by recreating the name // resolver and load balancer. func (cc *ClientConn) exitIdleMode() error { @@ -325,8 +337,8 @@ func (cc *ClientConn) exitIdleMode() error { return errConnClosing } if cc.idlenessState != ccIdlenessStateIdle { + channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) cc.mu.Unlock() - logger.Info("ClientConn asked to exit idle mode when not in idle mode") return nil } @@ -349,7 +361,7 @@ func (cc *ClientConn) exitIdleMode() error { cc.idlenessState = ccIdlenessStateExitingIdle exitedIdle := false if cc.blockingpicker == nil { - cc.blockingpicker = newPickerWrapper() + cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers) } else { cc.blockingpicker.exitIdleMode() exitedIdle = true @@ -392,12 +404,13 @@ func (cc *ClientConn) exitIdleMode() error { // name resolver, load balancer and any subchannels. 
func (cc *ClientConn) enterIdleMode() error { cc.mu.Lock() + defer cc.mu.Unlock() + if cc.conns == nil { - cc.mu.Unlock() return ErrClientConnClosing } if cc.idlenessState != ccIdlenessStateActive { - logger.Error("ClientConn asked to enter idle mode when not active") + channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) return nil } @@ -418,14 +431,14 @@ func (cc *ClientConn) enterIdleMode() error { cc.balancerWrapper.enterIdleMode() cc.csMgr.updateState(connectivity.Idle) cc.idlenessState = ccIdlenessStateIdle - cc.mu.Unlock() + cc.addTraceEvent("entering idle mode") go func() { - cc.addTraceEvent("entering idle mode") for ac := range conns { ac.tearDown(errConnIdling) } }() + return nil } @@ -474,7 +487,6 @@ func (cc *ClientConn) validateTransportCredentials() error { func (cc *ClientConn) channelzRegistration(target string) { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) cc.addTraceEvent("created") - cc.csMgr.channelzID = cc.channelzID } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -491,7 +503,7 @@ func chainUnaryClientInterceptors(cc *ClientConn) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) 
} } @@ -503,7 +515,7 @@ func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, final if curr == len(interceptors)-1 { return finalInvoker } - return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) } } @@ -539,13 +551,27 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr } } +// newConnectivityStateManager creates an connectivityStateManager with +// the specified id. +func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { + return &connectivityStateManager{ + channelzID: id, + pubSub: grpcsync.NewPubSub(ctx), + } +} + // connectivityStateManager keeps the connectivity.State of ClientConn. // This struct will eventually be exported so the balancers can access it. +// +// TODO: If possible, get rid of the `connectivityStateManager` type, and +// provide this functionality using the `PubSub`, to avoid keeping track of +// the connectivity state at two places. type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} channelzID *channelz.Identifier + pubSub *grpcsync.PubSub } // updateState updates the connectivity.State of ClientConn. @@ -561,6 +587,8 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state + csm.pubSub.Publish(state) + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. 
@@ -590,7 +618,7 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { type ClientConnInterface interface { // Invoke performs a unary RPC and returns after the response is received // into reply. - Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error // NewStream begins a streaming RPC. NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) } @@ -622,7 +650,7 @@ type ClientConn struct { channelzID *channelz.Identifier // Channelz identifier for the channel. resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. - idlenessMgr idlenessManager + idlenessMgr idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -668,6 +696,19 @@ const ( ccIdlenessStateExitingIdle ) +func (s ccIdlenessState) String() string { + switch s { + case ccIdlenessStateActive: + return "active" + case ccIdlenessStateIdle: + return "idle" + case ccIdlenessStateExitingIdle: + return "exitingIdle" + default: + return "unknown" + } +} + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. 
// @@ -759,6 +800,16 @@ func init() { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } emptyServiceConfig = cfg.Config.(*ServiceConfig) + + internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { + return cc.csMgr.pubSub.Subscribe(s) + } + internal.EnterIdleModeForTesting = func(cc *ClientConn) error { + return cc.enterIdleMode() + } + internal.ExitIdleModeForTesting = func(cc *ClientConn) error { + return cc.exitIdleMode() + } } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { @@ -867,6 +918,20 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi cc.balancerWrapper.updateSubConnState(sc, s, err) } +// Makes a copy of the input addresses slice and clears out the balancer +// attributes field. Addresses are passed during subconn creation and address +// update operations. In both cases, we will clear the balancer attributes by +// calling this function, and therefore we will be able to use the Equal method +// provided by the resolver.Address type for comparison. +func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { + out := make([]resolver.Address, len(in)) + for i := range in { + out[i] = in[i] + out[i].BalancerAttributes = nil + } + return out +} + // newAddrConn creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. @@ -874,7 +939,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub ac := &addrConn{ state: connectivity.Idle, cc: cc, - addrs: addrs, + addrs: copyAddressesWithoutBalancerAttributes(addrs), scopts: opts, dopts: cc.dopts, czData: new(channelzData), @@ -995,8 +1060,9 @@ func equalAddresses(a, b []resolver.Address) bool { // connections or connection attempts. 
func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) + addrs = copyAddressesWithoutBalancerAttributes(addrs) if equalAddresses(ac.addrs, addrs) { ac.mu.Unlock() return @@ -1031,8 +1097,8 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.cancel() ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) - // We have to defer here because GracefulClose => Close => onClose, which - // requires locking ac.mu. + // We have to defer here because GracefulClose => onClose, which requires + // locking ac.mu. if ac.transport != nil { defer ac.transport.GracefulClose() ac.transport = nil @@ -1137,23 +1203,13 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel } var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { + // No service config or no LB policy specified in config. + newBalancerName = PickFirstBalancerName + } else if cc.sc.lbConfig != nil { newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName - } + } else { // cc.sc.LB != nil + newBalancerName = *cc.sc.LB } cc.balancerWrapper.switchTo(newBalancerName) } @@ -1192,7 +1248,10 @@ func (cc *ClientConn) ResetConnectBackoff() { // Close tears down the ClientConn and all underlying connections. 
func (cc *ClientConn) Close() error { - defer cc.cancel() + defer func() { + cc.cancel() + <-cc.csMgr.pubSub.Done() + }() cc.mu.Lock() if cc.conns == nil { @@ -1226,7 +1285,7 @@ func (cc *ClientConn) Close() error { rWrapper.close() } if idlenessMgr != nil { - idlenessMgr.close() + idlenessMgr.Close() } for ac := range conns { @@ -1336,12 +1395,14 @@ func (ac *addrConn) resetTransport() { if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { ac.cc.resolveNow(resolver.ResolveNowOptions{}) - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + ac.mu.Lock() if acCtx.Err() != nil { + // addrConn was torn down. + ac.mu.Unlock() return } - ac.mu.Lock() + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. @@ -1537,7 +1598,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // Set up the health check helper functions. currentTr := ac.transport - newStream := func(method string) (interface{}, error) { + newStream := func(method string) (any, error) { ac.mu.Lock() if ac.transport != currentTr { ac.mu.Unlock() @@ -1625,16 +1686,7 @@ func (ac *addrConn) tearDown(err error) { ac.updateConnectivityState(connectivity.Shutdown, nil) ac.cancel() ac.curAddr = resolver.Address{} - if err == errConnDrain && curTr != nil { - // GracefulClose(...) may be executed multiple times when - // i) receiving multiple GoAway frames from the server; or - // ii) there are concurrent name resolver/Balancer triggered - // address removal and GoAway. - // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. - ac.mu.Unlock() - curTr.GracefulClose() - ac.mu.Lock() - } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel deleted", Severity: channelz.CtInfo, @@ -1648,6 +1700,29 @@ func (ac *addrConn) tearDown(err error) { // being deleted right away. 
channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() + + // We have to release the lock before the call to GracefulClose/Close here + // because both of them call onClose(), which requires locking ac.mu. + if curTr != nil { + if err == errConnDrain { + // Close the transport gracefully when the subConn is being shutdown. + // + // GracefulClose() may be executed multiple times if: + // - multiple GoAway frames are received from the server + // - there are concurrent name resolver or balancer triggered + // address removal and GoAway + curTr.GracefulClose() + } else { + // Hard close the transport when the channel is entering idle or is + // being shutdown. In the case where the channel is being shutdown, + // closing of transports is also taken care of by cancelation of cc.ctx. + // But in the case where the channel is entering idle, we need to + // explicitly close the transports here. Instead of distinguishing + // between these two cases, it is simpler to close the transport + // unconditionally here. + curTr.Close(err) + } + } } func (ac *addrConn) getState() connectivity.State { @@ -1807,19 +1882,70 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { } // parseTarget uses RFC 3986 semantics to parse the given target into a -// resolver.Target struct containing scheme, authority and url. Query -// params are stripped from the endpoint. +// resolver.Target struct containing url. Query params are stripped from the +// endpoint. 
func parseTarget(target string) (resolver.Target, error) { u, err := url.Parse(target) if err != nil { return resolver.Target{}, err } - return resolver.Target{ - Scheme: u.Scheme, - Authority: u.Host, - URL: *u, - }, nil + return resolver.Target{URL: *u}, nil +} + +func encodeAuthority(authority string) string { + const upperhex = "0123456789ABCDEF" + + // Return for characters that must be escaped as per + // Valid chars are mentioned here: + // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2 + shouldEscape := func(c byte) bool { + // Alphanum are always allowed. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '_', '.', '~': // Unreserved characters + return false + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters + return false + case ':', '[', ']', '@': // Authority related delimiters + return false + } + // Everything else must be escaped. + return true + } + + hexCount := 0 + for i := 0; i < len(authority); i++ { + c := authority[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return authority + } + + required := len(authority) + 2*hexCount + t := make([]byte, required) + + j := 0 + // This logic is a barebones version of escape in the go net/url library. + for i := 0; i < len(authority); i++ { + switch c := authority[i]; { + case shouldEscape(c): + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + default: + t[j] = authority[i] + j++ + } + } + return string(t) } // Determine channel authority. The order of precedence is as follows: @@ -1872,7 +1998,11 @@ func (cc *ClientConn) determineAuthority() error { // the channel authority given the user's dial target. For resolvers // which don't implement this interface, we will use the endpoint from // "scheme://authority/endpoint" as the default authority.
- cc.authority = endpoint + // Escape the endpoint to handle use cases where the endpoint + // might not be a valid authority by default. + // For example an endpoint which has multiple paths like + // 'a/b/c', which is not a valid authority by default. + cc.authority = encodeAuthority(endpoint) } channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) return nil diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 129776547..411e3dfd4 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -27,8 +27,8 @@ import ( // omits the name/string, which vary between the two and are not needed for // anything besides the registry in the encoding package. type baseCodec interface { - Marshal(v interface{}) ([]byte, error) - Unmarshal(data []byte, v interface{}) error + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error } var _ baseCodec = Codec(nil) @@ -41,9 +41,9 @@ var _ baseCodec = encoding.Codec(nil) // Deprecated: use encoding.Codec instead. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // String returns the name of the Codec implementation. This is unused by // gRPC. String() string diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 15a3d5102..cfc9fd85e 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -78,6 +78,7 @@ type dialOptions struct { defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration + recvBufferPool SharedBufferPool } // DialOption configures how we set up the connection. 
@@ -138,6 +139,20 @@ func newJoinDialOption(opts ...DialOption) DialOption { return &joinDialOption{opts: opts} } +// WithSharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithSharedWriteBuffer(val bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.SharedWriteBuffer = val + }) +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is @@ -628,6 +643,8 @@ func defaultDialOptions() dialOptions { ReadBufferSize: defaultReadBufSize, UseProxy: true, }, + recvBufferPool: nopBufferPool{}, + idleTimeout: 30 * time.Minute, } } @@ -664,8 +681,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption { // channel will exit idle mode when the Connect() method is called or when an // RPC is initiated. // -// By default this feature is disabled, which can also be explicitly configured -// by passing zero to this function. +// A default timeout of 30 minutes will be used if this dial option is not set +// at dial time and idleness can be disabled by passing a timeout of zero. // // # Experimental // @@ -676,3 +693,24 @@ func WithIdleTimeout(d time.Duration) DialOption { o.idleTimeout = d }) } + +// WithRecvBufferPool returns a DialOption that configures the ClientConn +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. 
+// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: WithStatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.recvBufferPool = bufferPool + }) +} diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 07a586135..5ebf88d71 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -38,6 +38,10 @@ const Identity = "identity" // Compressor is used for compressing and decompressing when sending or // receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. A return value of -1 indicates unknown size. type Compressor interface { // Compress writes the data written to wc to w after compressing it. If an // error occurs while initializing the compressor, that error is returned @@ -51,15 +55,6 @@ type Compressor interface { // coding header. The result must be static; the result cannot change // between calls. Name() string - // If a Compressor implements - // DecompressedSize(compressedBytes []byte) int, gRPC will call it - // to determine the size of the buffer allocated for the result of decompression. - // Return -1 to indicate unknown size. - // - // Experimental - // - // Notice: This API is EXPERIMENTAL and may be changed or removed in a - // later release. } var registeredCompressor = make(map[string]Compressor) @@ -90,9 +85,9 @@ func GetCompressor(name string) Compressor { // methods can be called from concurrent goroutines. 
type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // Name returns the name of the Codec implementation. The returned string // will be used as part of content type in transmission. The result must be // static; the result cannot change between calls. diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 3009b35af..0ee3d3bae 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -37,7 +37,7 @@ func init() { // codec is a Codec implementation with protobuf. It is the default codec for gRPC. type codec struct{} -func (codec) Marshal(v interface{}) ([]byte, error) { +func (codec) Marshal(v any) ([]byte, error) { vv, ok := v.(proto.Message) if !ok { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) @@ -45,7 +45,7 @@ func (codec) Marshal(v interface{}) ([]byte, error) { return proto.Marshal(vv) } -func (codec) Unmarshal(data []byte, v interface{}) error { +func (codec) Unmarshal(data []byte, v any) error { vv, ok := v.(proto.Message) if !ok { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index 8358dd6e2..ac73c9ced 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -31,71 +31,71 @@ type componentData struct { var cache = map[string]*componentData{} -func (c *componentData) InfoDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) 
+func (c *componentData) InfoDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.InfoDepth(depth+1, args...) } -func (c *componentData) WarningDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) WarningDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.WarningDepth(depth+1, args...) } -func (c *componentData) ErrorDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) ErrorDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.ErrorDepth(depth+1, args...) } -func (c *componentData) FatalDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) FatalDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.FatalDepth(depth+1, args...) } -func (c *componentData) Info(args ...interface{}) { +func (c *componentData) Info(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warning(args ...interface{}) { +func (c *componentData) Warning(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Error(args ...interface{}) { +func (c *componentData) Error(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatal(args ...interface{}) { +func (c *componentData) Fatal(args ...any) { c.FatalDepth(1, args...) 
} -func (c *componentData) Infof(format string, args ...interface{}) { +func (c *componentData) Infof(format string, args ...any) { c.InfoDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Warningf(format string, args ...interface{}) { +func (c *componentData) Warningf(format string, args ...any) { c.WarningDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Errorf(format string, args ...interface{}) { +func (c *componentData) Errorf(format string, args ...any) { c.ErrorDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Fatalf(format string, args ...interface{}) { +func (c *componentData) Fatalf(format string, args ...any) { c.FatalDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Infoln(args ...interface{}) { +func (c *componentData) Infoln(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warningln(args ...interface{}) { +func (c *componentData) Warningln(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Errorln(args ...interface{}) { +func (c *componentData) Errorln(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatalln(args ...interface{}) { +func (c *componentData) Fatalln(args ...any) { c.FatalDepth(1, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index c8bb2be34..16928c9cb 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -42,53 +42,53 @@ func V(l int) bool { } // Info logs to the INFO log. -func Info(args ...interface{}) { +func Info(args ...any) { grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. -func Infof(format string, args ...interface{}) { +func Infof(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. 
-func Infoln(args ...interface{}) { +func Infoln(args ...any) { grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. -func Warning(args ...interface{}) { +func Warning(args ...any) { grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. -func Warningf(format string, args ...interface{}) { +func Warningf(format string, args ...any) { grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. -func Warningln(args ...interface{}) { +func Warningln(args ...any) { grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. -func Error(args ...interface{}) { +func Error(args ...any) { grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. -func Errorf(format string, args ...interface{}) { +func Errorf(format string, args ...any) { grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. -func Errorln(args ...interface{}) { +func Errorln(args ...any) { grpclog.Logger.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. -func Fatal(args ...interface{}) { +func Fatal(args ...any) { grpclog.Logger.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -96,7 +96,7 @@ func Fatal(args ...interface{}) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. -func Fatalf(format string, args ...interface{}) { +func Fatalf(format string, args ...any) { grpclog.Logger.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) @@ -104,7 +104,7 @@ func Fatalf(format string, args ...interface{}) { // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. 
// It calle os.Exit()) with exit code 1. -func Fatalln(args ...interface{}) { +func Fatalln(args ...any) { grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -113,20 +113,20 @@ func Fatalln(args ...interface{}) { // Print prints to the logger. Arguments are handled in the manner of fmt.Print. // // Deprecated: use Info. -func Print(args ...interface{}) { +func Print(args ...any) { grpclog.Logger.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. -func Printf(format string, args ...interface{}) { +func Printf(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. -func Println(args ...interface{}) { +func Println(args ...any) { grpclog.Logger.Infoln(args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index ef06a4822..b1674d826 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -24,12 +24,12 @@ import "google.golang.org/grpc/internal/grpclog" // // Deprecated: use LoggerV2. type Logger interface { - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) } // SetLogger sets the logger that is used in grpc. Call only from @@ -45,39 +45,39 @@ type loggerWrapper struct { Logger } -func (g *loggerWrapper) Info(args ...interface{}) { +func (g *loggerWrapper) Info(args ...any) { g.Logger.Print(args...) 
} -func (g *loggerWrapper) Infoln(args ...interface{}) { +func (g *loggerWrapper) Infoln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Infof(format string, args ...interface{}) { +func (g *loggerWrapper) Infof(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Warning(args ...interface{}) { +func (g *loggerWrapper) Warning(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Warningln(args ...interface{}) { +func (g *loggerWrapper) Warningln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Warningf(format string, args ...interface{}) { +func (g *loggerWrapper) Warningf(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Error(args ...interface{}) { +func (g *loggerWrapper) Error(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Errorln(args ...interface{}) { +func (g *loggerWrapper) Errorln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Errorf(format string, args ...interface{}) { +func (g *loggerWrapper) Errorf(format string, args ...any) { g.Logger.Printf(format, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 5de66e40d..ecfd36d71 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -33,35 +33,35 @@ import ( // LoggerV2 does underlying logging work for grpclog. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. 
Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. 
V(l int) bool } @@ -182,53 +182,53 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } -func (g *loggerT) Info(args ...interface{}) { +func (g *loggerT) Info(args ...any) { g.output(infoLog, fmt.Sprint(args...)) } -func (g *loggerT) Infoln(args ...interface{}) { +func (g *loggerT) Infoln(args ...any) { g.output(infoLog, fmt.Sprintln(args...)) } -func (g *loggerT) Infof(format string, args ...interface{}) { +func (g *loggerT) Infof(format string, args ...any) { g.output(infoLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Warning(args ...interface{}) { +func (g *loggerT) Warning(args ...any) { g.output(warningLog, fmt.Sprint(args...)) } -func (g *loggerT) Warningln(args ...interface{}) { +func (g *loggerT) Warningln(args ...any) { g.output(warningLog, fmt.Sprintln(args...)) } -func (g *loggerT) Warningf(format string, args ...interface{}) { +func (g *loggerT) Warningf(format string, args ...any) { g.output(warningLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Error(args ...interface{}) { +func (g *loggerT) Error(args ...any) { g.output(errorLog, fmt.Sprint(args...)) } -func (g *loggerT) Errorln(args ...interface{}) { +func (g *loggerT) Errorln(args ...any) { g.output(errorLog, fmt.Sprintln(args...)) } -func (g *loggerT) Errorf(format string, args ...interface{}) { +func (g *loggerT) Errorf(format string, args ...any) { g.output(errorLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Fatal(args ...interface{}) { +func (g *loggerT) Fatal(args ...any) { g.output(fatalLog, fmt.Sprint(args...)) os.Exit(1) } -func (g *loggerT) Fatalln(args ...interface{}) { +func (g *loggerT) Fatalln(args ...any) { g.output(fatalLog, fmt.Sprintln(args...)) os.Exit(1) } -func (g *loggerT) Fatalf(format string, args ...interface{}) { +func (g *loggerT) Fatalf(format string, args ...any) { g.output(fatalLog, fmt.Sprintf(format, args...)) os.Exit(1) } @@ -248,11 +248,11 @@ func (g *loggerT) V(l int) bool { type DepthLoggerV2 
interface { LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index bb96ef57b..877d78fc3 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -23,7 +23,7 @@ import ( ) // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. -type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error +type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. // Unary interceptors can be specified as a DialOption, using @@ -40,7 +40,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{ // defaults from the ClientConn as well as per-call options. // // The returned error must be compatible with the status package. 
-type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) @@ -66,7 +66,7 @@ type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *Cli // server side. All per-rpc information may be mutated by the interceptor. type UnaryServerInfo struct { // Server is the service implementation the user provides. This is read-only. - Server interface{} + Server any // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string } @@ -78,13 +78,13 @@ type UnaryServerInfo struct { // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) +type UnaryHandler func(ctx context.Context, req any) (any, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper // of the service method implementation. It is the responsibility of the interceptor to invoke handler // to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) +type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) // StreamServerInfo consists of various information about a streaming RPC on // server side. 
All per-rpc information may be mutated by the interceptor. @@ -101,4 +101,4 @@ type StreamServerInfo struct { // info contains all the information of this RPC the interceptor can operate on. And handler is the // service method implementation. It is the responsibility of the interceptor to invoke handler to // complete the RPC. -type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error +type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index 5fc0ee3da..fed1c011a 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -23,6 +23,8 @@ package backoff import ( + "context" + "errors" "time" grpcbackoff "google.golang.org/grpc/backoff" @@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration { } return time.Duration(backoff) } + +// ErrResetBackoff is the error to be returned by the function executed by RunF, +// to instruct the latter to reset its backoff state. +var ErrResetBackoff = errors.New("reset backoff state") + +// RunF provides a convenient way to run a function f repeatedly until the +// context expires or f returns a non-nil error that is not ErrResetBackoff. +// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. 
+func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 08666f62a..3c594e6e4 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -200,8 +200,8 @@ func (gsb *Balancer) ExitIdle() { } } -// UpdateSubConnState forwards the update to the appropriate child. -func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +// updateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { gsb.currentMu.Lock() defer gsb.currentMu.Unlock() gsb.mu.Lock() @@ -214,13 +214,26 @@ func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubC } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { balToUpdate = gsb.balancerPending } - gsb.mu.Unlock() if balToUpdate == nil { // SubConn belonged to a stale lb policy that has not yet fully closed, // or the balancer was already closed. + gsb.mu.Unlock() return } - balToUpdate.UpdateSubConnState(sc, state) + if state.ConnectivityState == connectivity.Shutdown { + delete(balToUpdate.subconns, sc) + } + gsb.mu.Unlock() + if cb != nil { + cb(state) + } else { + balToUpdate.UpdateSubConnState(sc, state) + } +} + +// UpdateSubConnState forwards the update to the appropriate child. 
+func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.updateSubConnState(sc, state, nil) } // Close closes any active child balancers. @@ -242,7 +255,7 @@ func (gsb *Balancer) Close() { // // It implements the balancer.ClientConn interface and is passed down in that // capacity to the wrapped balancer. It maintains a set of subConns created by -// the wrapped balancer and calls from the latter to create/update/remove +// the wrapped balancer and calls from the latter to create/update/shutdown // SubConns update this set before being forwarded to the parent ClientConn. // State updates from the wrapped balancer can result in invocation of the // graceful switch logic. @@ -254,21 +267,10 @@ type balancerWrapper struct { subconns map[balancer.SubConn]bool // subconns created by this balancer } -func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - if state.ConnectivityState == connectivity.Shutdown { - bw.gsb.mu.Lock() - delete(bw.subconns, sc) - bw.gsb.mu.Unlock() - } - // There is no need to protect this read with a mutex, as the write to the - // Balancer field happens in SwitchTo, which completes before this can be - // called. - bw.Balancer.UpdateSubConnState(sc, state) -} - -// Close closes the underlying LB policy and removes the subconns it created. bw -// must not be referenced via balancerCurrent or balancerPending in gsb when -// called. gsb.mu must not be held. Does not panic with a nil receiver. +// Close closes the underlying LB policy and shuts down the subconns it +// created. bw must not be referenced via balancerCurrent or balancerPending in +// gsb when called. gsb.mu must not be held. Does not panic with a nil +// receiver. func (bw *balancerWrapper) Close() { // before Close is called. 
if bw == nil { @@ -281,7 +283,7 @@ func (bw *balancerWrapper) Close() { bw.Balancer.Close() bw.gsb.mu.Lock() for sc := range bw.subconns { - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() } bw.gsb.mu.Unlock() } @@ -335,13 +337,16 @@ func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.Ne } bw.gsb.mu.Unlock() + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) } sc, err := bw.gsb.cc.NewSubConn(addrs, opts) if err != nil { return nil, err } bw.gsb.mu.Lock() if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() bw.gsb.mu.Unlock() return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) } @@ -360,13 +365,9 @@ func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { } func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { - bw.gsb.mu.Lock() - if !bw.gsb.balancerCurrentOrPending(bw) { - bw.gsb.mu.Unlock() - return - } - bw.gsb.mu.Unlock() - bw.gsb.cc.RemoveSubConn(sc) + // Note: existing third party balancers may call this, so it must remain + // until RemoveSubConn is fully removed. + sc.Shutdown() } func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go index 3a905d966..94a08d687 100644 --- a/vendor/google.golang.org/grpc/internal/balancerload/load.go +++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -25,7 +25,7 @@ import ( // Parser converts loads from metadata into a concrete type. type Parser interface { // Parse parses loads from metadata. - Parse(md metadata.MD) interface{} + Parse(md metadata.MD) any } var parser Parser @@ -38,7 +38,7 @@ func SetParser(lr Parser) { } // Parse calls parser.Read(). 
-func Parse(md metadata.MD) interface{} { +func Parse(md metadata.MD) any { if parser == nil { return nil } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 6c3f63221..0f31274a3 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -230,7 +230,7 @@ type ClientMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { @@ -270,7 +270,7 @@ type ServerMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 81c2f5fd7..4399c3df4 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -28,25 +28,25 @@ import "sync" // the underlying mutex used for synchronization. // // Unbounded supports values of any type to be stored in it by using a channel -// of `interface{}`. This means that a call to Put() incurs an extra memory -// allocation, and also that users need a type assertion while reading. For -// performance critical code paths, using Unbounded is strongly discouraged and -// defining a new type specific implementation of this buffer is preferred. See +// of `any`. This means that a call to Put() incurs an extra memory allocation, +// and also that users need a type assertion while reading. 
For performance +// critical code paths, using Unbounded is strongly discouraged and defining a +// new type specific implementation of this buffer is preferred. See // internal/transport/transport.go for an example of this. type Unbounded struct { - c chan interface{} + c chan any closed bool mu sync.Mutex - backlog []interface{} + backlog []any } // NewUnbounded returns a new instance of Unbounded. func NewUnbounded() *Unbounded { - return &Unbounded{c: make(chan interface{}, 1)} + return &Unbounded{c: make(chan any, 1)} } // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t interface{}) { +func (b *Unbounded) Put(t any) { b.mu.Lock() defer b.mu.Unlock() if b.closed { @@ -89,7 +89,7 @@ func (b *Unbounded) Load() { // // If the unbounded buffer is closed, the read channel returned by this method // is closed. -func (b *Unbounded) Get() <-chan interface{} { +func (b *Unbounded) Get() <-chan any { return b.c } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 777cbcd79..5395e7752 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,9 +24,7 @@ package channelz import ( - "context" "errors" - "fmt" "sort" "sync" "sync/atomic" @@ -40,8 +38,11 @@ const ( ) var ( - db dbWrapper - idGen idGenerator + // IDGen is the global channelz entity ID generator. It should not be used + // outside this package except by tests. + IDGen IDGenerator + + db dbWrapper // EntryPerPage defines the number of channelz entries to be shown on a web page. EntryPerPage = int64(50) curState int32 @@ -52,14 +53,14 @@ var ( func TurnOn() { if !IsOn() { db.set(newChannelMap()) - idGen.reset() + IDGen.Reset() atomic.StoreInt32(&curState, 1) } } // IsOn returns whether channelz data collection is on. 
func IsOn() bool { - return atomic.CompareAndSwapInt32(&curState, 1, 1) + return atomic.LoadInt32(&curState) == 1 } // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). @@ -97,43 +98,6 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorageForTesting initializes channelz data storage and id -// generator for testing purposes. -// -// Returns a cleanup function to be invoked by the test, which waits for up to -// 10s for all channelz state to be reset by the grpc goroutines when those -// entities get closed. This cleanup function helps with ensuring that tests -// don't mess up each other. -func NewChannelzStorageForTesting() (cleanup func() error) { - db.set(newChannelMap()) - idGen.reset() - - return func() error { - cm := db.get() - if cm == nil { - return nil - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - ticker := time.NewTicker(10 * time.Millisecond) - defer ticker.Stop() - for { - cm.mu.RLock() - topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) - cm.mu.RUnlock() - - if err := ctx.Err(); err != nil { - return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) - } - if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { - return nil - } - <-ticker.C - } - } -} - // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. 
// @@ -193,7 +157,7 @@ func GetServer(id int64) *ServerMetric { // // If channelz is not turned ON, the channelz database is not mutated. func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() var parent int64 isTopChannel := true if pid != nil { @@ -229,7 +193,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er if pid == nil { return nil, errors.New("a SubChannel's parent id cannot be nil") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefSubChannel, id, pid), nil } @@ -251,7 +215,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er // // If channelz is not turned ON, the channelz database is not mutated. func RegisterServer(s Server, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefServer, id, nil) } @@ -277,7 +241,7 @@ func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a ListenSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefListenSocket, id, pid), nil } @@ -297,7 +261,7 @@ func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a NormalSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefNormalSocket, id, pid), nil } @@ -776,14 +740,17 @@ func (c *channelMap) GetServer(id int64) *ServerMetric { return sm } -type idGenerator struct { +// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. +type IDGenerator struct { id int64 } -func (i *idGenerator) reset() { +// Reset resets the generated ID back to zero. Should only be used at +// initialization or by tests sensitive to the ID number. 
+func (i *IDGenerator) Reset() { atomic.StoreInt64(&i.id, 0) } -func (i *idGenerator) genID() int64 { +func (i *IDGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index 8e13a3d2c..f89e6f77b 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -31,7 +31,7 @@ func withParens(id *Identifier) string { } // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtInfo, @@ -39,7 +39,7 @@ func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtInfo, @@ -47,7 +47,7 @@ func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...inter } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtWarning, @@ -55,7 +55,7 @@ func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Warningf logs and adds a trace event if channelz is on. 
-func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtWarning, @@ -63,7 +63,7 @@ func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...in } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtError, @@ -71,7 +71,7 @@ func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtError, diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 7b2f350e2..1d4020f53 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -628,6 +628,7 @@ type tracedChannel interface { type channelTrace struct { cm *channelMap + clearCalled bool createdTime time.Time eventCount int64 mu sync.Mutex @@ -656,6 +657,10 @@ func (c *channelTrace) append(e *TraceEvent) { } func (c *channelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true c.mu.Lock() for _, e := range c.events { if e.RefID != 0 { diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index 8d194e44e..98288c3f8 100644 --- 
a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -23,7 +23,7 @@ import ( ) // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket interface{}) *SocketOptionData { +func GetSocketOption(socket any) *SocketOptionData { c, ok := socket.(syscall.Conn) if !ok { return nil diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go index 837ddc402..b5568b22e 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -22,6 +22,6 @@ package channelz // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c interface{}) *SocketOptionData { +func GetSocketOption(c any) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go index 32c9b5903..9deee7f65 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -25,12 +25,12 @@ import ( type requestInfoKey struct{} // NewRequestInfoContext creates a context with ri. -func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { +func NewRequestInfoContext(ctx context.Context, ri any) context.Context { return context.WithValue(ctx, requestInfoKey{}, ri) } // RequestInfoFromContext extracts the RequestInfo from ctx. -func RequestInfoFromContext(ctx context.Context) interface{} { +func RequestInfoFromContext(ctx context.Context) any { return ctx.Value(requestInfoKey{}) } @@ -39,11 +39,11 @@ func RequestInfoFromContext(ctx context.Context) interface{} { type clientHandshakeInfoKey struct{} // ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. 
-func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { +func ClientHandshakeInfoFromContext(ctx context.Context) any { return ctx.Value(clientHandshakeInfoKey{}) } // NewClientHandshakeInfoContext creates a context with chi. -func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { +func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 80fd5c7d2..3cf10ddfb 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -37,9 +37,15 @@ var ( // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) // PickFirstLBConfig is set if we should support configuration of the - // pick_first LB policy, which can be enabled by setting the environment - // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". - PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) + // pick_first LB policy. + PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) + // LeastRequestLB is set if we should support the least_request_experimental + // LB policy, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". + LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. 
+ ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index b68e26a36..bfc45102a 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -30,7 +30,7 @@ var Logger LoggerV2 var DepthLogger DepthLoggerV2 // InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...interface{}) { +func InfoDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.InfoDepth(depth, args...) } else { @@ -39,7 +39,7 @@ func InfoDepth(depth int, args ...interface{}) { } // WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...interface{}) { +func WarningDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.WarningDepth(depth, args...) } else { @@ -48,7 +48,7 @@ func WarningDepth(depth int, args ...interface{}) { } // ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...interface{}) { +func ErrorDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.ErrorDepth(depth, args...) } else { @@ -57,7 +57,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...interface{}) { +func FatalDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.FatalDepth(depth, args...) } else { @@ -71,35 +71,35 @@ func FatalDepth(depth int, args ...interface{}) { // is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. 
- Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. 
V(l int) bool } @@ -116,11 +116,11 @@ type LoggerV2 interface { // later release. type DepthLoggerV2 interface { // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index 02224b42c..faa998de7 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -31,7 +31,7 @@ type PrefixLogger struct { } // Infof does info logging. -func (pl *PrefixLogger) Infof(format string, args ...interface{}) { +func (pl *PrefixLogger) Infof(format string, args ...any) { if pl != nil { // Handle nil, so the tests can pass in a nil logger. format = pl.prefix + format @@ -42,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) { } // Warningf does warning logging. -func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { +func (pl *PrefixLogger) Warningf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) @@ -52,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { } // Errorf does error logging. 
-func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { +func (pl *PrefixLogger) Errorf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) @@ -62,7 +62,7 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { } // Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { +func (pl *PrefixLogger) Debugf(format string, args ...any) { // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe // rewrite PrefixLogger a little to ensure that we don't use the global // `Logger` here, and instead use the `logger` field. diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index d08e3e907..aa97273e7 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -80,6 +80,13 @@ func Uint32() uint32 { return r.Uint32() } +// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. +func ExpFloat64() float64 { + mu.Lock() + defer mu.Unlock() + return r.ExpFloat64() +} + // Shuffle implements rand.Shuffle on the grpcrand global source. var Shuffle = func(n int, f func(int, int)) { mu.Lock() diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 37b8d4117..900917dbe 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -32,10 +32,10 @@ import ( // // This type is safe for concurrent access. 
type CallbackSerializer struct { - // Done is closed once the serializer is shut down completely, i.e all + // done is closed once the serializer is shut down completely, i.e all // scheduled callbacks are executed and the serializer has deallocated all // its resources. - Done chan struct{} + done chan struct{} callbacks *buffer.Unbounded closedMu sync.Mutex @@ -48,12 +48,12 @@ type CallbackSerializer struct { // callbacks will be added once this context is canceled, and any pending un-run // callbacks will be executed before the serializer is shut down. func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { - t := &CallbackSerializer{ - Done: make(chan struct{}), + cs := &CallbackSerializer{ + done: make(chan struct{}), callbacks: buffer.NewUnbounded(), } - go t.run(ctx) - return t + go cs.run(ctx) + return cs } // Schedule adds a callback to be scheduled after existing callbacks are run. @@ -64,56 +64,62 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { // Return value indicates if the callback was successfully added to the list of // callbacks to be executed by the serializer. It is not possible to add // callbacks once the context passed to NewCallbackSerializer is cancelled. -func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - t.closedMu.Lock() - defer t.closedMu.Unlock() +func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + cs.closedMu.Lock() + defer cs.closedMu.Unlock() - if t.closed { + if cs.closed { return false } - t.callbacks.Put(f) + cs.callbacks.Put(f) return true } -func (t *CallbackSerializer) run(ctx context.Context) { +func (cs *CallbackSerializer) run(ctx context.Context) { var backlog []func(context.Context) - defer close(t.Done) + defer close(cs.done) for ctx.Err() == nil { select { case <-ctx.Done(): // Do nothing here. Next iteration of the for loop will not happen, // since ctx.Err() would be non-nil. 
- case callback, ok := <-t.callbacks.Get(): + case callback, ok := <-cs.callbacks.Get(): if !ok { return } - t.callbacks.Load() + cs.callbacks.Load() callback.(func(ctx context.Context))(ctx) } } // Fetch pending callbacks if any, and execute them before returning from - // this method and closing t.Done. - t.closedMu.Lock() - t.closed = true - backlog = t.fetchPendingCallbacks() - t.callbacks.Close() - t.closedMu.Unlock() + // this method and closing cs.done. + cs.closedMu.Lock() + cs.closed = true + backlog = cs.fetchPendingCallbacks() + cs.callbacks.Close() + cs.closedMu.Unlock() for _, b := range backlog { b(ctx) } } -func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { +func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { var backlog []func(context.Context) for { select { - case b := <-t.callbacks.Get(): + case b := <-cs.callbacks.Get(): backlog = append(backlog, b.(func(context.Context))) - t.callbacks.Load() + cs.callbacks.Load() default: return backlog } } } + +// Done returns a channel that is closed after the context passed to +// NewCallbackSerializer is canceled and all callbacks have been executed. +func (cs *CallbackSerializer) Done() <-chan struct{} { + return cs.done +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go new file mode 100644 index 000000000..aef8cec1a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -0,0 +1,121 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" +) + +// Subscriber represents an entity that is subscribed to messages published on +// a PubSub. It wraps the callback to be invoked by the PubSub when a new +// message is published. +type Subscriber interface { + // OnMessage is invoked when a new message is published. Implementations + // must not block in this method. + OnMessage(msg any) +} + +// PubSub is a simple one-to-many publish-subscribe system that supports +// messages of arbitrary type. It guarantees that messages are delivered in +// the same order in which they were published. +// +// Publisher invokes the Publish() method to publish new messages, while +// subscribers interested in receiving these messages register a callback +// via the Subscribe() method. +// +// Once a PubSub is stopped, no more messages can be published, but any pending +// published messages will be delivered to the subscribers. Done may be used +// to determine when all published messages have been delivered. +type PubSub struct { + cs *CallbackSerializer + + // Access to the below fields are guarded by this mutex. + mu sync.Mutex + msg any + subscribers map[Subscriber]bool +} + +// NewPubSub returns a new PubSub instance. Users should cancel the +// provided context to shutdown the PubSub. +func NewPubSub(ctx context.Context) *PubSub { + return &PubSub{ + cs: NewCallbackSerializer(ctx), + subscribers: map[Subscriber]bool{}, + } +} + +// Subscribe registers the provided Subscriber to the PubSub. 
+// +// If the PubSub contains a previously published message, the Subscriber's +// OnMessage() callback will be invoked asynchronously with the existing +// message to begin with, and subsequently for every newly published message. +// +// The caller is responsible for invoking the returned cancel function to +// unsubscribe itself from the PubSub. +func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.subscribers[sub] = true + + if ps.msg != nil { + msg := ps.msg + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[sub] { + return + } + sub.OnMessage(msg) + }) + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.subscribers, sub) + } +} + +// Publish publishes the provided message to the PubSub, and invokes +// callbacks registered by subscribers asynchronously. +func (ps *PubSub) Publish(msg any) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.msg = msg + for sub := range ps.subscribers { + s := sub + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[s] { + return + } + s.OnMessage(msg) + }) + } +} + +// Done returns a channel that is closed after the context passed to NewPubSub +// is canceled and all updates have been sent to subscribers. +func (ps *PubSub) Done() <-chan struct{} { + return ps.cs.Done() +} diff --git a/vendor/google.golang.org/grpc/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go similarity index 61% rename from vendor/google.golang.org/grpc/idle.go rename to vendor/google.golang.org/grpc/internal/idle/idle.go index dc3dc72f6..6c272476e 100644 --- a/vendor/google.golang.org/grpc/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -16,7 +16,9 @@ * */ -package grpc +// Package idle contains a component for managing idleness (entering and exiting) +// based on RPC activity. 
+package idle import ( "fmt" @@ -24,6 +26,8 @@ import ( "sync" "sync/atomic" "time" + + "google.golang.org/grpc/grpclog" ) // For overriding in unit tests. @@ -31,31 +35,31 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { return time.AfterFunc(d, f) } -// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter +// Enforcer is the functionality provided by grpc.ClientConn to enter // and exit from idle mode. -type idlenessEnforcer interface { - exitIdleMode() error - enterIdleMode() error +type Enforcer interface { + ExitIdleMode() error + EnterIdleMode() error } -// idlenessManager defines the functionality required to track RPC activity on a +// Manager defines the functionality required to track RPC activity on a // channel. -type idlenessManager interface { - onCallBegin() error - onCallEnd() - close() +type Manager interface { + OnCallBegin() error + OnCallEnd() + Close() } -type noopIdlenessManager struct{} +type noopManager struct{} -func (noopIdlenessManager) onCallBegin() error { return nil } -func (noopIdlenessManager) onCallEnd() {} -func (noopIdlenessManager) close() {} +func (noopManager) OnCallBegin() error { return nil } +func (noopManager) OnCallEnd() {} +func (noopManager) Close() {} -// idlenessManagerImpl implements the idlenessManager interface. It uses atomic -// operations to synchronize access to shared state and a mutex to guarantee -// mutual exclusion in a critical section. -type idlenessManagerImpl struct { +// manager implements the Manager interface. It uses atomic operations to +// synchronize access to shared state and a mutex to guarantee mutual exclusion +// in a critical section. +type manager struct { // State accessed atomically. lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. 
@@ -64,14 +68,15 @@ type idlenessManagerImpl struct { // Can be accessed without atomics or mutex since these are set at creation // time and read-only after that. - enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. - timeout int64 // Idle timeout duration nanos stored as an int64. + enforcer Enforcer // Functionality provided by grpc.ClientConn. + timeout int64 // Idle timeout duration nanos stored as an int64. + logger grpclog.LoggerV2 // idleMu is used to guarantee mutual exclusion in two scenarios: // - Opposing intentions: // - a: Idle timeout has fired and handleIdleTimeout() is trying to put // the channel in idle mode because the channel has been inactive. - // - b: At the same time an RPC is made on the channel, and onCallBegin() + // - b: At the same time an RPC is made on the channel, and OnCallBegin() // is trying to prevent the channel from going idle. // - Competing intentions: // - The channel is in idle mode and there are multiple RPCs starting at @@ -83,28 +88,37 @@ type idlenessManagerImpl struct { timer *time.Timer } -// newIdlenessManager creates a new idleness manager implementation for the +// ManagerOptions is a collection of options used by +// NewManager. +type ManagerOptions struct { + Enforcer Enforcer + Timeout time.Duration + Logger grpclog.LoggerV2 +} + +// NewManager creates a new idleness manager implementation for the // given idle timeout. 
-func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { - if idleTimeout == 0 { - return noopIdlenessManager{} +func NewManager(opts ManagerOptions) Manager { + if opts.Timeout == 0 { + return noopManager{} } - i := &idlenessManagerImpl{ - enforcer: enforcer, - timeout: int64(idleTimeout), + m := &manager{ + enforcer: opts.Enforcer, + timeout: int64(opts.Timeout), + logger: opts.Logger, } - i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) - return i + m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout) + return m } // resetIdleTimer resets the idle timer to the given duration. This method // should only be called from the timer callback. -func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { - i.idleMu.Lock() - defer i.idleMu.Unlock() +func (m *manager) resetIdleTimer(d time.Duration) { + m.idleMu.Lock() + defer m.idleMu.Unlock() - if i.timer == nil { + if m.timer == nil { // Only close sets timer to nil. We are done. return } @@ -112,47 +126,47 @@ func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { // It is safe to ignore the return value from Reset() because this method is // only ever called from the timer callback, which means the timer has // already fired. - i.timer.Reset(d) + m.timer.Reset(d) } // handleIdleTimeout is the timer callback that is invoked upon expiry of the // configured idle timeout. The channel is considered inactive if there are no // ongoing calls and no RPC activity since the last time the timer fired. -func (i *idlenessManagerImpl) handleIdleTimeout() { - if i.isClosed() { +func (m *manager) handleIdleTimeout() { + if m.isClosed() { return } - if atomic.LoadInt32(&i.activeCallsCount) > 0 { - i.resetIdleTimer(time.Duration(i.timeout)) + if atomic.LoadInt32(&m.activeCallsCount) > 0 { + m.resetIdleTimer(time.Duration(m.timeout)) return } // There has been activity on the channel since we last got here. Reset the // timer and return. 
- if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { // Set the timer to fire after a duration of idle timeout, calculated // from the time the most recent RPC completed. - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) - i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano())) return } // This CAS operation is extremely likely to succeed given that there has // been no activity since the last time we were here. Setting the - // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the + // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the // channel is either in idle mode or is trying to get there. - if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { // This CAS operation can fail if an RPC started after we checked for // activity at the top of this method, or one was ongoing from before // the last time we were here. In both case, reset the timer and return. - i.resetIdleTimer(time.Duration(i.timeout)) + m.resetIdleTimer(time.Duration(m.timeout)) return } // Now that we've set the active calls count to -math.MaxInt32, it's time to // actually move to idle mode. - if i.tryEnterIdleMode() { + if m.tryEnterIdleMode() { // Successfully entered idle mode. No timer needed until we exit idle. return } @@ -160,8 +174,8 @@ func (i *idlenessManagerImpl) handleIdleTimeout() { // Failed to enter idle mode due to a concurrent RPC that kept the channel // active, or because of an error from the channel. Undo the attempt to // enter idle, and reset the timer to try again later. 
- atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) - i.resetIdleTimer(time.Duration(i.timeout)) + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.resetIdleTimer(time.Duration(m.timeout)) } // tryEnterIdleMode instructs the channel to enter idle mode. But before @@ -171,15 +185,15 @@ func (i *idlenessManagerImpl) handleIdleTimeout() { // Return value indicates whether or not the channel moved to idle mode. // // Holds idleMu which ensures mutual exclusion with exitIdleMode. -func (i *idlenessManagerImpl) tryEnterIdleMode() bool { - i.idleMu.Lock() - defer i.idleMu.Unlock() +func (m *manager) tryEnterIdleMode() bool { + m.idleMu.Lock() + defer m.idleMu.Unlock() - if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { + if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { // We raced and lost to a new RPC. Very rare, but stop entering idle. return false } - if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { // An very short RPC could have come in (and also finished) after we // checked for calls count and activity in handleIdleTimeout(), but // before the CAS operation. So, we need to check for activity again. @@ -189,99 +203,99 @@ func (i *idlenessManagerImpl) tryEnterIdleMode() bool { // No new RPCs have come in since we last set the active calls count value // -math.MaxInt32 in the timer callback. And since we have the lock, it is // safe to enter idle mode now. - if err := i.enforcer.enterIdleMode(); err != nil { - logger.Errorf("Failed to enter idle mode: %v", err) + if err := m.enforcer.EnterIdleMode(); err != nil { + m.logger.Errorf("Failed to enter idle mode: %v", err) return false } // Successfully entered idle mode. - i.actuallyIdle = true + m.actuallyIdle = true return true } -// onCallBegin is invoked at the start of every RPC. -func (i *idlenessManagerImpl) onCallBegin() error { - if i.isClosed() { +// OnCallBegin is invoked at the start of every RPC. 
+func (m *manager) OnCallBegin() error { + if m.isClosed() { return nil } - if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { + if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { // Channel is not idle now. Set the activity bit and allow the call. - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) return nil } // Channel is either in idle mode or is in the process of moving to idle // mode. Attempt to exit idle mode to allow this RPC. - if err := i.exitIdleMode(); err != nil { + if err := m.exitIdleMode(); err != nil { // Undo the increment to calls count, and return an error causing the // RPC to fail. - atomic.AddInt32(&i.activeCallsCount, -1) + atomic.AddInt32(&m.activeCallsCount, -1) return err } - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) return nil } // exitIdleMode instructs the channel to exit idle mode. // // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. -func (i *idlenessManagerImpl) exitIdleMode() error { - i.idleMu.Lock() - defer i.idleMu.Unlock() +func (m *manager) exitIdleMode() error { + m.idleMu.Lock() + defer m.idleMu.Unlock() - if !i.actuallyIdle { + if !m.actuallyIdle { // This can happen in two scenarios: // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called // tryEnterIdleMode(). But before the latter could grab the lock, an RPC - // came in and onCallBegin() noticed that the calls count is negative. + // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same - // time, all of them notice a negative calls count in onCallBegin and get + // time, all of them notice a negative calls count in OnCallBegin and get // here. The first one to get the lock would got the channel to exit idle. // // Either way, nothing to do here. 
return nil } - if err := i.enforcer.exitIdleMode(); err != nil { + if err := m.enforcer.ExitIdleMode(); err != nil { return fmt.Errorf("channel failed to exit idle mode: %v", err) } // Undo the idle entry process. This also respects any new RPC attempts. - atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) - i.actuallyIdle = false + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.actuallyIdle = false // Start a new timer to fire after the configured idle timeout. - i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) + m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout) return nil } -// onCallEnd is invoked at the end of every RPC. -func (i *idlenessManagerImpl) onCallEnd() { - if i.isClosed() { +// OnCallEnd is invoked at the end of every RPC. +func (m *manager) OnCallEnd() { + if m.isClosed() { return } // Record the time at which the most recent call finished. - atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) + atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) // Decrement the active calls count. This count can temporarily go negative // when the timer callback is in the process of moving the channel to idle // mode, but one or more RPCs come in and complete before the timer callback // can get done with the process of moving to idle mode. 
- atomic.AddInt32(&i.activeCallsCount, -1) + atomic.AddInt32(&m.activeCallsCount, -1) } -func (i *idlenessManagerImpl) isClosed() bool { - return atomic.LoadInt32(&i.closed) == 1 +func (m *manager) isClosed() bool { + return atomic.LoadInt32(&m.closed) == 1 } -func (i *idlenessManagerImpl) close() { - atomic.StoreInt32(&i.closed, 1) +func (m *manager) Close() { + atomic.StoreInt32(&m.closed, 1) - i.idleMu.Lock() - i.timer.Stop() - i.timer = nil - i.idleMu.Unlock() + m.idleMu.Lock() + m.timer.Stop() + m.timer = nil + m.idleMu.Unlock() } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 42ff39c84..0d94c63e0 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -30,7 +30,7 @@ import ( var ( // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -38,8 +38,12 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second + // KeepaliveMinServerPingTime is the minimum ping interval for servers. + // This must be 1s by default, but tests may wish to set it lower for + // convenience. + KeepaliveMinServerPingTime = time.Second // ParseServiceConfig parses a JSON representation of the service config. - ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult + ParseServiceConfig any // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and // parsing. Both a and b should be returned by ParseServiceConfig. 
// This function compares the config without rawJSON stripped, in case the @@ -49,33 +53,33 @@ var ( // given name. This is set by package certprovider for use from xDS // bootstrap code while parsing certificate provider configs in the // bootstrap file. - GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + GetCertificateProviderBuilder any // func(string) certprovider.Builder // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. - GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. - GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials // CanonicalString returns the canonical string of the code defined here: // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - CanonicalString interface{} // func (codes.Code) string + CanonicalString any // func (codes.Code) string // DrainServerTransports initiates a graceful close of existing connections // on a gRPC server accepted on the provided listener address. An // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. 
- DrainServerTransports interface{} // func(*grpc.Server, string) + DrainServerTransports any // func(*grpc.Server, string) // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - AddGlobalServerOptions interface{} // func(opt ...ServerOption) + AddGlobalServerOptions any // func(opt ...ServerOption) // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. // @@ -88,14 +92,14 @@ var ( // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - AddGlobalDialOptions interface{} // func(opt ...DialOption) + AddGlobalDialOptions any // func(opt ...DialOption) // DisableGlobalDialOptions returns a DialOption that prevents the // ClientConn from applying the global DialOptions (set via // AddGlobalDialOptions). // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - DisableGlobalDialOptions interface{} // func() grpc.DialOption + DisableGlobalDialOptions any // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. // @@ -104,23 +108,26 @@ var ( ClearGlobalDialOptions func() // JoinDialOptions combines the dial options passed as arguments into a // single dial option. - JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption + JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption // JoinServerOptions combines the server options passed as arguments into a // single server option. 
- JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption // WithBinaryLogger returns a DialOption that specifies the binary logger // for a ClientConn. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption // BinaryLogger returns a ServerOption that can set the binary logger for a // server. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + BinaryLogger any // func(binarylog.Logger) grpc.ServerOption + + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using // the provided xds bootstrap config instead of the global configuration from @@ -131,7 +138,7 @@ var ( // // This function should ONLY be used for testing and may not work with some // other features, including the CSDS service. - NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment @@ -163,7 +170,17 @@ var ( UnregisterRBACHTTPFilterForTesting func() // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. 
- ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) + ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) + + // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra + // metadata to RPCs. + GRPCResolverSchemeExtraMetadata string = "xds" + + // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. + EnterIdleModeForTesting any // func(*grpc.ClientConn) error + + // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. + ExitIdleModeForTesting any // func(*grpc.ClientConn) error ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -174,7 +191,7 @@ var ( // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error +type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error const ( // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. 
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index c82e608e0..900bfb716 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -35,7 +35,7 @@ const mdKey = mdKeyType("grpc.internal.address.metadata") type mdValue metadata.MD -func (m mdValue) Equal(o interface{}) bool { +func (m mdValue) Equal(o any) bool { om, ok := o.(mdValue) if !ok { return false diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go index 0177af4b5..703319137 100644 --- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -35,7 +35,7 @@ const jsonIndent = " " // ToJSON marshals the input into a json string. // // If marshal fails, it falls back to fmt.Sprintf("%+v"). -func ToJSON(e interface{}) string { +func ToJSON(e any) string { switch ee := e.(type) { case protov1.Message: mm := jsonpb.Marshaler{Indent: jsonIndent} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index c7a18a948..f0603871c 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -92,7 +92,7 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. 
On // any other error, the stream is aborted and the error contains the RPC @@ -101,7 +101,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientInterceptor is an interceptor for gRPC client streams. diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 09a667f33..99e1e5b36 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -62,7 +62,8 @@ const ( defaultPort = "443" defaultDNSSvrPort = "53" golang = "GO" - // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + // txtPrefix is the prefix string to be prepended to the host name for txt + // record lookup. txtPrefix = "_grpc_config." // In DNS, service config is encoded in a TXT record via the mechanism // described in RFC-1464 using the attribute name grpc_config. 
@@ -86,14 +87,14 @@ var ( minDNSResRate = 30 * time.Second ) -var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { - return func(ctx context.Context, network, address string) (net.Conn, error) { +var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { + return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer - return dialer.DialContext(ctx, network, authority) + return dialer.DialContext(ctx, network, address) } } -var customAuthorityResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (netResolver, error) { host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -103,7 +104,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: customAuthorityDialler(authorityWithPort), + Dial: addressDialer(authorityWithPort), }, nil } @@ -114,7 +115,8 @@ func NewBuilder() resolver.Builder { type dnsBuilder struct{} -// Build creates and starts a DNS resolver that watches the name resolution of the target. +// Build creates and starts a DNS resolver that watches the name resolution of +// the target. 
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { host, port, err := parseTarget(target.Endpoint(), defaultPort) if err != nil { @@ -143,7 +145,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.URL.Host) + d.resolver, err = newNetResolver(target.URL.Host) if err != nil { return nil, err } @@ -180,19 +182,22 @@ type dnsResolver struct { ctx context.Context cancel context.CancelFunc cc resolver.ClientConn - // rn channel is used by ResolveNow() to force an immediate resolution of the target. + // rn channel is used by ResolveNow() to force an immediate resolution of the + // target. rn chan struct{} - // wg is used to enforce Close() to return after the watcher() goroutine has finished. - // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we - // replace the real lookup functions with mocked ones to facilitate testing. - // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes - // will warns lookup (READ the lookup function pointers) inside watcher() goroutine - // has data race with replaceNetFunc (WRITE the lookup function pointers). + // wg is used to enforce Close() to return after the watcher() goroutine has + // finished. Otherwise, data race will be possible. [Race Example] in + // dns_resolver_test we replace the real lookup functions with mocked ones to + // facilitate testing. If Close() doesn't wait for watcher() goroutine + // finishes, race detector sometimes will warns lookup (READ the lookup + // function pointers) inside watcher() goroutine has data race with + // replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup disableServiceConfig bool } -// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. 
+// ResolveNow invoke an immediate resolution of the target that this +// dnsResolver watches. func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { select { case d.rn <- struct{}{}: @@ -220,8 +225,8 @@ func (d *dnsResolver) watcher() { var timer *time.Timer if err == nil { - // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least - // to prevent constantly re-resolving. + // Success resolving, wait for the next ResolveNow. However, also wait 30 + // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 timer = newTimerDNSResRate(minDNSResRate) select { @@ -231,7 +236,8 @@ func (d *dnsResolver) watcher() { case <-d.rn: } } else { - // Poll on an error found in DNS Resolver or an error received from ClientConn. + // Poll on an error found in DNS Resolver or an error received from + // ClientConn. timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) backoffIndex++ } @@ -278,7 +284,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { } func handleDNSError(err error, lookupType string) error { - if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + dnsErr, ok := err.(*net.DNSError) + if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). @@ -307,10 +314,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { res += s } - // TXT record must have "grpc_config=" attribute in order to be used as service config. + // TXT record must have "grpc_config=" attribute in order to be used as + // service config. if !strings.HasPrefix(res, txtAttribute) { logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) - // This is not an error; it is the equivalent of not having a service config. 
+ // This is not an error; it is the equivalent of not having a service + // config. return nil } sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) @@ -352,9 +361,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and +// ok = true. func formatIP(addr string) (addrIP string, ok bool) { ip := net.ParseIP(addr) if ip == nil { @@ -366,10 +376,10 @@ func formatIP(addr string) (addrIP string, ok bool) { return "[" + addr + "]", true } -// parseTarget takes the user input target string and default port, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. +// parseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. 
// examples: // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" @@ -385,12 +395,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { } if host, port, err = net.SplitHostPort(target); err == nil { if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. return "", "", errEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. host = "localhost" } return host, port, nil diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index b0ead4f54..03ef2fedd 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -43,13 +43,41 @@ type Status struct { s *spb.Status } +// NewWithProto returns a new status including details from statusProto. This +// is meant to be used by the gRPC library only. +func NewWithProto(code codes.Code, message string, statusProto []string) *Status { + if len(statusProto) != 1 { + // No grpc-status-details bin header, or multiple; just ignore. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. 
+ return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { return &Status{s: &spb.Status{Code: int32(c), Message: msg}} } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -64,7 +92,7 @@ func Err(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Err(c, fmt.Sprintf(format, a...)) } @@ -120,11 +148,11 @@ func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. 
-func (s *Status) Details() []interface{} { +func (s *Status) Details() []any { if s == nil || s.s == nil { return nil } - details := make([]interface{}, 0, len(s.s.Details)) + details := make([]any, 0, len(s.s.Details)) for _, any := range s.s.Details { detail := &ptypes.DynamicAny{} if err := ptypes.UnmarshalAny(any, detail); err != nil { diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index be5a9c81e..b330ccedc 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -40,7 +40,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { } type itemNode struct { - it interface{} + it any next *itemNode } @@ -49,7 +49,7 @@ type itemList struct { tail *itemNode } -func (il *itemList) enqueue(i interface{}) { +func (il *itemList) enqueue(i any) { n := &itemNode{it: i} if il.tail == nil { il.head, il.tail = n, n @@ -61,11 +61,11 @@ func (il *itemList) enqueue(i interface{}) { // peek returns the first item in the list without removing it from the // list. -func (il *itemList) peek() interface{} { +func (il *itemList) peek() any { return il.head.it } -func (il *itemList) dequeue() interface{} { +func (il *itemList) dequeue() any { if il.head == nil { return nil } @@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error { return err } -func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { +func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -373,7 +373,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b } // Note argument f should never be nil. 
-func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { +func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { c.mu.Lock() if c.err != nil { c.mu.Unlock() @@ -387,7 +387,7 @@ func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bo return true, nil } -func (c *controlBuffer) get(block bool) (interface{}, error) { +func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() if c.err != nil { @@ -830,7 +830,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } -func (l *loopyWriter) handle(i interface{}) error { +func (l *loopyWriter) handle(i any) error { switch i := i.(type) { case *incomingWindowUpdate: l.incomingWindowUpdateHandler(i) diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 98f80e3fa..17f7a21b5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -220,18 +220,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } + s.hdrMu.Lock() if p := st.Proto(); p != nil && len(p.Details) > 0 { + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. panic(err) } - h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) } - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { + if len(s.trailer) > 0 { + for k, vv := range s.trailer { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. 
if isReservedHeader(k) { continue @@ -243,6 +245,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } + s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -287,7 +290,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } // writeCustomHeaders sets custom headers set on the stream via SetHeader -// on the first write call (Write, WriteHeader, or WriteStatus). +// on the first write call (Write, WriteHeader, or WriteStatus) func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { h := ht.rw.Header() @@ -344,7 +347,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. ctx := ht.req.Context() diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 326bf0848..d6f5c4935 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -330,7 +330,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*Stream), @@ -762,7 +762,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, firstTry := true var ch chan struct{} transportDrainRequired := false - checkForStreamQuota := 
func(it interface{}) bool { + checkForStreamQuota := func(it any) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -800,7 +800,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } var hdrListSizeErr error - checkForHeaderListSize := func(it interface{}) bool { + checkForHeaderListSize := func(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -815,7 +815,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } for { - success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + success, err := t.controlBuf.executeAndPut(func(it any) bool { return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { @@ -927,7 +927,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(interface{}) bool { + addBackStreamQuota := func(any) bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -1080,7 +1080,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. 
func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(interface{}) bool { + updateIWS := func(any) bool { t.initialWindowSize = int32(n) t.mu.Lock() for _, s := range t.activeStreams { @@ -1233,7 +1233,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -1399,7 +1399,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { mdata = make(map[string][]string) contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string - statusGen *status.Status recvCompress string httpStatusCode *int httpStatusErr string @@ -1434,12 +1433,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { rawStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) - case "grpc-status-details-bin": - var err error - statusGen, err = decodeGRPCStatusDetails(hf.Value) - if err != nil { - headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) - } case ":status": if hf.Value == "200" { httpStatusErr = "" @@ -1505,14 +1498,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - isHeader := false - - // If headerChan hasn't been closed yet - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { - s.headerValid = true - if !endStream { - // HEADERS frame block carries a Response-Headers. - isHeader = true + // For headers, set them in s.header and close headerChan. For trailers or + // trailers-only, closeStream will set the trailers and close headerChan as + // needed. + if !endStream { + // If headerChan hasn't been closed yet (expected, given we checked it + // above, but something else could have potentially closed the whole + // stream). 
+ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. @@ -1520,15 +1514,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if len(mdata) > 0 { s.header = mdata } - } else { - // HEADERS frame block carries a Trailers-Only. - s.noHeaders = true + close(s.headerChan) } - close(s.headerChan) } for _, sh := range t.statsHandlers { - if isHeader { + if !endStream { inHeader := &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), @@ -1550,13 +1541,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - if statusGen == nil { - statusGen = status.New(rawStatusCode, grpcMessage) - } + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) - // if client received END_STREAM from server while stream was still active, send RST_STREAM - rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) + // If client received END_STREAM from server while stream was still active, + // send RST_STREAM. 
+ rstStream := s.getState() == streamActive + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) } // readServerPreface reads and handles the initial settings frame from the diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index ec4eef213..6fa1eb419 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -165,7 +165,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) // Send initial settings as connection preface to client. isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, @@ -233,7 +233,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, kp.Timeout = defaultServerKeepaliveTimeout } if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } } @@ -342,7 +342,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. 
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -561,7 +561,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { t.mu.Unlock() if t.logger.V(logLevel) { t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) @@ -592,7 +592,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - s.ctx = traceCtx(s.ctx, s.method) for _, sh := range t.stats { s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ @@ -630,7 +629,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (t *http2Server) HandleStreams(handle func(*Stream)) { defer close(t.readerDone) for { t.controlBuf.throttle() @@ -665,7 +664,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. 
} switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + if err := t.operateHeaders(frame, handle); err != nil { t.Close(err) break } @@ -850,7 +849,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -934,7 +933,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) return headerFields } -func (t *http2Server) checkForHeaderListSize(it interface{}) bool { +func (t *http2Server) checkForHeaderListSize(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -1053,12 +1052,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { + // Do not use the user's grpc-status-details-bin (if present) if we are + // even attempting to set our own. + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. 
t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 19cbb18f5..dc29d590e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -30,15 +30,13 @@ import ( "net/url" "strconv" "strings" + "sync" "time" "unicode/utf8" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) const ( @@ -87,6 +85,8 @@ var ( } ) +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" + // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -102,7 +102,6 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "grpc-status-details-bin", // Intentionally exclude grpc-previous-rpc-attempts and // grpc-retry-pushback-ms, which are "reserved", but their API // intentionally works via metadata. 
@@ -153,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { - v, err := decodeBinHeader(rawDetails) - if err != nil { - return nil, err - } - st := &spb.Status{} - if err = proto.Unmarshal(v, st); err != nil { - return nil, err - } - return status.FromProto(st), nil -} - type timeoutUnit uint8 const ( @@ -309,6 +296,7 @@ func decodeGrpcMessageUnchecked(msg string) string { } type bufWriter struct { + pool *sync.Pool buf []byte offset int batchSize int @@ -316,12 +304,17 @@ type bufWriter struct { err error } -func newBufWriter(conn net.Conn, batchSize int) *bufWriter { - return &bufWriter{ - buf: make([]byte, batchSize*2), +func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { + w := &bufWriter{ batchSize: batchSize, conn: conn, + pool: pool, + } + // this indicates that we should use non shared buf + if pool == nil { + w.buf = make([]byte, batchSize) } + return w } func (w *bufWriter) Write(b []byte) (n int, err error) { @@ -332,19 +325,34 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { n, err = w.conn.Write(b) return n, toIOError(err) } + if w.buf == nil { + b := w.pool.Get().(*[]byte) + w.buf = *b + } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) b = b[nn:] w.offset += nn n += nn if w.offset >= w.batchSize { - err = w.Flush() + err = w.flushKeepBuffer() } } return n, err } func (w *bufWriter) Flush() error { + err := w.flushKeepBuffer() + // Only release the buffer if we are in a "shared" mode + if w.buf != nil && w.pool != nil { + b := w.buf + w.pool.Put(&b) + w.buf = nil + } + return err +} + +func (w *bufWriter) flushKeepBuffer() error { if w.err != nil { return w.err } @@ -381,7 +389,10 @@ type framer struct { fr *http2.Framer } -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { +var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var 
writeBufferMutex sync.Mutex + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { if writeBufferSize < 0 { writeBufferSize = 0 } @@ -389,7 +400,11 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList if readBufferSize > 0 { r = bufio.NewReaderSize(r, readBufferSize) } - w := newBufWriter(conn, writeBufferSize) + var pool *sync.Pool + if sharedWriteBuffer { + pool = getWriteBufferPool(writeBufferSize) + } + w := newBufWriter(conn, writeBufferSize, pool) f := &framer{ writer: w, fr: http2.NewFramer(w, r), @@ -403,6 +418,24 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList return f } +func getWriteBufferPool(writeBufferSize int) *sync.Pool { + writeBufferMutex.Lock() + defer writeBufferMutex.Unlock() + size := writeBufferSize * 2 + pool, ok := writeBufferPoolMap[size] + if ok { + return pool + } + pool = &sync.Pool{ + New: func() any { + b := make([]byte, size) + return &b + }, + } + writeBufferPoolMap[size] = pool + return pool +} + // parseDialTarget returns the network and address to pass to dialer. func parseDialTarget(target string) (string, string) { net := "tcp" diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index aa1c89659..aac056e72 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -43,10 +43,6 @@ import ( "google.golang.org/grpc/tap" ) -// ErrNoHeaders is used as a signal that a trailers only response was received, -// and is not a real error. 
-var ErrNoHeaders = errors.New("stream has no headers") - const logLevel = 2 type bufferPool struct { @@ -56,7 +52,7 @@ type bufferPool struct { func newBufferPool() *bufferPool { return &bufferPool{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return new(bytes.Buffer) }, }, @@ -390,14 +386,10 @@ func (s *Stream) Header() (metadata.MD, error) { } s.waitOnHeader() - if !s.headerValid { + if !s.headerValid || s.noHeaders { return nil, s.status.Err() } - if s.noHeaders { - return nil, ErrNoHeaders - } - return s.header.Copy(), nil } @@ -559,6 +551,7 @@ type ServerConfig struct { InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int + SharedWriteBuffer bool ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 @@ -592,6 +585,8 @@ type ConnectOptions struct { WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int + // SharedWriteBuffer indicates whether connections should reuse write buffer + SharedWriteBuffer bool // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. @@ -703,7 +698,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream), func(context.Context, string) context.Context) + HandleStreams(func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. @@ -736,7 +731,7 @@ type ServerTransport interface { } // connectionErrorf creates an ConnectionError with the specified error description. 
-func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { +func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 02f975951..236837f41 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -28,21 +28,26 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { - mu sync.Mutex - done bool - idle bool - blockingCh chan struct{} - picker balancer.Picker + mu sync.Mutex + done bool + idle bool + blockingCh chan struct{} + picker balancer.Picker + statsHandlers []stats.Handler // to record blocking picker calls } -func newPickerWrapper() *pickerWrapper { - return &pickerWrapper{blockingCh: make(chan struct{})} +func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { + return &pickerWrapper{ + blockingCh: make(chan struct{}), + statsHandlers: statsHandlers, + } } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. @@ -95,6 +100,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var ch chan struct{} var lastPickErr error + for { pw.mu.Lock() if pw.done { @@ -129,6 +135,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. continue } + // If the channel is set, it means that the pick call had to wait for a + // new picker at some point. 
Either it's the first iteration and this + // function received the first picker, or a picker errored with + // ErrNoSubConnAvailable or errored with failfast set to false, which + // will trigger a continue to the next iteration. In the first case this + // conditional will hit if this call had to block (the channel is set). + // In the second case, the only way it will get to this conditional is + // if there is a new picker. + if ch != nil { + for _, sh := range pw.statsHandlers { + sh.HandleRPC(ctx, &stats.PickerUpdated{}) + } + } + ch = pw.blockingCh p := pw.picker pw.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index abe266b02..2e9cf66b4 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -26,12 +26,18 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) -// PickFirstBalancerName is the name of the pick_first balancer. -const PickFirstBalancerName = "pick_first" +const ( + // PickFirstBalancerName is the name of the pick_first balancer. 
+ PickFirstBalancerName = "pick_first" + logPrefix = "[pick-first-lb %p] " +) func newPickfirstBuilder() balancer.Builder { return &pickfirstBuilder{} @@ -40,7 +46,9 @@ func newPickfirstBuilder() balancer.Builder { type pickfirstBuilder struct{} func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &pickfirstBalancer{cc: cc} + b := &pickfirstBalancer{cc: cc} + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b } func (*pickfirstBuilder) Name() string { @@ -57,23 +65,36 @@ type pfConfig struct { } func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - cfg := &pfConfig{} - if err := json.Unmarshal(js, cfg); err != nil { + if !envconfig.PickFirstLBConfig { + // Prior to supporting loadbalancing configuration, the pick_first LB + // policy did not implement the balancer.ConfigParser interface. This + // meant that if a non-empty configuration was passed to it, the service + // config unmarshaling code would throw a warning log, but would + // continue using the pick_first LB policy. The code below ensures the + // same behavior is retained if the env var is not set. 
+ if string(js) != "{}" { + logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js)) + } + return nil, nil + } + + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) } return cfg, nil } type pickfirstBalancer struct { + logger *internalgrpclog.PrefixLogger state connectivity.State cc balancer.ClientConn subConn balancer.SubConn - cfg *pfConfig } func (b *pickfirstBalancer) ResolverError(err error) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) } if b.subConn == nil { b.state = connectivity.TransientFailure @@ -96,35 +117,44 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. if b.subConn != nil { - // Remove the old subConn. All addresses were removed, so it is no longer - // valid. - b.cc.RemoveSubConn(b.subConn) + // Shut down the old subConn. All addresses were removed, so it is + // no longer valid. + b.subConn.Shutdown() b.subConn = nil } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if state.BalancerConfig != nil { - cfg, ok := state.BalancerConfig.(*pfConfig) - if !ok { - return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) - } - b.cfg = cfg + // We don't have to guard this block with the env var because ParseConfig + // already does so. 
+ cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) } - - if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + if b.subConn != nil { b.cc.UpdateAddresses(b.subConn, addrs) return nil } - subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + var subConn balancer.SubConn + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(subConn, state) + }, + }) if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + if b.logger.V(2) { + b.logger.Infof("Failed to create new SubConn: %v", err) } b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ @@ -143,13 +173,19 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState return nil } +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. 
func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if b.logger.V(2) { + b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) } if b.subConn != subConn { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") + if b.logger.V(2) { + b.logger.Infof("Ignored state change because subConn is not recognized") } return } diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index cd4554785..73bd63364 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -37,7 +37,7 @@ type PreparedMsg struct { } // Encode marshalls and compresses the message using the codec and compressor for the stream. -func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { +func (p *PreparedMsg) Encode(s Stream, msg any) error { ctx := s.Context() rpcInfo, ok := rpcInfoFromContext(ctx) if !ok { diff --git a/vendor/google.golang.org/grpc/reflection/adapt.go b/vendor/google.golang.org/grpc/reflection/adapt.go new file mode 100644 index 000000000..33b907a36 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/adapt.go @@ -0,0 +1,187 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package reflection + +import ( + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +// asV1Alpha returns an implementation of the v1alpha version of the reflection +// interface that delegates all calls to the given v1 version. +func asV1Alpha(svr v1reflectiongrpc.ServerReflectionServer) v1alphareflectiongrpc.ServerReflectionServer { + return v1AlphaServerImpl{svr: svr} +} + +type v1AlphaServerImpl struct { + svr v1reflectiongrpc.ServerReflectionServer +} + +func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { + return s.svr.ServerReflectionInfo(v1AlphaServerStreamAdapter{stream}) +} + +type v1AlphaServerStreamAdapter struct { + v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer +} + +func (s v1AlphaServerStreamAdapter) Send(response *v1reflectionpb.ServerReflectionResponse) error { + return s.ServerReflection_ServerReflectionInfoServer.Send(v1ToV1AlphaResponse(response)) +} + +func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequest, error) { + resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv() + if err != nil { + return nil, err + } + return v1AlphaToV1Request(resp), nil +} + +func v1ToV1AlphaResponse(v1 
*v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse { + var v1alpha v1alphareflectionpb.ServerReflectionResponse + v1alpha.ValidHost = v1.ValidHost + if v1.OriginalRequest != nil { + v1alpha.OriginalRequest = v1ToV1AlphaRequest(v1.OriginalRequest) + } + switch mr := v1.MessageResponse.(type) { + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &v1alphareflectionpb.ServiceResponse{ + Name: svc.GetName(), + } + } + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1alphareflectionpb.ListServiceResponse{ + Service: svcs, + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphareflectionpb.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return 
&v1alpha +} + +func v1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest { + var v1 v1reflectionpb.ServerReflectionRequest + v1.Host = v1alpha.Host + switch mr := v1alpha.MessageRequest.(type) { + case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr.FileContainingExtension != nil { + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1reflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + case *v1alphareflectionpb.ServerReflectionRequest_ListServices: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + default: + // no value set + } + return &v1 +} + +func v1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest { + var v1alpha v1alphareflectionpb.ServerReflectionRequest + v1alpha.Host = v1.Host + switch mr := v1.MessageRequest.(type) { + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + } + case 
*v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + } + case *v1reflectionpb.ServerReflectionRequest_ListServices: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + } + default: + // no value set + } + return &v1alpha +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go new file mode 100644 index 000000000..6f5c786b2 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -0,0 +1,953 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.22.0 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message sent by the client when calling ServerReflectionInfo method. +type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. 
+ // + // Types that are assignable to MessageRequest: + // + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (x *ServerReflectionRequest) Reset() { + *x = ServerReflectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionRequest) ProtoMessage() {} + +func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. 
+func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerReflectionRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +func (x *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +func (x *ServerReflectionRequest) GetListServices() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + // Find a proto file by the file name. + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingSymbol struct { + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. .[.] 
or .). + FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingExtension struct { + // Find the proto file which defines an extension extending the given + // message type with the given field number. + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` +} + +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + // Finds the tag numbers used by all known extensions of the given message + // type, and appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // . + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` +} + +type ServerReflectionRequest_ListServices struct { + // List the full names of registered services. The content will not be + // checked. 
+ ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} + +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +type ExtensionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Fully-qualified type name. The format should be . + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionRequest) Reset() { + *x = ExtensionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRequest) ProtoMessage() {} + +func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. 
+func (*ExtensionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1} +} + +func (x *ExtensionRequest) GetContainingType() string { + if x != nil { + return x.ContainingType + } + return "" +} + +func (x *ExtensionRequest) GetExtensionNumber() int32 { + if x != nil { + return x.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method. +type ServerReflectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server sets one of the following fields according to the message_request + // in the request. + // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with 
transitive dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + // This message is used to answer all_extension_numbers_of_type requests. + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + // This message is used to answer list_services requests. + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + // This message is used when an error occurs. + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. 
+type FileDescriptorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (x *FileDescriptorResponse) Reset() { + *x = FileDescriptorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorResponse) ProtoMessage() {} + +func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3} +} + +func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if x != nil { + return x.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +type ExtensionNumberResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of the base type, including the package name. The format + // is . 
+ BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionNumberResponse) Reset() { + *x = ExtensionNumberResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionNumberResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionNumberResponse) ProtoMessage() {} + +func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4} +} + +func (x *ExtensionNumberResponse) GetBaseTypeName() string { + if x != nil { + return x.BaseTypeName + } + return "" +} + +func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if x != nil { + return x.ExtensionNumber + } + return nil +} + +// A list of ServiceResponse sent by the server answering list_services request. +type ListServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. 
+ Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` +} + +func (x *ListServiceResponse) Reset() { + *x = ListServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceResponse) ProtoMessage() {} + +func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. +func (*ListServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5} +} + +func (x *ListServiceResponse) GetService() []*ServiceResponse { + if x != nil { + return x.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +type ServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of a registered service, including its package name. The format + // is . 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ServiceResponse) Reset() { + *x = ServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceResponse) ProtoMessage() {} + +func (x *ServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6} +} + +func (x *ServiceResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field uses the error codes defined in grpc::StatusCode. 
+ ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. 
+func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7} +} + +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xf3, 0x02, 0x0a, 0x17, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x62, 0x0a, + 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 
0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, + 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, + 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xae, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 
0x64, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, + 0x6f, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x32, 0x89, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x75, 0x0a, 0x14, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x2b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 
0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, + 0x42, 0x66, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1_reflection_proto_rawDescData = file_grpc_reflection_v1_reflection_proto_rawDesc +) + +func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1_reflection_proto_rawDescData) + }) + return file_grpc_reflection_v1_reflection_proto_rawDescData +} + +var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1_reflection_proto_goTypes = []interface{}{ + (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: 
grpc.reflection.v1.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: grpc.reflection.v1.ListServiceResponse + (*ServiceResponse)(nil), // 6: grpc.reflection.v1.ServiceResponse + (*ErrorResponse)(nil), // 7: grpc.reflection.v1.ErrorResponse +} +var file_grpc_reflection_v1_reflection_proto_depIdxs = []int32{ + 1, // 0: grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1.ExtensionRequest + 0, // 1: grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1.ServerReflectionRequest + 3, // 2: grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1.FileDescriptorResponse + 4, // 3: grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1.ExtensionNumberResponse + 5, // 4: grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1.ListServiceResponse + 7, // 5: grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1.ErrorResponse + 6, // 6: grpc.reflection.v1.ListServiceResponse.service:type_name -> grpc.reflection.v1.ServiceResponse + 0, // 7: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1.ServerReflectionRequest + 2, // 8: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1.ServerReflectionResponse + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { 
file_grpc_reflection_v1_reflection_proto_init() } +func file_grpc_reflection_v1_reflection_proto_init() { + if File_grpc_reflection_v1_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { 
+ switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_reflection_v1_reflection_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_reflection_v1_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1_reflection_proto_msgTypes, + }.Build() + File_grpc_reflection_v1_reflection_proto = out.File + file_grpc_reflection_v1_reflection_proto_rawDesc = nil + file_grpc_reflection_v1_reflection_proto_goTypes = nil + file_grpc_reflection_v1_reflection_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go 
b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go new file mode 100644 index 000000000..62b56a8be --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go @@ -0,0 +1,164 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" +) + +// ServerReflectionClient is the client API for ServerReflection service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. 
+// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. +type UnimplementedServerReflectionServer struct { +} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. +type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != 
nil { + return nil, err + } + return m, nil +} + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/reflection/v1/reflection.proto", +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index d54c07676..69fbfb621 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. 
diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index e2f9ebfbb..76dae09d8 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -48,8 +48,9 @@ import ( "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" - v1alphagrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" - v1alphapb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" ) // GRPCServer is the interface provided by a gRPC server. It is implemented by @@ -63,9 +64,19 @@ type GRPCServer interface { var _ GRPCServer = (*grpc.Server)(nil) // Register registers the server reflection service on the given gRPC server. +// Both the v1 and v1alpha versions are registered. func Register(s GRPCServer) { - svr := NewServer(ServerOptions{Services: s}) - v1alphagrpc.RegisterServerReflectionServer(s, svr) + svr := NewServerV1(ServerOptions{Services: s}) + v1alphareflectiongrpc.RegisterServerReflectionServer(s, asV1Alpha(svr)) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) +} + +// RegisterV1 registers only the v1 version of the server reflection service +// on the given gRPC server. Many clients may only support v1alpha so most +// users should use Register instead, at least until clients have upgraded. 
+func RegisterV1(s GRPCServer) { + svr := NewServerV1(ServerOptions{Services: s}) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) } // ServiceInfoProvider is an interface used to retrieve metadata about the @@ -120,13 +131,27 @@ type ServerOptions struct { // NewServer returns a reflection server implementation using the given options. // This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. For backwards compatibility reasons, +// this returns the v1alpha version of the reflection server. For a v1 version +// of the reflection server, see NewServerV1. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServer(opts ServerOptions) v1alphareflectiongrpc.ServerReflectionServer { + return asV1Alpha(NewServerV1(opts)) +} + +// NewServerV1 returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages // should prefer to use Register instead. // // # Experimental // // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. -func NewServer(opts ServerOptions) v1alphagrpc.ServerReflectionServer { +func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer { if opts.DescriptorResolver == nil { opts.DescriptorResolver = protoregistry.GlobalFiles } @@ -141,7 +166,7 @@ func NewServer(opts ServerOptions) v1alphagrpc.ServerReflectionServer { } type serverReflectionServer struct { - v1alphagrpc.UnimplementedServerReflectionServer + v1alphareflectiongrpc.UnimplementedServerReflectionServer s ServiceInfoProvider descResolver protodesc.Resolver extResolver ExtensionResolver @@ -215,11 +240,11 @@ func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([] } // listServices returns the names of services this server exposes. 
-func (s *serverReflectionServer) listServices() []*v1alphapb.ServiceResponse { +func (s *serverReflectionServer) listServices() []*v1reflectionpb.ServiceResponse { serviceInfo := s.s.GetServiceInfo() - resp := make([]*v1alphapb.ServiceResponse, 0, len(serviceInfo)) + resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo)) for svc := range serviceInfo { - resp = append(resp, &v1alphapb.ServiceResponse{Name: svc}) + resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc}) } sort.Slice(resp, func(i, j int) bool { return resp[i].Name < resp[j].Name @@ -228,7 +253,7 @@ func (s *serverReflectionServer) listServices() []*v1alphapb.ServiceResponse { } // ServerReflectionInfo is the reflection service handler. -func (s *serverReflectionServer) ServerReflectionInfo(stream v1alphagrpc.ServerReflection_ServerReflectionInfoServer) error { +func (s *serverReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { sentFileDescriptors := make(map[string]bool) for { in, err := stream.Recv() @@ -239,79 +264,79 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream v1alphagrpc.ServerR return err } - out := &v1alphapb.ServerReflectionResponse{ + out := &v1reflectionpb.ServerReflectionResponse{ ValidHost: in.Host, OriginalRequest: in, } switch req := in.MessageRequest.(type) { - case *v1alphapb.ServerReflectionRequest_FileByFilename: + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: var b [][]byte fd, err := s.descResolver.FindFileByPath(req.FileByFilename) if err == nil { b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) } if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse 
= &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1alphapb.ServerReflectionRequest_FileContainingSymbol: + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1alphapb.ServerReflectionRequest_FileContainingExtension: + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: typeName := req.FileContainingExtension.ContainingType extNum := req.FileContainingExtension.ExtensionNumber b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ 
- FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1alphapb.ServerReflectionRequest_AllExtensionNumbersOfType: + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1alphapb.ExtensionNumberResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ BaseTypeName: req.AllExtensionNumbersOfType, ExtensionNumber: extNums, }, } } - case *v1alphapb.ServerReflectionRequest_ListServices: - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1alphapb.ListServiceResponse{ + case *v1reflectionpb.ServerReflectionRequest_ListServices: + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1reflectionpb.ListServiceResponse{ Service: s.listServices(), }, } diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index efcb7f3ef..804be887d 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -20,7 +20,7 @@ package resolver type addressMapEntry struct { addr Address - value 
interface{} + value any } // AddressMap is a map of addresses to arbitrary values taking into account @@ -69,7 +69,7 @@ func (l addressMapEntryList) find(addr Address) int { } // Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { +func (a *AddressMap) Get(addr Address) (value any, ok bool) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -79,7 +79,7 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { } // Set updates or adds the value to the address in the map. -func (a *AddressMap) Set(addr Address, value interface{}) { +func (a *AddressMap) Set(addr Address, value any) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -127,8 +127,8 @@ func (a *AddressMap) Keys() []Address { } // Values returns a slice of all current map values. -func (a *AddressMap) Values() []interface{} { - ret := make([]interface{}, 0, a.Len()) +func (a *AddressMap) Values() []any { + ret := make([]any, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { ret = append(ret, entry.value) diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 353c10b69..11384e228 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -77,25 +77,6 @@ func GetDefaultScheme() string { return defaultScheme } -// AddressType indicates the address type returned by name resolution. -// -// Deprecated: use Attributes in Address instead. -type AddressType uint8 - -const ( - // Backend indicates the address is for a backend server. - // - // Deprecated: use Attributes in Address instead. - Backend AddressType = iota - // GRPCLB indicates the address is for a grpclb load balancer. 
- // - // Deprecated: to select the GRPCLB load balancing policy, use a service - // config with a corresponding loadBalancingConfig. To supply balancer - // addresses to the GRPCLB load balancing policy, set State.Attributes - // using balancer/grpclb/state.Set. - GRPCLB -) - // Address represents a server the client connects to. // // # Experimental @@ -111,9 +92,6 @@ type Address struct { // the address, instead of the hostname from the Dial target string. In most cases, // this should not be set. // - // If Type is GRPCLB, ServerName should be the name of the remote load - // balancer, not the name of the backend. - // // WARNING: ServerName must only be populated with trusted values. It // is insecure to populate it with data from untrusted inputs since untrusted // values could be used to bypass the authority checks performed by TLS. @@ -126,27 +104,29 @@ type Address struct { // BalancerAttributes contains arbitrary data about this address intended // for consumption by the LB policy. These attributes do not affect SubConn // creation, connection establishment, handshaking, etc. - BalancerAttributes *attributes.Attributes - - // Type is the type of this address. // - // Deprecated: use Attributes instead. - Type AddressType + // Deprecated: when an Address is inside an Endpoint, this field should not + // be used, and it will eventually be removed entirely. + BalancerAttributes *attributes.Attributes // Metadata is the information associated with Addr, which may be used // to make load balancing decision. // // Deprecated: use Attributes instead. - Metadata interface{} + Metadata any } // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. +// +// This method compares all fields of the address. When used to tell apart +// addresses during subchannel creation or connection establishment, it might be +// more appropriate for the caller to implement custom equality logic. 
func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && - a.Type == o.Type && a.Metadata == o.Metadata + a.Metadata == o.Metadata } // String returns JSON formatted string representation of the address. @@ -190,11 +170,37 @@ type BuildOptions struct { Dialer func(context.Context, string) (net.Conn, error) } +// An Endpoint is one network endpoint, or server, which may have multiple +// addresses with which it can be accessed. +type Endpoint struct { + // Addresses contains a list of addresses used to access this endpoint. + Addresses []Address + + // Attributes contains arbitrary data about this endpoint intended for + // consumption by the LB policy. + Attributes *attributes.Attributes +} + // State contains the current Resolver state relevant to the ClientConn. type State struct { // Addresses is the latest set of resolved addresses for the target. + // + // If a resolver sets Addresses but does not set Endpoints, one Endpoint + // will be created for each Address before the State is passed to the LB + // policy. The BalancerAttributes of each entry in Addresses will be set + // in Endpoints.Attributes, and be cleared in the Endpoint's Address's + // BalancerAttributes. + // + // Soon, Addresses will be deprecated and replaced fully by Endpoints. Addresses []Address + // Endpoints is the latest set of resolved endpoints for the target. + // + // If a resolver produces a State containing Endpoints but not Addresses, + // it must take care to ensure the LB policies it selects will support + // Endpoints. + Endpoints []Endpoint + // ServiceConfig contains the result from parsing the latest service // config. If it is nil, it indicates no service config is present or the // resolver does not provide service configs. 
@@ -254,20 +260,7 @@ type ClientConn interface { // target does not contain a scheme or if the parsed scheme is not registered // (i.e. no corresponding resolver available to resolve the endpoint), we will // apply the default scheme, and will attempt to reparse it. -// -// Examples: -// -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - // Deprecated: use URL.Scheme instead. - Scheme string - // Deprecated: use URL.Host instead. - Authority string // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an // unregistered scheme. Any query params specified in the original dial @@ -321,10 +314,3 @@ type Resolver interface { // Close closes the resolver. Close() } - -// UnregisterForTesting removes the resolver builder with the given scheme from the -// resolver map. -// This function is for testing only. -func UnregisterForTesting(scheme string) { - delete(m, scheme) -} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index b408b3688..d68330560 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -133,7 +133,7 @@ func (ccr *ccResolverWrapper) close() { ccr.mu.Unlock() // Give enqueued callbacks a chance to finish. - <-ccr.serializer.Done + <-ccr.serializer.Done() // Spawn a goroutine to close the resolver (since it may block trying to // cleanup all allocated resources) and return early. 
@@ -152,6 +152,14 @@ func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) // which includes addresses and service config. func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { errCh := make(chan error, 1) + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } ok := ccr.serializer.Schedule(func(context.Context) { ccr.addChannelzTraceEvent(s) ccr.curState = s diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 2030736a3..b7723aa09 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -75,7 +75,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { } return &gzipCompressor{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) @@ -577,6 +577,9 @@ type parser struct { // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte + + // recvBufferPool is the pool of shared receive buffers. + recvBufferPool SharedBufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -610,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - // TODO(bradfitz,zhaoq): garbage. 
reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) + msg = p.recvBufferPool.Get(int(length)) if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF @@ -625,7 +626,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg interface{}) ([]byte, error) { +func encode(c baseCodec, msg any) ([]byte, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -692,7 +693,7 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { return hdr, data } -func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, @@ -726,12 +727,12 @@ type payloadInfo struct { } func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, d, err := p.recvMsg(maxReceiveMessageSize) + pf, buf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return nil, err } if payInfo != nil { - payInfo.compressedLength = len(d) + payInfo.compressedLength = len(buf) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { @@ -743,10 +744,10 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. 
if dc != nil { - d, err = dc.Do(bytes.NewReader(d)) - size = len(d) + buf, err = dc.Do(bytes.NewReader(buf)) + size = len(buf) } else { - d, size, err = decompress(compressor, d, maxReceiveMessageSize) + buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) } if err != nil { return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) @@ -757,7 +758,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } - return d, nil + return buf, nil } // Using compressor, decompress d, returning data and size. @@ -791,16 +792,18 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? 
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } - if err := c.Unmarshal(d, m); err != nil { + if err := c.Unmarshal(buf, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { - payInfo.uncompressedBytes = d + payInfo.uncompressedBytes = buf + } else { + p.recvBufferPool.Put(&buf) } return nil } @@ -860,19 +863,22 @@ func ErrorDesc(err error) string { // Errorf returns nil if c is OK. // // Deprecated: use status.Errorf instead. -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return status.Errorf(c, format, a...) } +var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) +var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + // toRPCErr converts an error into an error from the status package. 
func toRPCErr(err error) error { switch err { case nil, io.EOF: return err case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) + return errContextDeadline case context.Canceled: - return status.Error(codes.Canceled, err.Error()) + return errContextCanceled case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 8869cc906..8f60d4214 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -86,7 +86,7 @@ func init() { var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -99,20 +99,20 @@ type ServiceDesc struct { ServiceName string // The pointer to the service interface. Used to check whether the user // provided implementation satisfies the interface requirements. - HandlerType interface{} + HandlerType any Methods []MethodDesc Streams []StreamDesc - Metadata interface{} + Metadata any } // serviceInfo wraps information about a service. It is very similar to // ServiceDesc and is constructed from it for internal purposes. type serviceInfo struct { // Contains the implementation for the methods in this service. - serviceImpl interface{} + serviceImpl any methods map[string]*MethodDesc streams map[string]*StreamDesc - mdata interface{} + mdata any } // Server is a gRPC server to serve RPC requests. 
@@ -164,10 +164,12 @@ type serverOptions struct { initialConnWindowSize int32 writeBufferSize int readBufferSize int + sharedWriteBuffer bool connectionTimeout time.Duration maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 + recvBufferPool SharedBufferPool } var defaultServerOptions = serverOptions{ @@ -177,6 +179,7 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, + recvBufferPool: nopBufferPool{}, } var globalServerOptions []ServerOption @@ -228,6 +231,20 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } +// SharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func SharedWriteBuffer(val bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.sharedWriteBuffer = val + }) +} + // WriteBufferSize determines how much data can be batched before doing a write // on the wire. The corresponding memory allocation for this buffer will be // twice the size to keep syscalls low. The default value for this buffer is @@ -268,9 +285,9 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. 
func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - if kp.Time > 0 && kp.Time < time.Second { + if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") - kp.Time = time.Second + kp.Time = internal.KeepaliveMinServerPingTime } return newFuncServerOption(func(o *serverOptions) { @@ -550,6 +567,27 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { }) } +// RecvBufferPool returns a ServerOption that configures the server +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: StatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.recvBufferPool = bufferPool + }) +} + // serverWorkerResetThreshold defines how often the stack must be reset. Every // N requests, by spawning a new goroutine in its place, a worker can reset its // stack so that large stacks don't live in memory forever. 2^16 should allow @@ -625,7 +663,7 @@ func NewServer(opt ...ServerOption) *Server { // printf records an event in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...interface{}) { +func (s *Server) printf(format string, a ...any) { if s.events != nil { s.events.Printf(format, a...) 
} @@ -633,7 +671,7 @@ func (s *Server) printf(format string, a ...interface{}) { // errorf records an error in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...interface{}) { +func (s *Server) errorf(format string, a ...any) { if s.events != nil { s.events.Errorf(format, a...) } @@ -648,14 +686,14 @@ type ServiceRegistrar interface { // once the server has started serving. // desc describes the service and its methods and handlers. impl is the // service implementation which is passed to the method handlers. - RegisterService(desc *ServiceDesc, impl interface{}) + RegisterService(desc *ServiceDesc, impl any) } // RegisterService registers a service and its implementation to the gRPC // server. It is called from the IDL generated code. This must be called before // invoking Serve. If ss is non-nil (for legacy code), its type is checked to // ensure it implements sd.HandlerType. -func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { +func (s *Server) RegisterService(sd *ServiceDesc, ss any) { if ss != nil { ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) @@ -666,7 +704,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { s.register(sd, ss) } -func (s *Server) register(sd *ServiceDesc, ss interface{}) { +func (s *Server) register(sd *ServiceDesc, ss any) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) @@ -707,7 +745,7 @@ type MethodInfo struct { type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata interface{} + Metadata any } // GetServiceInfo returns a map from service names to ServiceInfo. 
@@ -908,6 +946,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { InitialConnWindowSize: s.opts.initialConnWindowSize, WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, + SharedWriteBuffer: s.opts.sharedWriteBuffer, ChannelzParentID: s.channelzID, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, @@ -944,7 +983,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) { f := func() { defer streamQuota.release() defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) + s.handleStream(st, stream) } if s.opts.numServerWorkers > 0 { @@ -956,12 +995,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) { } } go f() - }, func(ctx context.Context, method string) context.Context { - if !EnableTracing { - return ctx - } - tr := trace.New("grpc.Recv."+methodFamily(method), method) - return trace.NewContext(ctx, tr) }) wg.Wait() } @@ -1010,30 +1043,6 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.serveStreams(st) } -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. 
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - tr, ok := trace.FromContext(stream.Context()) - if !ok { - return nil - } - - trInfo = &traceInfo{ - tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: st.RemoteAddr(), - }, - } - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = time.Until(dl) - } - return trInfo -} - func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() @@ -1094,7 +1103,7 @@ func (s *Server) incrCallsFailed() { atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1113,7 +1122,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str err = t.Write(stream, hdr, payload, opts) if err == nil { for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) } } return err @@ -1141,7 +1150,7 @@ func chainUnaryServerInterceptors(s *Server) { } func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { return interceptors[0](ctx, req, info, 
getChainUnaryHandler(interceptors, 0, info, handler)) } } @@ -1150,12 +1159,12 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info if curr == len(interceptors)-1 { return finalHandler } - return func(ctx context.Context, req interface{}) (interface{}, error) { + return func(ctx context.Context, req any) (any, error) { return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1169,7 +1178,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. IsClientStream: false, IsServerStream: false, } - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1187,7 +1196,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. defer func() { if trInfo != nil { if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } trInfo.tr.Finish() @@ -1201,7 +1210,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1223,7 +1232,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
} } if len(binlogs) != 0 { - ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1294,7 +1302,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) @@ -1304,12 +1312,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if channelz.IsOn() { t.IncrMsgRecv() } - df := func(v interface{}) error { + df := func(v any) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), &stats.InPayload{ + sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, Length: len(d), @@ -1323,7 +1331,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: d, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), cm) + binlog.Log(ctx, cm) } } if trInfo != nil { @@ -1331,7 +1339,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } return nil } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) @@ -1356,7 +1364,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Header: h, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) + binlog.Log(ctx, sh) } } st := &binarylog.ServerTrailer{ @@ -1364,7 +1372,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return appErr @@ -1379,7 +1387,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err @@ -1406,8 +1414,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), st) + binlog.Log(ctx, sh) + binlog.Log(ctx, st) } } return err @@ -1421,8 +1429,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: reply, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), sm) + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) } } if channelz.IsOn() { @@ -1440,7 +1448,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(stream, statusOK) @@ -1468,7 +1476,7 @@ func chainStreamServerInterceptors(s *Server) { } func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { - return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) } } @@ -1477,12 +1485,12 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf if curr == len(interceptors)-1 { return finalHandler } - return func(srv interface{}, stream ServerStream) error { + return func(srv any, stream ServerStream) error { return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1496,15 +1504,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp IsServerStream: sd.ServerStreams, } for _, sh := range shs { - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, t: t, s: stream, - p: &parser{r: stream}, + p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, 
maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1518,7 +1526,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if trInfo != nil { ss.mu.Lock() if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } ss.trInfo.tr.Finish() @@ -1535,7 +1543,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } } @@ -1577,7 +1585,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), logEntry) + binlog.Log(ctx, logEntry) } } @@ -1621,7 +1629,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp trInfo.tr.LazyLog(&trInfo.firstLine, false) } var appErr error - var server interface{} + var server any if info != nil { server = info.serviceImpl } @@ -1655,7 +1663,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } t.WriteStatus(ss.s, appStatus) @@ -1673,33 +1681,50 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(ss.s, statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { + ctx := stream.Context() + var ti *traceInfo + if EnableTracing { + tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = trace.NewContext(ctx, tr) + ti = 
&traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: t.RemoteAddr(), + }, + } + if dl, ok := ctx.Deadline(); ok { + ti.firstLine.deadline = time.Until(dl) + } + } + sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } return } @@ -1709,17 +1734,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) + s.processUnaryRPC(ctx, t, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. 
if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1728,19 +1753,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } else { errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) } - if trInfo != nil { - trInfo.tr.LazyPrintf("%s", errDesc) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } } @@ -2054,12 +2079,12 @@ func validateSendCompressor(name, clientCompressors string) error { // atomicSemaphore implements a blocking, counting semaphore. acquire should be // called synchronously; release may be called asynchronously. type atomicSemaphore struct { - n int64 + n atomic.Int64 wait chan struct{} } func (q *atomicSemaphore) acquire() { - if atomic.AddInt64(&q.n, -1) < 0 { + if q.n.Add(-1) < 0 { // We ran out of quota. Block until a release happens. <-q.wait } @@ -2070,12 +2095,14 @@ func (q *atomicSemaphore) release() { // concurrent calls to acquire, but also note that with synchronous calls to // acquire, as our system does, n will never be less than -1. There are // fairness issues (queuing) to consider if this was to be generalized. - if atomic.AddInt64(&q.n, 1) <= 0 { + if q.n.Add(1) <= 0 { // An acquire was waiting on us. Unblock it. 
q.wait <- struct{}{} } } func newHandlerQuota(n uint32) *atomicSemaphore { - return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)} + a := &atomicSemaphore{wait: make(chan struct{}, 1)} + a.n.Store(int64(n)) + return a } diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go new file mode 100644 index 000000000..48a64cfe8 --- /dev/null +++ b/vendor/google.golang.org/grpc/shared_buffer_pool.go @@ -0,0 +1,154 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import "sync" + +// SharedBufferPool is a pool of buffers that can be shared, resulting in +// decreased memory allocation. Currently, in gRPC-go, it is only utilized +// for parsing incoming messages. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +type SharedBufferPool interface { + // Get returns a buffer with specified length from the pool. + // + // The returned byte slice may be not zero initialized. + Get(length int) []byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +// NewSharedBufferPool creates a simple SharedBufferPool with buckets +// of different sizes to optimize memory usage. This prevents the pool from +// wasting large amounts of memory, even when handling messages of varying sizes. 
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewSharedBufferPool() SharedBufferPool { + return &simpleSharedBufferPool{ + pools: [poolArraySize]simpleSharedBufferChildPool{ + newBytesPool(level0PoolMaxSize), + newBytesPool(level1PoolMaxSize), + newBytesPool(level2PoolMaxSize), + newBytesPool(level3PoolMaxSize), + newBytesPool(level4PoolMaxSize), + newBytesPool(0), + }, + } +} + +// simpleSharedBufferPool is a simple implementation of SharedBufferPool. +type simpleSharedBufferPool struct { + pools [poolArraySize]simpleSharedBufferChildPool +} + +func (p *simpleSharedBufferPool) Get(size int) []byte { + return p.pools[p.poolIdx(size)].Get(size) +} + +func (p *simpleSharedBufferPool) Put(bs *[]byte) { + p.pools[p.poolIdx(cap(*bs))].Put(bs) +} + +func (p *simpleSharedBufferPool) poolIdx(size int) int { + switch { + case size <= level0PoolMaxSize: + return level0PoolIdx + case size <= level1PoolMaxSize: + return level1PoolIdx + case size <= level2PoolMaxSize: + return level2PoolIdx + case size <= level3PoolMaxSize: + return level3PoolIdx + case size <= level4PoolMaxSize: + return level4PoolIdx + default: + return levelMaxPoolIdx + } +} + +const ( + level0PoolMaxSize = 16 // 16 B + level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B + level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB + level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB + level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB +) + +const ( + level0PoolIdx = iota + level1PoolIdx + level2PoolIdx + level3PoolIdx + level4PoolIdx + levelMaxPoolIdx + poolArraySize +) + +type simpleSharedBufferChildPool interface { + Get(size int) []byte + Put(any) +} + +type bufferPool struct { + sync.Pool + + defaultSize int +} + +func (p *bufferPool) Get(size int) []byte { + bs := p.Pool.Get().(*[]byte) + + if cap(*bs) < size { + p.Pool.Put(bs) + + return make([]byte, size) + } + + return (*bs)[:size] +} + +func newBytesPool(size int) 
simpleSharedBufferChildPool { + return &bufferPool{ + Pool: sync.Pool{ + New: func() any { + bs := make([]byte, size) + return &bs + }, + }, + defaultSize: size, + } +} + +// nopBufferPool is a buffer pool just makes new buffer without pooling. +type nopBufferPool struct { +} + +func (nopBufferPool) Get(length int) []byte { + return make([]byte, length) +} + +func (nopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 7a552a9b7..4ab70e2d4 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -59,12 +59,22 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} +// PickerUpdated indicates that the LB policy provided a new picker while the +// RPC was waiting for one. +type PickerUpdated struct{} + +// IsClient indicates if the stats information is from client side. Only Client +// Side interfaces with a Picker, thus always returns true. +func (*PickerUpdated) IsClient() bool { return true } + +func (*PickerUpdated) isRPCStats() {} + // InPayload contains the information for an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte @@ -134,7 +144,7 @@ type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte // Length is the size of the uncompressed payload data. 
Does not include any diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index bcf2e4d81..a93360efb 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -50,7 +50,7 @@ func New(c codes.Code, msg string) *Status { } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -60,7 +60,7 @@ func Error(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Error(c, fmt.Sprintf(format, a...)) } @@ -99,25 +99,27 @@ func FromError(err error) (s *Status, ok bool) { } type grpcstatus interface{ GRPCStatus() *Status } if gs, ok := err.(grpcstatus); ok { - if gs.GRPCStatus() == nil { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { // Error has status nil, which maps to codes.OK. There // is no sensible behavior for this, so we turn it into // an error with codes.Unknown and discard the existing // status. return New(codes.Unknown, err.Error()), false } - return gs.GRPCStatus(), true + return grpcStatus, true } var gs grpcstatus if errors.As(err, &gs) { - if gs.GRPCStatus() == nil { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { // Error wraps an error that has status nil, which maps // to codes.OK. There is no sensible behavior for this, // so we turn it into an error with codes.Unknown and // discard the existing status. 
return New(codes.Unknown, err.Error()), false } - p := gs.GRPCStatus().Proto() + p := grpcStatus.Proto() p.Message = err.Error() return status.FromProto(p), true } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 10092685b..b14b2fbea 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" @@ -54,7 +55,7 @@ import ( // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type StreamHandler func(srv interface{}, stream ServerStream) error +type StreamHandler func(srv any, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used // on the server when registering services and on the client when initiating @@ -79,9 +80,9 @@ type Stream interface { // Deprecated: See ClientStream and ServerStream documentation instead. Context() context.Context // Deprecated: See ClientStream and ServerStream documentation instead. - SendMsg(m interface{}) error + SendMsg(m any) error // Deprecated: See ClientStream and ServerStream documentation instead. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientStream defines the client-side behavior of a streaming RPC. @@ -90,7 +91,9 @@ type Stream interface { // status package. type ClientStream interface { // Header returns the header metadata received from the server if there - // is any. It blocks if the metadata is not ready to read. + // is any. It blocks if the metadata is not ready to read. 
If the metadata + // is nil and the error is also nil, then the stream was terminated without + // headers, and the status can be discovered by calling RecvMsg. Header() (metadata.MD, error) // Trailer returns the trailer metadata from the server, if there is any. // It must only be called after stream.CloseAndRecv has returned, or @@ -126,7 +129,7 @@ type ClientStream interface { // // It is not safe to modify the message after calling SendMsg. Tracing // libraries and stats handlers may use the message lazily. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -135,7 +138,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // NewStream creates a new Stream for the client side. This is typically @@ -155,11 +158,6 @@ type ClientStream interface { // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. 
func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - if err := cc.idlenessMgr.onCallBegin(); err != nil { - return nil, err - } - defer cc.idlenessMgr.onCallEnd() - // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -176,6 +174,16 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + // Start tracking the RPC for idleness purposes. This is where a stream is + // created for both streaming and unary RPCs, and hence is a good place to + // track active RPC count. + if err := cc.idlenessMgr.OnCallBegin(); err != nil { + return nil, err + } + // Add a calloption, to decrement the active call count, that gets executed + // when the RPC completes. + opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { // validate md if err := imetadata.Validate(md); err != nil { @@ -433,7 +441,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.URL.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. 
ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( @@ -507,7 +515,7 @@ func (a *csAttempt) newStream() error { return toRPCErr(nse.Err) } a.s = s - a.p = &parser{r: s} + a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } @@ -788,23 +796,24 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD - noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() - if err == transport.ErrNoHeaders { - noHeader = true - return nil - } return toRPCErr(err) }, cs.commitAttemptLocked) + if m == nil && err == nil { + // The stream ended with success. Finish the clientStream. + err = io.EOF + } + if err != nil { cs.finish(err) - return nil, err + // Do not return the error. The user should get it by calling Recv(). + return nil, nil } - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { // Only log if binary log is on and header has not been logged, and // there is actually headers to log. 
logEntry := &binarylog.ServerHeader{ @@ -820,6 +829,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { binlog.Log(cs.ctx, logEntry) } } + return m, nil } @@ -860,7 +870,7 @@ func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error cs.buffer = append(cs.buffer, op) } -func (cs *clientStream) SendMsg(m interface{}) (err error) { +func (cs *clientStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -904,7 +914,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { return err } -func (cs *clientStream) RecvMsg(m interface{}) error { +func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() @@ -928,24 +938,6 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - - if len(cs.binlogs) != 0 { - // finish will not log Trailer. Log Trailer here. - logEntry := &binarylog.ServerTrailer{ - OnClientSide: true, - Trailer: cs.Trailer(), - Err: err, - } - if logEntry.Err == io.EOF { - logEntry.Err = nil - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) - } - } } return err } @@ -1001,18 +993,30 @@ func (cs *clientStream) finish(err error) { } } } + cs.mu.Unlock() - // For binary logging. only log cancel in finish (could be caused by RPC ctx - // canceled or ClientConn closed). Trailer will be logged in RecvMsg. - // - // Only one of cancel or trailer needs to be logged. In the cases where - // users don't call RecvMsg, users must have already canceled the RPC. 
- if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { - c := &binarylog.Cancel{ - OnClientSide: true, - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, c) + // Only one of cancel or trailer needs to be logged. + if len(cs.binlogs) != 0 { + switch err { + case errContextCanceled, errContextDeadline, ErrClientConnClosing: + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } + default: + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } } if err == nil { @@ -1028,7 +1032,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1055,7 +1059,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { return nil } -func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { +func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} @@ -1270,7 +1274,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s} + as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1348,7 +1352,7 @@ func (as *addrConnStream) Context() context.Context { return as.s.Context() } -func (as *addrConnStream) SendMsg(m interface{}) (err error) { +func (as *addrConnStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on 
the client stream for errors generated by this SendMsg @@ -1393,7 +1397,7 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) { return nil } -func (as *addrConnStream) RecvMsg(m interface{}) (err error) { +func (as *addrConnStream) RecvMsg(m any) (err error) { defer func() { if err != nil || !as.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. @@ -1512,7 +1516,7 @@ type ServerStream interface { // // It is not safe to modify the message after calling SendMsg. Tracing // libraries and stats handlers may use the message lazily. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On // any non-EOF error, the stream is aborted and the error contains the @@ -1521,7 +1525,7 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // serverStream implements a server side Stream. 
@@ -1602,7 +1606,7 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { ss.s.SetTrailer(md) } -func (ss *serverStream) SendMsg(m interface{}) (err error) { +func (ss *serverStream) SendMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1610,7 +1614,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1677,7 +1681,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { return nil } -func (ss *serverStream) RecvMsg(m interface{}) (err error) { +func (ss *serverStream) RecvMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1685,7 +1689,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1757,7 +1761,7 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { // prepareMsg returns the hdr, payload and data // using the compressors passed or using the // passed preparedmsg -func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index bfa5dfa40..07f012576 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ 
-27,6 +27,8 @@ package tap import ( "context" + + "google.golang.org/grpc/metadata" ) // Info defines the relevant information needed by the handles. @@ -34,6 +36,10 @@ type Info struct { // FullMethodName is the string of grpc method (in the format of // /package.service/method). FullMethodName string + + // Header contains the header metadata received. + Header metadata.MD + // TODO: More to be added. } diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 07a2d26b3..9ded79321 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -97,8 +97,8 @@ func truncate(x string, l int) string { // payload represents an RPC request or response payload. type payload struct { - sent bool // whether this is an outgoing payload - msg interface{} // e.g. a proto.Message + sent bool // whether this is an outgoing payload + msg any // e.g. a proto.Message // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? } @@ -111,7 +111,7 @@ func (p payload) String() string { type fmtStringer struct { format string - a []interface{} + a []any } func (f *fmtStringer) String() string { diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 3cc754062..6d2cadd79 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.56.3" +const Version = "1.59.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index a8e4732b3..bb480f1f9 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -84,12 +84,18 @@ not git grep -l 'x/net/context' -- "*.go" # thread safety. git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +# - Do not use "interface{}"; use "any" instead. 
+git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' + # - Do not call grpclog directly. Use grpclog.Component instead. git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" +# - Ensure all usages of grpc_testing package are renamed when importing. +not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" + # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' @@ -106,7 +112,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do goimports -l . 2>&1 | not grep -vE "\.pb\.go" golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - go mod tidy -compat=1.17 + go mod tidy -compat=1.19 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd @@ -168,8 +174,6 @@ proto.RegisteredExtension is deprecated proto.RegisteredExtensions is deprecated proto.RegisterMapType is deprecated proto.Unmarshaler is deprecated -resolver.Backend -resolver.GRPCLB Target is deprecated: Use the Target field in the BuildOptions instead. xxx_messageInfo_ ' "${SC_OUT}" diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index d09d22e13..66b95870e 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -106,13 +106,19 @@ func (o MarshalOptions) Format(m proto.Message) string { // MarshalOptions. Do not depend on the output being stable. It may change over // time across different versions of the program. 
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { - return o.marshal(m) + return o.marshal(nil, m) +} + +// MarshalAppend appends the JSON format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) } // marshal is a centralized function that all marshal operations go through. // For profiling purposes, avoid changing the name of this function or // introducing other code paths for marshal that do not go through this. -func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { if o.Multiline && o.Indent == "" { o.Indent = defaultIndent } @@ -120,7 +126,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { o.Resolver = protoregistry.GlobalTypes } - internalEnc, err := json.NewEncoder(o.Indent) + internalEnc, err := json.NewEncoder(b, o.Indent) if err != nil { return nil, err } @@ -128,7 +134,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { // Treat nil message interface as an empty message, // in which case the output in an empty JSON object. if m == nil { - return []byte("{}"), nil + return append(b, '{', '}'), nil } enc := encoder{internalEnc, o} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index ebf6c6528..722a7b41d 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -101,13 +101,19 @@ func (o MarshalOptions) Format(m proto.Message) string { // MarshalOptions object. Do not depend on the output being stable. It may // change over time across different versions of the program. 
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { - return o.marshal(m) + return o.marshal(nil, m) +} + +// MarshalAppend appends the textproto format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) } // marshal is a centralized function that all marshal operations go through. // For profiling purposes, avoid changing the name of this function or // introducing other code paths for marshal that do not go through this. -func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { var delims = [2]byte{'{', '}'} if o.Multiline && o.Indent == "" { @@ -117,7 +123,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { o.Resolver = protoregistry.GlobalTypes } - internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII) + internalEnc, err := text.NewEncoder(b, o.Indent, delims, o.EmitASCII) if err != nil { return nil, err } @@ -125,7 +131,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { // Treat nil message interface as an empty message, // in which case there is nothing to output. if m == nil { - return []byte{}, nil + return b, nil } enc := encoder{internalEnc, o} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go index fbdf34873..934f2dcb3 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go @@ -41,8 +41,10 @@ type Encoder struct { // // If indent is a non-empty string, it causes every entry for an Array or Object // to be preceded by the indent and trailed by a newline. 
-func NewEncoder(indent string) (*Encoder, error) { - e := &Encoder{} +func NewEncoder(buf []byte, indent string) (*Encoder, error) { + e := &Encoder{ + out: buf, + } if len(indent) > 0 { if strings.Trim(indent, " \t") != "" { return nil, errors.New("indent may only be composed of space or tab characters") @@ -176,13 +178,13 @@ func appendFloat(out []byte, n float64, bitSize int) []byte { // WriteInt writes out the given signed integer in JSON number value. func (e *Encoder) WriteInt(n int64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatInt(n, 10)...) + e.out = strconv.AppendInt(e.out, n, 10) } // WriteUint writes out the given unsigned integer in JSON number value. func (e *Encoder) WriteUint(n uint64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatUint(n, 10)...) + e.out = strconv.AppendUint(e.out, n, 10) } // StartObject writes out the '{' symbol. diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go index da289ccce..cf7aed77b 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -53,8 +53,10 @@ type encoderState struct { // If outputASCII is true, strings will be serialized in such a way that // multi-byte UTF-8 sequences are escaped. This property ensures that the // overall output is ASCII (as opposed to UTF-8). 
-func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { - e := &Encoder{} +func NewEncoder(buf []byte, indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { + e := &Encoder{ + encoderState: encoderState{out: buf}, + } if len(indent) > 0 { if strings.Trim(indent, " \t") != "" { return nil, errors.New("indent may only be composed of space and tab characters") @@ -195,13 +197,13 @@ func appendFloat(out []byte, n float64, bitSize int) []byte { // WriteInt writes out the given signed integer value. func (e *Encoder) WriteInt(n int64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatInt(n, 10)...) + e.out = strconv.AppendInt(e.out, n, 10) } // WriteUint writes out the given unsigned integer value. func (e *Encoder) WriteUint(n uint64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatUint(n, 10)...) + e.out = strconv.AppendUint(e.out, n, 10) } // WriteLiteral writes out the given string as a literal value without quotes. diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 5c0e8f73f..136f1b215 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -183,13 +183,58 @@ const ( // Field names for google.protobuf.ExtensionRangeOptions. 
const ( ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration" + ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification" ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" + ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration" + ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification" ) // Field numbers for google.protobuf.ExtensionRangeOptions. const ( ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3 +) + +// Full and short names for google.protobuf.ExtensionRangeOptions.VerificationState. +const ( + ExtensionRangeOptions_VerificationState_enum_fullname = "google.protobuf.ExtensionRangeOptions.VerificationState" + ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" +) + +// Names for google.protobuf.ExtensionRangeOptions.Declaration. +const ( + ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" + ExtensionRangeOptions_Declaration_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration" +) + +// Field names for google.protobuf.ExtensionRangeOptions.Declaration. 
+const ( + ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" + ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" + ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" + ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated" + ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" + ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" + + ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" + ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" + ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" + ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated" + ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" + ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" +) + +// Field numbers for google.protobuf.ExtensionRangeOptions.Declaration. 
+const ( + ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 + ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 + ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4 + ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 + ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.FieldDescriptorProto. @@ -540,6 +585,7 @@ const ( FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" FieldOptions_Retention_field_name protoreflect.Name = "retention" FieldOptions_Target_field_name protoreflect.Name = "target" + FieldOptions_Targets_field_name protoreflect.Name = "targets" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -552,6 +598,7 @@ const ( FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" + FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -567,6 +614,7 @@ const ( FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 FieldOptions_Target_field_number protoreflect.FieldNumber = 18 + FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) diff --git 
a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go index 3bc710138..e0f75fea0 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -32,6 +32,7 @@ const ( Type_Options_field_name protoreflect.Name = "options" Type_SourceContext_field_name protoreflect.Name = "source_context" Type_Syntax_field_name protoreflect.Name = "syntax" + Type_Edition_field_name protoreflect.Name = "edition" Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name" Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields" @@ -39,6 +40,7 @@ const ( Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options" Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context" Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax" + Type_Edition_field_fullname protoreflect.FullName = "google.protobuf.Type.edition" ) // Field numbers for google.protobuf.Type. @@ -49,6 +51,7 @@ const ( Type_Options_field_number protoreflect.FieldNumber = 4 Type_SourceContext_field_number protoreflect.FieldNumber = 5 Type_Syntax_field_number protoreflect.FieldNumber = 6 + Type_Edition_field_number protoreflect.FieldNumber = 7 ) // Names for google.protobuf.Field. 
@@ -121,12 +124,14 @@ const ( Enum_Options_field_name protoreflect.Name = "options" Enum_SourceContext_field_name protoreflect.Name = "source_context" Enum_Syntax_field_name protoreflect.Name = "syntax" + Enum_Edition_field_name protoreflect.Name = "edition" Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name" Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue" Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options" Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context" Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax" + Enum_Edition_field_fullname protoreflect.FullName = "google.protobuf.Enum.edition" ) // Field numbers for google.protobuf.Enum. @@ -136,6 +141,7 @@ const ( Enum_Options_field_number protoreflect.FieldNumber = 3 Enum_SourceContext_field_number protoreflect.FieldNumber = 4 Enum_Syntax_field_number protoreflect.FieldNumber = 5 + Enum_Edition_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.EnumValue. diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go index 33745ed06..dea522e12 100644 --- a/vendor/google.golang.org/protobuf/internal/order/order.go +++ b/vendor/google.golang.org/protobuf/internal/order/order.go @@ -33,7 +33,7 @@ var ( return !inOneof(ox) && inOneof(oy) } // Fields in disjoint oneof sets are sorted by declaration index. - if ox != nil && oy != nil && ox != oy { + if inOneof(ox) && inOneof(oy) && ox != oy { return ox.Index() < oy.Index() } // Fields sorted by field number. 
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index f7014cd51..0999f29d5 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,7 +51,7 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 30 + Minor = 31 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go index 554b9c6c0..f1692b49b 100644 --- a/vendor/google.golang.org/protobuf/proto/size.go +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -73,23 +73,27 @@ func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protore } func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) { + sizeTag := protowire.SizeTag(num) + if fd.IsPacked() && list.Len() > 0 { content := 0 for i, llen := 0, list.Len(); i < llen; i++ { content += o.sizeSingular(num, fd.Kind(), list.Get(i)) } - return protowire.SizeTag(num) + protowire.SizeBytes(content) + return sizeTag + protowire.SizeBytes(content) } for i, llen := 0, list.Len(); i < llen; i++ { - size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i)) + size += sizeTag + o.sizeSingular(num, fd.Kind(), list.Get(i)) } return size } func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) { + sizeTag := protowire.SizeTag(num) + mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool { - size += protowire.SizeTag(num) + size += sizeTag size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value)) return true }) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go 
b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 54ce326df..717b106f3 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -363,6 +363,8 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "retention", nil) case 18: b = p.appendSingularField(b, "target", nil) + case 19: + b = p.appendRepeatedField(b, "targets", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -418,6 +420,10 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { switch (*p)[0] { case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + case 2: + b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration) + case 3: + b = p.appendSingularField(b, "verification", nil) } return b } @@ -473,3 +479,24 @@ func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { } return b } + +func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "number", nil) + case 2: + b = p.appendSingularField(b, "full_name", nil) + case 3: + b = p.appendSingularField(b, "type", nil) + case 4: + b = p.appendSingularField(b, "is_repeated", nil) + case 5: + b = p.appendSingularField(b, "reserved", nil) + case 6: + b = p.appendSingularField(b, "repeated", nil) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index dac5671db..04c00f737 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -48,6 +48,64 @@ import ( sync "sync" ) +// The 
verification state of the extension range. +type ExtensionRangeOptions_VerificationState int32 + +const ( + // All the extensions of the range must be declared. + ExtensionRangeOptions_DECLARATION ExtensionRangeOptions_VerificationState = 0 + ExtensionRangeOptions_UNVERIFIED ExtensionRangeOptions_VerificationState = 1 +) + +// Enum value maps for ExtensionRangeOptions_VerificationState. +var ( + ExtensionRangeOptions_VerificationState_name = map[int32]string{ + 0: "DECLARATION", + 1: "UNVERIFIED", + } + ExtensionRangeOptions_VerificationState_value = map[string]int32{ + "DECLARATION": 0, + "UNVERIFIED": 1, + } +) + +func (x ExtensionRangeOptions_VerificationState) Enum() *ExtensionRangeOptions_VerificationState { + p := new(ExtensionRangeOptions_VerificationState) + *p = x + return p +} + +func (x ExtensionRangeOptions_VerificationState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *ExtensionRangeOptions_VerificationState) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = ExtensionRangeOptions_VerificationState(num) + return nil +} + +// Deprecated: Use ExtensionRangeOptions_VerificationState.Descriptor instead. 
+func (ExtensionRangeOptions_VerificationState) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3, 0} +} + type FieldDescriptorProto_Type int32 const ( @@ -137,11 +195,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[0] + return &file_google_protobuf_descriptor_proto_enumTypes[1] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -197,11 +255,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -258,11 +316,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -288,7 +346,13 @@ type FieldOptions_CType int32 const ( // Default mode. 
- FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_STRING FieldOptions_CType = 0 + // The option [ctype=CORD] may be applied to a non-repeated field of type + // "bytes". It indicates that in C++, the data should be stored in a Cord + // instead of a string. For very large strings, this may reduce memory + // fragmentation. It may also allow better performance when parsing from a + // Cord, or when parsing with aliasing enabled, as the parsed Cord may then + // alias the original buffer. FieldOptions_CORD FieldOptions_CType = 1 FieldOptions_STRING_PIECE FieldOptions_CType = 2 ) @@ -318,11 +382,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -380,11 +444,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -442,11 +506,11 @@ func (x FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func 
(FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return &file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -526,11 +590,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -588,11 +652,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -652,11 +716,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() 
protoreflect.EnumNumber { @@ -1015,7 +1079,21 @@ type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` -} + // go/protobuf-stripping-extension-declarations + // Like Metadata, but we use a repeated field to hold all extension + // declarations. This should avoid the size increases of transforming a large + // extension range into small ranges in generated binaries. + Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` + // The verification state of the range. + // TODO(b/278783756): flip the default to DECLARATION once all empty ranges + // are marked as UNVERIFIED. + Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` +} + +// Default values for ExtensionRangeOptions fields. +const ( + Default_ExtensionRangeOptions_Verification = ExtensionRangeOptions_UNVERIFIED +) func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} @@ -1056,6 +1134,20 @@ func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption return nil } +func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declaration { + if x != nil { + return x.Declaration + } + return nil +} + +func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { + if x != nil && x.Verification != nil { + return *x.Verification + } + return Default_ExtensionRangeOptions_Verification +} + // Describes a field within a message. 
type FieldDescriptorProto struct { state protoimpl.MessageState @@ -2046,8 +2138,10 @@ type FieldOptions struct { // The ctype option instructs the C++ code generator to use a different // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! + // options below. This option is only implemented to support use of + // [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of + // type "bytes" in the open source release -- sorry, we'll try to include + // other types in a future version! Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` // The packed option can be enabled for repeated primitive fields to enable // a more efficient representation on the wire. Rather than repeatedly @@ -2111,9 +2205,11 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. - DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` - Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` - Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
+ Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` + Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2224,6 +2320,7 @@ func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { return FieldOptions_RETENTION_UNKNOWN } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { if x != nil && x.Target != nil { return *x.Target @@ -2231,6 +2328,13 @@ func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { return FieldOptions_TARGET_TYPE_UNKNOWN } +func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { + if x != nil { + return x.Targets + } + return nil +} + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2960,6 +3064,108 @@ func (x *DescriptorProto_ReservedRange) GetEnd() int32 { return 0 } +type ExtensionRangeOptions_Declaration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The extension number declared within the extension range. + Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"` + // The fully-qualified name of the extension field. There must be a leading + // dot in front of the full name. + FullName *string `protobuf:"bytes,2,opt,name=full_name,json=fullName" json:"full_name,omitempty"` + // The fully-qualified type name of the extension field. Unlike + // Metadata.type, Declaration.type must have a leading dot for messages + // and enums. 
+ Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` + // Deprecated. Please use "repeated". + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"` + // If true, indicates that the number is reserved in the extension range, + // and any extension field with the number will fail to compile. Set this + // when a declared extension field is deleted. + Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"` + // If true, indicates that the extension must be defined as repeated. + // Otherwise the extension must be defined as optional. + Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` +} + +func (x *ExtensionRangeOptions_Declaration) Reset() { + *x = ExtensionRangeOptions_Declaration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRangeOptions_Declaration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} + +func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRangeOptions_Declaration.ProtoReflect.Descriptor instead. 
+func (*ExtensionRangeOptions_Declaration) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *ExtensionRangeOptions_Declaration) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *ExtensionRangeOptions_Declaration) GetFullName() string { + if x != nil && x.FullName != nil { + return *x.FullName + } + return "" +} + +func (x *ExtensionRangeOptions_Declaration) GetType() string { + if x != nil && x.Type != nil { + return *x.Type + } + return "" +} + +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool { + if x != nil && x.IsRepeated != nil { + return *x.IsRepeated + } + return false +} + +func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { + if x != nil && x.Reserved != nil { + return *x.Reserved + } + return false +} + +func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool { + if x != nil && x.Repeated != nil { + return *x.Repeated + } + return false +} + // Range of reserved numeric values. Reserved values may not be used by // entries in the same enum. Reserved ranges may not overlap. 
// @@ -2978,7 +3184,7 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2991,7 +3197,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) String() string { func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3038,7 +3244,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3051,7 +3257,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3182,7 +3388,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := 
&file_google_protobuf_descriptor_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3195,7 +3401,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3269,7 +3475,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3282,7 +3488,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3436,264 +3642,296 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, - 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 
0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, - 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0xad, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, + 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68, + 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a, + 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, + 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 
0x74, 0x79, 0x70, + 0x65, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, + 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34, + 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, + 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 
0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, - 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, - 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, - 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, - 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 
0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, - 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, - 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, - 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, - 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, - 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, - 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, - 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, - 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, - 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, - 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, - 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, - 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, - 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, - 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 
0x0a, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, + 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, + 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, + 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, + 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, + 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, + 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, + 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, + 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, + 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, + 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, + 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, + 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, + 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 
0x10, 0x11, 0x12, 0x0f, 0x0a, + 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, + 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, + 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, + 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, + 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, + 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, + 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, + 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, + 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, + 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, + 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, + 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, + 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, + 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, + 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, + 0x6e, 0x64, 0x5f, 
0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, + 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, + 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, + 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, - 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, - 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 
0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, - 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, - 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, - 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, - 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, - 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, - 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, - 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, - 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, - 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 
0x65, 0x45, 0x71, 0x75, - 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, - 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, - 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, - 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, - 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, - 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, - 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 
0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, - 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, - 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, - 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, - 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, - 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, - 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, - 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, - 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, - 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, - 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, 0x03, 0x0a, - 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x12, - 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, - 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, - 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, - 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, - 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, + 0x69, 0x6d, 0x69, 0x7a, 
0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, + 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, + 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, + 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, + 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, + 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 
0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, + 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, + 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, + 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, + 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, + 
0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, - 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xb7, 0x08, 0x0a, 0x0c, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, - 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 
0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x12, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x58, 0x0a, 
0x14, 0x75, + 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, + 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, + 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, + 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, + 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, + 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 
0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, + 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x85, 0x09, 0x0a, + 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, + 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, + 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, + 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, + 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, + 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, + 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, + 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 
0x12, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, @@ -3885,98 +4123,103 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 9) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ - (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 1: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode - 
(FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 5: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 6: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 7: google.protobuf.MethodOptions.IdempotencyLevel - (GeneratedCodeInfo_Annotation_Semantic)(0), // 8: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 9: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 10: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 11: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 12: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 13: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 14: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 15: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 16: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 17: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 18: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 19: google.protobuf.FileOptions - (*MessageOptions)(nil), // 20: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 21: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 22: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 23: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 24: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 25: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 26: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 27: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 28: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 29: google.protobuf.GeneratedCodeInfo - 
(*DescriptorProto_ExtensionRange)(nil), // 30: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 31: google.protobuf.DescriptorProto.ReservedRange - (*EnumDescriptorProto_EnumReservedRange)(nil), // 32: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 33: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 34: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 35: google.protobuf.GeneratedCodeInfo.Annotation + (ExtensionRangeOptions_VerificationState)(0), // 0: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 1: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 2: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 3: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 4: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 5: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 6: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 7: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 8: google.protobuf.MethodOptions.IdempotencyLevel + (GeneratedCodeInfo_Annotation_Semantic)(0), // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 10: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 11: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 12: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 13: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 14: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 15: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 16: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 17: 
google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 18: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 19: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 20: google.protobuf.FileOptions + (*MessageOptions)(nil), // 21: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 22: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 23: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 24: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 25: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 26: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 27: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 28: google.protobuf.UninterpretedOption + (*SourceCodeInfo)(nil), // 29: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 30: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 31: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 32: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 33: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*UninterpretedOption_NamePart)(nil), // 35: google.protobuf.UninterpretedOption.NamePart + (*SourceCodeInfo_Location)(nil), // 36: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 37: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 10, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 11, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 15, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 17, // 3: 
google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 13, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 19, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 28, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 13, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 13, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 11, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 15, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 30, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 14, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 20, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 31, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 27, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 21, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 22, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 16, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 23, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> 
google.protobuf.EnumOptions - 32, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 24, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 18, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 25, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 26, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 27, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 5, // 32: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 6, // 33: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType - 27, // 34: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 35: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 36: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 37: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 27, // 38: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 7, // 39: google.protobuf.MethodOptions.idempotency_level:type_name -> 
google.protobuf.MethodOptions.IdempotencyLevel - 27, // 40: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 33, // 41: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 34, // 42: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 35, // 43: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 12, // 44: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 8, // 45: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 46, // [46:46] is the sub-list for method output_type - 46, // [46:46] is the sub-list for method input_type - 46, // [46:46] is the sub-list for extension type_name - 46, // [46:46] is the sub-list for extension extendee - 0, // [0:46] is the sub-list for field type_name + 11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 14, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 14, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 12, // 9: 
google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 32, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 0, // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 2, // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 1, // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 22, // 20: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 17, // 22: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 19, // 26: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 26, // 
27: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 27, // 28: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 3, // 29: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 4, // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 5, // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 6, // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 7, // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType + 7, // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 40: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 8, // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 36, // 45: 
google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 37, // 46: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 9, // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 49, // [49:49] is the sub-list for method output_type + 49, // [49:49] is the sub-list for method input_type + 49, // [49:49] is the sub-list for extension type_name + 49, // [49:49] is the sub-list for extension extendee + 0, // [0:49] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -4280,7 +4523,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state case 1: @@ -4292,7 +4535,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UninterpretedOption_NamePart); i { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state case 1: @@ -4304,7 +4547,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo_Location); i { + switch v := v.(*UninterpretedOption_NamePart); i { case 0: return &v.state case 1: @@ -4316,6 +4559,18 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo_Location); 
i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -4333,8 +4588,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 9, - NumMessages: 27, + NumEnums: 10, + NumMessages: 28, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index a6c7a33f3..580b232f4 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -142,39 +142,39 @@ import ( // // Example 2: Pack and unpack a message in Java. // -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// // or ... -// if (any.isSameTypeAs(Foo.getDefaultInstance())) { -// foo = any.unpack(Foo.getDefaultInstance()); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := anypb.New(foo) -// if err != nil { -// ... -// } -// ... -// foo := &pb.Foo{} -// if err := any.UnmarshalTo(foo); err != nil { -// ... -// } +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// // or ... +// if (any.isSameTypeAs(Foo.getDefaultInstance())) { +// foo = any.unpack(Foo.getDefaultInstance()); +// } +// +// Example 3: Pack and unpack a message in Python. 
+// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... +// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... +// } // // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack @@ -182,8 +182,8 @@ import ( // in the type URL, for example "foo.bar.com/x/y.z" will yield type // name "y.z". // -// # JSON -// +// JSON +// ==== // The JSON representation of an `Any` value uses the regular // representation of the deserialized, embedded message, with an // additional field `@type` which contains the type URL. Example: diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 61f69fc11..81511a336 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -167,7 +167,7 @@ import ( // [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with // the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use // the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() // ) to obtain a formatter capable of generating timestamps in this format. 
type Timestamp struct { state protoimpl.MessageState diff --git a/vendor/modules.txt b/vendor/modules.txt index 162045334..7b96d2485 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,11 +1,10 @@ # github.com/HdrHistogram/hdrhistogram-go v1.1.2 ## explicit; go 1.14 -# github.com/Shopify/sarama v1.38.1 +# github.com/IBM/sarama v1.40.1 ## explicit; go 1.17 -github.com/Shopify/sarama -# github.com/aws/aws-sdk-go-v2 v1.22.1 +github.com/IBM/sarama +# github.com/aws/aws-sdk-go-v2 v1.23.0 ## explicit; go 1.19 -github.com/aws/aws-sdk-go-v2 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/defaults github.com/aws/aws-sdk-go-v2/aws/middleware @@ -18,6 +17,8 @@ github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 github.com/aws/aws-sdk-go-v2/aws/signer/v4 github.com/aws/aws-sdk-go-v2/aws/transport/http github.com/aws/aws-sdk-go-v2/internal/auth +github.com/aws/aws-sdk-go-v2/internal/auth/smithy +github.com/aws/aws-sdk-go-v2/internal/endpoints github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn github.com/aws/aws-sdk-go-v2/internal/rand github.com/aws/aws-sdk-go-v2/internal/sdk @@ -26,10 +27,10 @@ github.com/aws/aws-sdk-go-v2/internal/shareddefaults github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/config v1.18.37 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/config v1.25.3 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.15.1 +# github.com/aws/aws-sdk-go-v2/credentials v1.16.2 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds @@ -38,50 +39,54 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client github.com/aws/aws-sdk-go-v2/credentials/processcreds github.com/aws/aws-sdk-go-v2/credentials/ssocreds github.com/aws/aws-sdk-go-v2/credentials/stscreds -# 
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.2 +# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.4 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/feature/ec2/imds github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.1 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.3 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.1 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.3 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -# github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/internal/ini -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.1 +# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1 +## explicit; go 1.19 +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.3 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/sns v1.23.0 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/service/sns v1.25.2 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/sns github.com/aws/aws-sdk-go-v2/service/sns/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sns/types -# github.com/aws/aws-sdk-go-v2/service/sqs v1.23.2 +# github.com/aws/aws-sdk-go-v2/service/sqs v1.23.4 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/sqs github.com/aws/aws-sdk-go-v2/service/sqs/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sqs/types -# github.com/aws/aws-sdk-go-v2/service/sso v1.17.0 +# github.com/aws/aws-sdk-go-v2/service/sso v1.17.2 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints 
github.com/aws/aws-sdk-go-v2/service/sso/types -# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.0 +# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.0 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/ssooidc github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ssooidc/types -# github.com/aws/aws-sdk-go-v2/service/sts v1.25.0 +# github.com/aws/aws-sdk-go-v2/service/sts v1.25.3 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sts/types -# github.com/aws/smithy-go v1.16.0 +# github.com/aws/smithy-go v1.17.0 ## explicit; go 1.19 github.com/aws/smithy-go +github.com/aws/smithy-go/auth github.com/aws/smithy-go/auth/bearer github.com/aws/smithy-go/context github.com/aws/smithy-go/document @@ -129,7 +134,7 @@ github.com/eclipse/paho.golang/paho ## explicit; go 1.13 github.com/elastic/elastic-transport-go/v8/elastictransport github.com/elastic/elastic-transport-go/v8/elastictransport/version -# github.com/elastic/go-elasticsearch/v8 v8.7.0 +# github.com/elastic/go-elasticsearch/v8 v8.11.0 ## explicit; go 1.13 github.com/elastic/go-elasticsearch/v8 github.com/elastic/go-elasticsearch/v8/esapi @@ -189,6 +194,7 @@ github.com/elastic/go-elasticsearch/v8/typedapi/cluster/existscomponenttemplate github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getcomponenttemplate github.com/elastic/go-elasticsearch/v8/typedapi/cluster/getsettings github.com/elastic/go-elasticsearch/v8/typedapi/cluster/health +github.com/elastic/go-elasticsearch/v8/typedapi/cluster/info github.com/elastic/go-elasticsearch/v8/typedapi/cluster/pendingtasks github.com/elastic/go-elasticsearch/v8/typedapi/cluster/postvotingconfigexclusions github.com/elastic/go-elasticsearch/v8/typedapi/cluster/putcomponenttemplate @@ -197,6 +203,7 @@ github.com/elastic/go-elasticsearch/v8/typedapi/cluster/remoteinfo 
github.com/elastic/go-elasticsearch/v8/typedapi/cluster/reroute github.com/elastic/go-elasticsearch/v8/typedapi/cluster/state github.com/elastic/go-elasticsearch/v8/typedapi/cluster/stats +github.com/elastic/go-elasticsearch/v8/typedapi/core/bulk github.com/elastic/go-elasticsearch/v8/typedapi/core/clearscroll github.com/elastic/go-elasticsearch/v8/typedapi/core/closepointintime github.com/elastic/go-elasticsearch/v8/typedapi/core/count @@ -214,10 +221,13 @@ github.com/elastic/go-elasticsearch/v8/typedapi/core/getscript github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptcontext github.com/elastic/go-elasticsearch/v8/typedapi/core/getscriptlanguages github.com/elastic/go-elasticsearch/v8/typedapi/core/getsource +github.com/elastic/go-elasticsearch/v8/typedapi/core/healthreport github.com/elastic/go-elasticsearch/v8/typedapi/core/index github.com/elastic/go-elasticsearch/v8/typedapi/core/info github.com/elastic/go-elasticsearch/v8/typedapi/core/knnsearch github.com/elastic/go-elasticsearch/v8/typedapi/core/mget +github.com/elastic/go-elasticsearch/v8/typedapi/core/msearch +github.com/elastic/go-elasticsearch/v8/typedapi/core/msearchtemplate github.com/elastic/go-elasticsearch/v8/typedapi/core/mtermvectors github.com/elastic/go-elasticsearch/v8/typedapi/core/openpointintime github.com/elastic/go-elasticsearch/v8/typedapi/core/ping @@ -252,6 +262,7 @@ github.com/elastic/go-elasticsearch/v8/typedapi/eql/search github.com/elastic/go-elasticsearch/v8/typedapi/features/getfeatures github.com/elastic/go-elasticsearch/v8/typedapi/features/resetfeatures github.com/elastic/go-elasticsearch/v8/typedapi/fleet/globalcheckpoints +github.com/elastic/go-elasticsearch/v8/typedapi/fleet/msearch github.com/elastic/go-elasticsearch/v8/typedapi/fleet/search github.com/elastic/go-elasticsearch/v8/typedapi/graph/explore github.com/elastic/go-elasticsearch/v8/typedapi/ilm/deletelifecycle @@ -275,6 +286,7 @@ github.com/elastic/go-elasticsearch/v8/typedapi/indices/createdatastream 
github.com/elastic/go-elasticsearch/v8/typedapi/indices/datastreamsstats github.com/elastic/go-elasticsearch/v8/typedapi/indices/delete github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletealias +github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatalifecycle github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletedatastream github.com/elastic/go-elasticsearch/v8/typedapi/indices/deleteindextemplate github.com/elastic/go-elasticsearch/v8/typedapi/indices/deletetemplate @@ -284,11 +296,13 @@ github.com/elastic/go-elasticsearch/v8/typedapi/indices/exists github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsalias github.com/elastic/go-elasticsearch/v8/typedapi/indices/existsindextemplate github.com/elastic/go-elasticsearch/v8/typedapi/indices/existstemplate +github.com/elastic/go-elasticsearch/v8/typedapi/indices/explaindatalifecycle github.com/elastic/go-elasticsearch/v8/typedapi/indices/fieldusagestats github.com/elastic/go-elasticsearch/v8/typedapi/indices/flush github.com/elastic/go-elasticsearch/v8/typedapi/indices/forcemerge github.com/elastic/go-elasticsearch/v8/typedapi/indices/get github.com/elastic/go-elasticsearch/v8/typedapi/indices/getalias +github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatalifecycle github.com/elastic/go-elasticsearch/v8/typedapi/indices/getdatastream github.com/elastic/go-elasticsearch/v8/typedapi/indices/getfieldmapping github.com/elastic/go-elasticsearch/v8/typedapi/indices/getindextemplate @@ -300,6 +314,7 @@ github.com/elastic/go-elasticsearch/v8/typedapi/indices/modifydatastream github.com/elastic/go-elasticsearch/v8/typedapi/indices/open github.com/elastic/go-elasticsearch/v8/typedapi/indices/promotedatastream github.com/elastic/go-elasticsearch/v8/typedapi/indices/putalias +github.com/elastic/go-elasticsearch/v8/typedapi/indices/putdatalifecycle github.com/elastic/go-elasticsearch/v8/typedapi/indices/putindextemplate 
github.com/elastic/go-elasticsearch/v8/typedapi/indices/putmapping github.com/elastic/go-elasticsearch/v8/typedapi/indices/putsettings @@ -380,6 +395,7 @@ github.com/elastic/go-elasticsearch/v8/typedapi/ml/infertrainedmodel github.com/elastic/go-elasticsearch/v8/typedapi/ml/info github.com/elastic/go-elasticsearch/v8/typedapi/ml/openjob github.com/elastic/go-elasticsearch/v8/typedapi/ml/postcalendarevents +github.com/elastic/go-elasticsearch/v8/typedapi/ml/postdata github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdatafeed github.com/elastic/go-elasticsearch/v8/typedapi/ml/previewdataframeanalytics github.com/elastic/go-elasticsearch/v8/typedapi/ml/putcalendar @@ -409,6 +425,7 @@ github.com/elastic/go-elasticsearch/v8/typedapi/ml/updatemodelsnapshot github.com/elastic/go-elasticsearch/v8/typedapi/ml/upgradejobsnapshot github.com/elastic/go-elasticsearch/v8/typedapi/ml/validate github.com/elastic/go-elasticsearch/v8/typedapi/ml/validatedetector +github.com/elastic/go-elasticsearch/v8/typedapi/monitoring/bulk github.com/elastic/go-elasticsearch/v8/typedapi/nodes/clearrepositoriesmeteringarchive github.com/elastic/go-elasticsearch/v8/typedapi/nodes/getrepositoriesmeteringinfo github.com/elastic/go-elasticsearch/v8/typedapi/nodes/hotthreads @@ -416,6 +433,10 @@ github.com/elastic/go-elasticsearch/v8/typedapi/nodes/info github.com/elastic/go-elasticsearch/v8/typedapi/nodes/reloadsecuresettings github.com/elastic/go-elasticsearch/v8/typedapi/nodes/stats github.com/elastic/go-elasticsearch/v8/typedapi/nodes/usage +github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/delete +github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/get +github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/list +github.com/elastic/go-elasticsearch/v8/typedapi/queryruleset/put github.com/elastic/go-elasticsearch/v8/typedapi/rollup/deletejob github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getjobs 
github.com/elastic/go-elasticsearch/v8/typedapi/rollup/getrollupcaps @@ -428,6 +449,14 @@ github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/cachestats github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/clearcache github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/mount github.com/elastic/go-elasticsearch/v8/typedapi/searchablesnapshots/stats +github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/delete +github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/deletebehavioralanalytics +github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/get +github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/getbehavioralanalytics +github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/list +github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/put +github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/putbehavioralanalytics +github.com/elastic/go-elasticsearch/v8/typedapi/searchapplication/search github.com/elastic/go-elasticsearch/v8/typedapi/security/activateuserprofile github.com/elastic/go-elasticsearch/v8/typedapi/security/authenticate github.com/elastic/go-elasticsearch/v8/typedapi/security/bulkupdateapikeys @@ -438,6 +467,7 @@ github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedrealms github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedroles github.com/elastic/go-elasticsearch/v8/typedapi/security/clearcachedservicetokens github.com/elastic/go-elasticsearch/v8/typedapi/security/createapikey +github.com/elastic/go-elasticsearch/v8/typedapi/security/createcrossclusterapikey github.com/elastic/go-elasticsearch/v8/typedapi/security/createservicetoken github.com/elastic/go-elasticsearch/v8/typedapi/security/deleteprivileges github.com/elastic/go-elasticsearch/v8/typedapi/security/deleterole @@ -513,15 +543,24 @@ github.com/elastic/go-elasticsearch/v8/typedapi/sql/getasyncstatus 
github.com/elastic/go-elasticsearch/v8/typedapi/sql/query github.com/elastic/go-elasticsearch/v8/typedapi/sql/translate github.com/elastic/go-elasticsearch/v8/typedapi/ssl/certificates +github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/deletesynonym +github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/deletesynonymrule +github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonym +github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonymrule +github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/getsynonymssets +github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonym +github.com/elastic/go-elasticsearch/v8/typedapi/synonyms/putsynonymrule github.com/elastic/go-elasticsearch/v8/typedapi/tasks/cancel github.com/elastic/go-elasticsearch/v8/typedapi/tasks/get github.com/elastic/go-elasticsearch/v8/typedapi/tasks/list +github.com/elastic/go-elasticsearch/v8/typedapi/textstructure/findstructure github.com/elastic/go-elasticsearch/v8/typedapi/transform/deletetransform github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransform github.com/elastic/go-elasticsearch/v8/typedapi/transform/gettransformstats github.com/elastic/go-elasticsearch/v8/typedapi/transform/previewtransform github.com/elastic/go-elasticsearch/v8/typedapi/transform/puttransform github.com/elastic/go-elasticsearch/v8/typedapi/transform/resettransform +github.com/elastic/go-elasticsearch/v8/typedapi/transform/schedulenowtransform github.com/elastic/go-elasticsearch/v8/typedapi/transform/starttransform github.com/elastic/go-elasticsearch/v8/typedapi/transform/stoptransform github.com/elastic/go-elasticsearch/v8/typedapi/transform/updatetransform @@ -547,7 +586,9 @@ github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cattrainedmodelscolu github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/cattransformcolumn github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/childscoremode 
github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/chunkingmode +github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterinfotarget github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clusterprivilege +github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/clustersearchstatus github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/combinedfieldsoperator github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/combinedfieldszeroterms github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/conditionop @@ -614,6 +655,7 @@ github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icucollationstrength github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationmode github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icunormalizationtype github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/icutransformdirection +github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/impactarea github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/include github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexcheckonstartup github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexingjobstate @@ -622,6 +664,7 @@ github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexoptions github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexprivilege github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexroutingallocationoptions github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indexroutingrebalanceoptions +github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/indicatorhealthstatus github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/inputtype github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jobblockedreason github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/jobstate @@ -647,6 +690,7 @@ github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalization 
github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/normalizemethod github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/numericfielddataformat github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/onscripterror +github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operationtype github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/optype github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/pagerdutycontexttype @@ -657,6 +701,8 @@ github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticnametype github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/phoneticruletype github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/policytype github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/quantifier +github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/queryrulecriteriatype +github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/queryruletype github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/rangerelation github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/ratemode github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/refresh @@ -678,9 +724,9 @@ github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shapetype github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardroutingstate github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardsstatsstage github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardstoreallocation +github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shardstorestatus github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shutdownstatus github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/shutdowntype -github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/simplequerystringflag github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snapshotsort github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snapshotupgradestate 
github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/snowballlanguage @@ -721,12 +767,14 @@ github.com/elastic/go-elasticsearch/v8/typedapi/watcher/activatewatch github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deactivatewatch github.com/elastic/go-elasticsearch/v8/typedapi/watcher/deletewatch github.com/elastic/go-elasticsearch/v8/typedapi/watcher/executewatch +github.com/elastic/go-elasticsearch/v8/typedapi/watcher/getsettings github.com/elastic/go-elasticsearch/v8/typedapi/watcher/getwatch github.com/elastic/go-elasticsearch/v8/typedapi/watcher/putwatch github.com/elastic/go-elasticsearch/v8/typedapi/watcher/querywatches github.com/elastic/go-elasticsearch/v8/typedapi/watcher/start github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stats github.com/elastic/go-elasticsearch/v8/typedapi/watcher/stop +github.com/elastic/go-elasticsearch/v8/typedapi/watcher/updatesettings github.com/elastic/go-elasticsearch/v8/typedapi/xpack/info github.com/elastic/go-elasticsearch/v8/typedapi/xpack/usage # github.com/go-redis/redis/extra/rediscmd v0.2.0 @@ -824,8 +872,8 @@ github.com/jcmturner/rpc/v2/ndr # github.com/julienschmidt/httprouter v1.3.0 ## explicit; go 1.7 github.com/julienschmidt/httprouter -# github.com/klauspost/compress v1.15.14 -## explicit; go 1.17 +# github.com/klauspost/compress v1.16.6 +## explicit; go 1.18 github.com/klauspost/compress github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 @@ -833,8 +881,6 @@ github.com/klauspost/compress/internal/cpuinfo github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash -# github.com/kr/text v0.2.0 -## explicit # github.com/matttproud/golang_protobuf_extensions v1.0.4 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil @@ -863,22 +909,22 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus/client_golang 
v1.16.0 -## explicit; go 1.17 +# github.com/prometheus/client_golang v1.17.0 +## explicit; go 1.19 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/testutil github.com/prometheus/client_golang/prometheus/testutil/promlint -# github.com/prometheus/client_model v0.4.0 -## explicit; go 1.18 +# github.com/prometheus/client_model v0.5.0 +## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.42.0 +# github.com/prometheus/common v0.44.0 ## explicit; go 1.18 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/prometheus/procfs v0.10.1 +# github.com/prometheus/procfs v0.11.1 ## explicit; go 1.19 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs @@ -886,8 +932,6 @@ github.com/prometheus/procfs/internal/util # github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 ## explicit github.com/rcrowley/go-metrics -# github.com/rogpeppe/go-internal v1.9.0 -## explicit; go 1.17 # github.com/streadway/amqp v1.1.0 ## explicit; go 1.10 github.com/streadway/amqp @@ -931,7 +975,7 @@ github.com/xdg-go/stringprep # github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a ## explicit; go 1.12 github.com/youmark/pkcs8 -# go.mongodb.org/mongo-driver v1.11.7 +# go.mongodb.org/mongo-driver v1.13.0 ## explicit; go 1.13 go.mongodb.org/mongo-driver/bson go.mongodb.org/mongo-driver/bson/bsoncodec @@ -940,9 +984,22 @@ go.mongodb.org/mongo-driver/bson/bsonrw go.mongodb.org/mongo-driver/bson/bsontype go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/event -go.mongodb.org/mongo-driver/internal +go.mongodb.org/mongo-driver/internal/aws +go.mongodb.org/mongo-driver/internal/aws/awserr +go.mongodb.org/mongo-driver/internal/aws/credentials 
+go.mongodb.org/mongo-driver/internal/aws/signer/v4 +go.mongodb.org/mongo-driver/internal/bsonutil +go.mongodb.org/mongo-driver/internal/codecutil +go.mongodb.org/mongo-driver/internal/credproviders +go.mongodb.org/mongo-driver/internal/csfle +go.mongodb.org/mongo-driver/internal/csot +go.mongodb.org/mongo-driver/internal/driverutil +go.mongodb.org/mongo-driver/internal/handshake +go.mongodb.org/mongo-driver/internal/httputil +go.mongodb.org/mongo-driver/internal/logger +go.mongodb.org/mongo-driver/internal/ptrutil +go.mongodb.org/mongo-driver/internal/rand go.mongodb.org/mongo-driver/internal/randutil -go.mongodb.org/mongo-driver/internal/randutil/rand go.mongodb.org/mongo-driver/internal/uuid go.mongodb.org/mongo-driver/mongo go.mongodb.org/mongo-driver/mongo/address @@ -953,11 +1010,10 @@ go.mongodb.org/mongo-driver/mongo/readpref go.mongodb.org/mongo-driver/mongo/writeconcern go.mongodb.org/mongo-driver/tag go.mongodb.org/mongo-driver/version -go.mongodb.org/mongo-driver/x/bsonx go.mongodb.org/mongo-driver/x/bsonx/bsoncore go.mongodb.org/mongo-driver/x/mongo/driver go.mongodb.org/mongo-driver/x/mongo/driver/auth -go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/awsv4 +go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds go.mongodb.org/mongo-driver/x/mongo/driver/auth/internal/gssapi go.mongodb.org/mongo-driver/x/mongo/driver/connstring go.mongodb.org/mongo-driver/x/mongo/driver/dns @@ -977,8 +1033,8 @@ golang.org/x/crypto/md4 golang.org/x/crypto/ocsp golang.org/x/crypto/pbkdf2 golang.org/x/crypto/scrypt -# golang.org/x/exp v0.0.0-20230321023759-10a507213a29 -## explicit; go 1.18 +# golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa +## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices golang.org/x/exp/slog @@ -994,12 +1050,13 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/sync v0.2.0 -## explicit +# golang.org/x/sync v0.3.0 +## explicit; go 1.17 
golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.13.0 -## explicit; go 1.17 +golang.org/x/sync/singleflight +# golang.org/x/sys v0.14.0 +## explicit; go 1.18 golang.org/x/sys/unix golang.org/x/sys/windows # golang.org/x/text v0.13.0 @@ -1008,14 +1065,14 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.3.0 -## explicit +# golang.org/x/time v0.4.0 +## explicit; go 1.18 golang.org/x/time/rate -# google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.56.3 -## explicit; go 1.17 +# google.golang.org/grpc v1.59.0 +## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -1045,6 +1102,7 @@ google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/idle google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver @@ -1060,6 +1118,7 @@ google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer google.golang.org/grpc/reflection +google.golang.org/grpc/reflection/grpc_reflection_v1 google.golang.org/grpc/reflection/grpc_reflection_v1alpha google.golang.org/grpc/resolver google.golang.org/grpc/serviceconfig @@ -1067,7 +1126,7 @@ google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap google.golang.org/grpc/test/bufconn -# google.golang.org/protobuf v1.30.0 +# google.golang.org/protobuf v1.31.0 ## explicit; go 1.11 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext